]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - bfd/elf32-arm.c
Update year range in copyright notice of binutils files
[thirdparty/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2022 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33
/* Return the relocation section name associated with NAME.  HTAB is the
   bfd's elf32_arm_link_hash_table (not hash_entry); its USE_REL flag
   selects REL-style (".rel" prefix) over RELA-style (".rela" prefix)
   relocation sections.  NAME must be a string literal: the prefix is
   joined to it by compile-time literal concatenation.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
/* Return the size of one external relocation entry.  HTAB is the bfd's
   elf32_arm_link_hash_table (not hash_entry); USE_REL selects the
   smaller REL form (no explicit addend) over the RELA form.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
45
/* Return the function used to swap relocations in from external form.
   HTAB is the bfd's elf32_arm_link_hash_table (not hash_entry); the
   REL/RELA flavour picks the matching BFD swap routine.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
52
/* Return the function used to swap relocations out to external form.
   HTAB is the bfd's elf32_arm_link_hash_table (not hash_entry); the
   REL/RELA flavour picks the matching BFD swap routine.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
59
/* No generic info_to_howto hook is provided; REL-style relocations are
   decoded by elf32_arm_info_to_howto.  */
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place rounded down to a
   4-byte (word) boundary.  */
#define Pa(X) ((X) & 0xfffffffc)
68
69 static bool elf32_arm_write_section (bfd *output_bfd,
70 struct bfd_link_info *link_info,
71 asection *sec,
72 bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 false, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 false, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 false), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 true, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 false, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 true), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 false, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 false, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 false), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 true, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 false, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 true), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 true, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 false, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 true), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 false, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 false, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 false), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 false, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 false, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 false), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 false, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 false, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 false), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 false, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 false, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 false), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 false, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 false, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 false), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 true, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 false, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 true), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 true, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 false, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 true), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 false, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 false, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 false), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 false, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 false, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 false), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 false, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 false, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 false), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 true, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 false, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 true), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 true, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 false, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 true), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 false, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 true, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 false), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 false, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 true, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 false), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 false, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 true, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 false), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 false, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 true, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 false), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 false, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 true, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 false), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 false, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 true, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 false), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 false, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 true, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 false), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 false, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 true, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 false), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 true, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 true, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 true), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 false, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 true, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 false), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 true, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 false, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 true), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 true, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 false, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 true), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 true, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 false, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 true), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 true, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 false, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 true), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 false, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 false, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 false), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 true, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 false, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 true), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 true, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 false, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 true), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 true, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 false, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 true), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 false, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 false, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 false), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 false, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 false, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 false), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 false, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 false, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 false), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 false, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 false, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 false), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 false, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 false, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 false), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 false, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 false, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 false), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 false, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 false, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 true), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 true, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 false, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 true), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 false, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 false, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 false), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 false, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 false, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 false), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 true, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 false, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 true), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 true, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 false, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 true), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 false, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 false, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 false), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 false, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 false, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 false), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 true, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 false, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 true), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 true, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 false, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 true), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 true, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 false, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 true), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 true, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 false, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 true), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 true, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 false, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 true), /* pcrel_offset */
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 true, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 false, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 true), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 false, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 false, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 false), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 true, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 false, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 false), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 true, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 false, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 true), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 true, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 false, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 true), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 true, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 false, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 true), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 true, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 false, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 true), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 true, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 false, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 true), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 true, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 false, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 true), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 true, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 false, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 true), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 true, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 false, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 true), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 true, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 false, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 true), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 true, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 false, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 true), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 true, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 false, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 true), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 true, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 false, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 true), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 true, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 false, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 true), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 true, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 false, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 true), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 true, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 false, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 true), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 true, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 false, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 true), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 true, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 false, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 true), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 true, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 false, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 true), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 true, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 false, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 true), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 true, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 false, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 true), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 true, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 false, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 true), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 true, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 false, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 true), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 true, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 false, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 true), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 true, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 false, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 true), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 true, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 false, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 true), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 true, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 false, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 true), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 true, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 false, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 true), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 false, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 false, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 false), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 false, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 false, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 false), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 false, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 false, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 false), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 false, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 false, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 false), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 false, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 false, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 false), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 false, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 false, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 false), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 false, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 true, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 false), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 false, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 false, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 false), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 false, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_dont,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 false, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 false), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 false, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 false, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 false), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 false, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 false, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 false), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 false, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 false, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 false), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 true, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 false, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 true), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 false, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 false, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 false), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 false, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 false, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 false), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 false, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 false, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 false), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 false, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 false, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 false), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 true, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 false, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 true), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 true, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 false, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 true), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 false, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 true, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 false), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 false, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 true, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 false), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 false, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 true, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 false), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 false, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 true, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 false), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 false, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 true, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 false), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 false, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 false, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 false), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 false, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 false, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 false), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 false, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 false, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 false), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 false, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_dont,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 false, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 false), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 16, /* bitsize. */
1699 false, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 false, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 false), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 16, /* bitsize. */
1712 false, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 false, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 false), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 16, /* bitsize. */
1725 false, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 false, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 false), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 16, /* bitsize. */
1738 false, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 false, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 false), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1751 16, /* bitsize. */
1752 true, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 false, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 true), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1764 12, /* bitsize. */
1765 true, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 false, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 true), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1777 18, /* bitsize. */
1778 true, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 false, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 true), /* pcrel_offset. */
1787 };
1788
/* Relocation types 160 (R_ARM_IRELATIVE) through 167: IFUNC and FDPIC
   relocations.  Indexed by (r_type - R_ARM_IRELATIVE); see
   elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  /* Dynamic relocation resolved at load time via an ifunc resolver.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 true,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: GOT offset of a function descriptor's GOT entry.  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: offset of a function descriptor relative to the GOT base.  */
  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: address of a function descriptor.  */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC: the two-word (64-bit) contents of a function descriptor.
     NOTE(review): bitsize is 64 while size is 2 (one 32-bit word) and
     dst_mask only covers the low word — confirm this asymmetry is
     intentional for the two-word descriptor fixup.  */
  HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  /* FDPIC variants of the TLS relocations.  */
  HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 false),		/* pcrel_offset */
};
1897
/* Relocation numbers 249-255 are reserved for extended, currently unused,
   relocations; only 249-252 (R_ARM_RREL32 .. R_ARM_RBASE) have howto
   entries here.  Indexed by (r_type - R_ARM_RREL32); all fields are
   zero as the relocations perform no fixup.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 false,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 false,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 false)			/* pcrel_offset */
};
1957
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1963
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971
1972 return NULL;
1973 }
1974
1975 static bool
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1978 {
1979 unsigned int r_type;
1980
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983 {
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return false;
1989 }
1990 return true;
1991 }
1992
/* One row of the BFD-generic-code -> ELF relocation-number mapping
   used by elf32_arm_reloc_type_lookup below.  elf_reloc_val is an
   unsigned char because all mapped R_ARM_* values fit in 8 bits.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type  bfd_reloc_val;	/* BFD_RELOC_* code.  */
    unsigned char	      elf_reloc_val;	/* R_ARM_* number.  */
  };
1998
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001 {
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102 };
2103
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2107 {
2108 unsigned int i;
2109
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113
2114 return NULL;
2115 }
2116
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2120 {
2121 unsigned int i;
2122
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2127
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2132
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2137
2138 return NULL;
2139 }
2140
/* Support for core dump NOTE sections.  */

/* Parse a Linux/ARM NT_PRSTATUS core note and create the ".reg/<pid>"
   pseudo-section holding the general registers.  Returns false if the
   descriptor size does not match the one layout we know.  */
static bool
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return false;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig -- current signal, 16 bits at offset 12.  */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid -- LWP id at offset 24.  */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg -- 18 registers of 4 bytes starting at offset 72.
	 (These offsets mirror the kernel's struct elf_prstatus.)  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2172
2173 static bool
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176 switch (note->descsz)
2177 {
2178 default:
2179 return false;
2180
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188 }
2189
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2193 {
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2196
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2199 }
2200
2201 return true;
2202 }
2203
/* Write a Linux/ARM core note of NOTE_TYPE into BUF (of size *BUFSIZ),
   taking the note payload from the varargs.  For NT_PRPSINFO the
   varargs are (const char *fname, const char *psargs); for NT_PRSTATUS
   they are (long pid, int cursig, const void *gregs).  The fixed data
   sizes and field offsets must match those decoded in the grok
   functions above.  Returns NULL for unsupported note types.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* fname at offset 28 (16 bytes), psargs at 44 (80 bytes);
	   deliberately not NUL-terminated if full width is used.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Offsets mirror elf32_arm_nabi_grok_prstatus: pid at 24,
	   cursig at 12, 72 bytes of registers at 72.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2262
/* BFD target vector names for the plain (non-VxWorks/NaCl) targets.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook the core-note handlers defined above into the ELF backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Convenience names for raw 32-bit ARM and 16-bit Thumb encodings.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to the real symbol for a CMSE secure entry function.  */
#define CMSE_PREFIX "__acle_se_"

/* Section holding the generated CMSE secure gateway stubs.  */
#define CMSE_STUB_NAME ".gnu.sgstubs"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
2313
/* Trampoline reached from a TLS descriptor call sequence; r0 holds the
   GOT-relative offset of the descriptor on entry.  */
static const unsigned long tls_trampoline [] =
{
  0xe08e0000,		/* add r0, lr, r0 */
  0xe5901004,		/* ldr r1, [r0,#4] */
  0xe12fff11,		/* bx  r1 */
};

/* Lazy-resolution trampoline for TLS descriptors: loads the dynamic
   linker's resolver address from the GOT and jumps to it.  The two
   trailing words are PC-relative GOT offsets patched at link time.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
{
  0xe52d2004, /* push {r2} */
  0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
  0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
  0xe79f2002, /* 1: ldr r2, [pc, r2] */
  0xe081100f, /* 2: add r1, pc */
  0xe12fff12, /* bx r2 */
  0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
			+ dl_tlsdesc_lazy_resolver(GOT)   */
  0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
};
2333
/* NOTE: [Thumb nop sequence]
   When adding code that transitions from Thumb to Arm the instruction that
   should be used for the alignment padding should be 0xe7fd (b .-2) instead of
   a nop for performance reasons.  */

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L2.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
  {
    0xc00cf8df,    /* ldr.w   r12, .L1 */
    0x0c09eb0c,    /* add.w   r12, r12, r9 */
    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
    0xf000f8dc,    /* ldr.w   pc, [r12] */
    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
    0xc008f85f,    /* ldr.w   r12, .L2 */
    0xcd04f84d,    /* push    {r12} */
    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
    0xf000f8d9,    /* ldr.w   pc, [r9] */
  };
2370
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe010,		/* ldr   lr, [pc, #16]  */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #NN	*/
    0xe28cca00,		/* add	 ip, ip, #NN	*/
    0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
    0x00000000,		/* unused		*/
  };

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe004,		/* ldr   lr, [pc, #4]   */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
    0x00000000,		/* &GOT[0] - .          */
  };

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
  {
    0xe28fc600,		/* add   ip, pc, #0xNN00000 */
    0xe28cca00,		/* add	 ip, ip, #0xNN000   */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
  };

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
  {
    0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
    0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
    0xe28cca00,		/* add	 ip, ip, #0xNN000    */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
  };

/* Set by the --long-plt command-line option; selects the long PLT
   entry format above for all subsequent entries.  */
static bool elf32_arm_use_long_plt_entry = false;

#endif /* not FOUR_WORD_PLT */
2432
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0xf8dfb500,		/* push	   {lr}		 */
    0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
    0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
    0x00000000,		/* &GOT[0] - .		 */
  };

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0x0c00f240,		/* movw    ip, #0xNNNN  */
    0x0c00f2c0,		/* movt    ip, #0xNNNN  */
    0xf8dc44fc,		/* add     ip, pc       */
    0xe7fcf000		/* ldr.w   pc, [ip]     */
			/* b      .-4           */
  };
2459
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
  {
    0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
    0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
  };

/* The format of subsequent entries in a VxWorks executable.
   The first three words resolve via the GOT; the last three form the
   lazy-binding fragment that jumps to the PLT header.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf000,		/* ldr	  pc,[ip]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xea000000,		/* b	  _PLT				*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* The format of entries in a VxWorks shared library.
   Like the executable form, but GOT accesses are indirected through
   the PIC register r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe599f008,		/* ldr	  pc,[r9,#8]			*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* An initial stub used if the PLT entry is referenced from Thumb code.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
  {
    0x4778,		/* bx pc */
    0xe7fd		/* b .-2 */
  };
2499
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  NaCl requires 16-byte instruction bundles and masked
   indirect branches (the bic/bx pairs).  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
  {
    /* First bundle: */
    0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xe52dc008,		/* str	ip, [sp, #-8]!			*/
    /* Second bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
    /* Third bundle: */
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    /* .Lplt_tail: */
    0xe50dc004,		/* str	ip, [sp, #-4]			*/
    /* Fourth bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
  };
/* Byte offset of the shared .Lplt_tail label inside the entry above.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
  {
    0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xea000000,		/* b	.Lplt_tail			*/
  };

/* Maximum forward/backward reach of the various ARM and Thumb branch
   encodings, relative to the branch instruction's address (hence the
   pipeline-offset +8 / +4 terms).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2547
/* Kind of a single element in a stub template: a 16-bit Thumb insn, a
   32-bit Thumb-2 insn, a 32-bit ARM insn, or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Constructors for insn_sequence elements; the last argument is the
   relocation addend (or, for THUMB16_BCOND_INSN, a flag).  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: the raw encoding, its kind, the
   relocation to apply to it (if any) and that relocation's addend.  */
typedef struct
{
  bfd_vma	       data;
  enum stub_insn_type  type;
  unsigned int	       r_type;
  int		       reloc_addend;
}  insn_sequence;
2575
/* See note [Thumb nop sequence] when adding a veneer.  */

/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
  {
    ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
  {
    ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
  {
    THUMB16_INSN (0xb401),	    /* push {r0} */
    THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x4684),	    /* mov  ip, r0 */
    THUMB16_INSN (0xbc01),	    /* pop  {r0} */
    THUMB16_INSN (0x4760),	    /* bx   ip */
    THUMB16_INSN (0xbf00),	    /* nop */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd  R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
  {
    THUMB32_INSN (0xf85ff000),	    /* ldr.w  pc, [pc, #-0] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd  R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  Uses movw/movt so no literal pool word is
   needed (PureCode sections must contain no data).  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
  {
    THUMB32_MOVW (0xf2400c00),	   /* mov.w ip, R_ARM_MOVW_ABS_NC */
    THUMB32_MOVT (0xf2c00c00),	   /* movt  ip, R_ARM_MOVT_ABS << 16 */
    THUMB16_INSN (0x4760),	   /* bx   ip */
  };

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	    /* bx   ip */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_REL_INSN (0xea000000, -8),  /* b    (X-8) */
  };
2652
/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  {
    ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc] */
    ARM_INSN (0xe08ff00c),	    /* add   pc, pc, ip */
    DATA_WORD (0, R_ARM_REL32, -4), /* dcd   R_ARM_REL32(X-4) */
  };

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),  /* dcd   R_ARM_REL32(X) */
  };

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	    /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),  /* dcd   R_ARM_REL32(X) */
  };

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_INSN (0xe59fc000),	    /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe08cf00f),	    /* add  pc, ip, pc */
    DATA_WORD (0, R_ARM_REL32, -4), /* dcd  R_ARM_REL32(X-4) */
  };

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
  {
    THUMB16_INSN (0xb401),	    /* push {r0} */
    THUMB16_INSN (0x4802),	    /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x46fc),	    /* mov  ip, pc */
    THUMB16_INSN (0x4484),	    /* add  ip, r0 */
    THUMB16_INSN (0xbc01),	    /* pop  {r0} */
    THUMB16_INSN (0x4760),	    /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 4),  /* dcd  R_ARM_REL32(X+4) */
  };

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_INSN (0xe59fc004),	    /* ldr  ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	    /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	    /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 0),  /* dcd  R_ARM_REL32(X) */
  };

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
  {
    ARM_INSN (0xe59f1000),	    /* ldr   r1, [pc] */
    ARM_INSN (0xe08ff001),	    /* add   pc, pc, r1 */
    DATA_WORD (0, R_ARM_REL32, -4), /* dcd   R_ARM_REL32(X-4) */
  };

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
  {
    THUMB16_INSN (0x4778),	    /* bx   pc */
    THUMB16_INSN (0xe7fd),	    /* b   .-2 */
    ARM_INSN (0xe59f1000),	    /* ldr  r1, [pc, #0] */
    ARM_INSN (0xe081f00f),	    /* add  pc, r1, pc */
    DATA_WORD (0, R_ARM_REL32, -4), /* dcd  R_ARM_REL32(X-4) */
  };
2737
/* NaCl ARM -> ARM long branch stub.  The bic masks the target for
   NaCl's sandboxed indirect branches; entries are padded to bundle
   size with a breakpoint and zero words.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe320f000),		/* nop */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* Stub used for transition to secure state (aka SG veneer).  The sg
   instruction must be the first insn of the veneer; the b.w then
   forwards to the real (renamed __acle_se_-prefixed) entry point.  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
  {
    THUMB32_INSN (0xe97fe97f),		/* sg.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
  };
2770
2771
/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
  {
    THUMB16_BCOND_INSN (0xd001),	/* b<cond>.n true.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w insn_after_original_branch.  */
    THUMB32_B_INSN (0xf000b800, -4)	/* true: b.w original_branch_dest.  */
  };

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Identical template; kept separate so the veneer kinds stay distinct.  */
static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
  {
    ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
  };
2804
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in elf32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2831
/* One entry per long/short branch stub defined above.  This X-macro list
   is expanded twice below: once to build enum elf32_arm_stub_type and once
   to build the stub_definitions[] template table, keeping the two in
   sync by construction.  */
#define DEF_STUBS \
  DEF_STUB (long_branch_any_any)	\
  DEF_STUB (long_branch_v4t_arm_thumb) \
  DEF_STUB (long_branch_thumb_only) \
  DEF_STUB (long_branch_v4t_thumb_thumb)	\
  DEF_STUB (long_branch_v4t_thumb_arm) \
  DEF_STUB (short_branch_v4t_thumb_arm) \
  DEF_STUB (long_branch_any_arm_pic) \
  DEF_STUB (long_branch_any_thumb_pic) \
  DEF_STUB (long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB (long_branch_v4t_arm_thumb_pic) \
  DEF_STUB (long_branch_v4t_thumb_arm_pic) \
  DEF_STUB (long_branch_thumb_only_pic) \
  DEF_STUB (long_branch_any_tls_pic) \
  DEF_STUB (long_branch_v4t_thumb_tls_pic) \
  DEF_STUB (long_branch_arm_nacl) \
  DEF_STUB (long_branch_arm_nacl_pic) \
  DEF_STUB (cmse_branch_thumb_only) \
  DEF_STUB (a8_veneer_b_cond) \
  DEF_STUB (a8_veneer_b) \
  DEF_STUB (a8_veneer_bl) \
  DEF_STUB (a8_veneer_blx) \
  DEF_STUB (long_branch_thumb2_only) \
  DEF_STUB (long_branch_thumb2_only_pure)
2857
/* First expansion of DEF_STUBS: one enumerator per stub template,
   bracketed by arm_stub_none (no stub) and max_stub_type (count).  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB
2866
/* Note the first a8_veneer type.  Stub types at or above this value are
   Cortex-A8 erratum veneers ("lwm" = "low-water mark").  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2869
/* An instruction template for a stub, paired with its length in
   insn_sequence entries (not bytes).  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Second expansion of DEF_STUBS: the template table, indexed by
   enum elf32_arm_stub_type.  Slot 0 corresponds to arm_stub_none.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2882
/* An entry in the stub hash table: one generated (or to-be-generated)
   branch stub, keyed by a unique name derived from the branch's source
   and destination.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2934
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  TYPE is the mapping symbol class ('a' = ARM code,
   't' = Thumb code, 'd' = data) at address VMA.  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;
2944
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* A singly-linked list of VFP11 erratum records for one section.  Branch
   records (u.b) and veneer records (u.v) point at each other so either
   side of the fix can be found from the other.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2976
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* A singly-linked list of STM32L4XX erratum records for one section,
   mirroring elf32_vfp11_erratum_list: branch (u.b) and veneer (u.v)
   records cross-reference each other.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
3006
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the exidx entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
3026
/* ARM-specific per-section data, extending the generic ELF section data.
   Retrieved via the elf32_arm_section_data() accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Accessor for the ARM-specific section data of SEC.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
3061
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  /* Location of the offending branch within SECTION.  */
  bfd_vma offset;
  /* Where the branch wants to go.  */
  bfd_vma target_offset;
  /* The original (unpatched) branch instruction.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};
3079
/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* True if a non-A8 stub already redirects this branch, in which case
     no A8 veneer is needed.  */
  bool non_a8_stub;
};

/* The size of the thread control block, in bytes.  */
#define TCB_SIZE	8
3096
/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
3121
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local
{
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor in .got; the low bit is set once
     the descriptor has been filled in (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
3144
/* ARM-specific per-BFD data, extending the generic ELF tdata.  The
   per-local-symbol arrays below are allocated lazily by
   elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* The number of entries in each of the arrays in this structure.
     Used to avoid buffer overruns.  */
  bfd_size_type num_entries;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};
3171
/* Accessors for the ARM-specific tdata of BFD, and its lazily-allocated
   per-local-symbol arrays (see struct elf_arm_obj_tdata above).  */

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_num_entries(bfd) \
  (elf_arm_tdata (bfd)->num_entries)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* True if BFD is an ELF object with ARM-flavoured tdata attached.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3194
3195 static bool
3196 elf32_arm_mkobject (bfd *abfd)
3197 {
3198 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 ARM_ELF_DATA);
3200 }
3201
/* Downcast a generic ELF hash entry to the ARM-specific variant.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  /* Offset of the function descriptor in .got; the low bit is set once
     the descriptor has been filled in (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
  int gotfuncdesc_offset;
};
3212
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Bit-flags describing how this symbol's GOT entries are used; a symbol
   may need several kinds of TLS entry at once, hence a bitmask.  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3249
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bool (*) (struct elf_link_hash_entry *, void *)) (func),		\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Returns NULL if the hash table does not belong to this backend.  */
#define elf32_arm_hash_table(p) \
  ((is_elf_hash_table ((p)->hash)					\
    && elf_hash_table_id (elf_hash_table (p)) == ARM_ELF_DATA)		\
   ? (struct elf32_arm_link_hash_table *) (p)->hash : NULL)

/* Typed wrapper around bfd_hash_lookup for the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor part of the jump table (one
   4-byte slot per R_ARM_TLS_DESC).  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3280
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of the section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target uses REL relocations.  */
  bool use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3437
3438 /* Add an FDPIC read-only fixup. */
3439 static void
3440 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3441 {
3442 bfd_vma fixup_offset;
3443
3444 fixup_offset = srofixup->reloc_count++ * 4;
3445 BFD_ASSERT (fixup_offset < srofixup->size);
3446 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3447 }
3448
/* Count trailing zero bits of MASK.  Falls back to a portable loop when
   the GCC builtin is unavailable; the fallback yields 32 for MASK == 0,
   whereas __builtin_ctz (0) is undefined, so callers must not pass 0.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit;

  /* Scan from the least significant bit upwards.  */
  for (bit = 0; bit < 8 * sizeof (mask); bit++)
    {
      if ((mask & 1u) != 0)
	break;
      mask >>= 1;
    }
  return bit;
#endif
}
3466
/* Count the set bits in MASK.  Uses the GCC builtin when available,
   otherwise a portable shift-and-add loop.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  /* Peel off the low bit until nothing is left.  */
  while (mask != 0)
    {
      count += (int) (mask & 1u);
      mask >>= 1;
    }
  return count;
#endif
}
3485
3486 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3487 asection *sreloc, Elf_Internal_Rela *rel);
3488
/* Fill in the FDPIC function descriptor at *FUNCDESC_OFFSET in .got for
   a function whose (symbol-table) dynamic index is DYNINDX.  OFFSET is
   the descriptor's byte offset within .got; ADDR and DYNRELOC_VALUE are
   the address words to store, and SEG is the GOT segment value stored in
   the second word of the static (non-PIC) case.

   The low bit of *FUNCDESC_OFFSET is a "descriptor already written"
   latch: the function does nothing if it is set, and sets it after
   writing, so the descriptor is emitted exactly once however many
   relocations reference it.  */
static void
arm_elf_fill_funcdesc (bfd *output_bfd,
		       struct bfd_link_info *info,
		       int *funcdesc_offset,
		       int dynindx,
		       int offset,
		       bfd_vma addr,
		       bfd_vma dynreloc_value,
		       bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic (info))
	{
	  /* PIC link: emit an R_ARM_FUNCDESC_VALUE dynamic relocation
	     against the descriptor and store provisional contents.  */
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  /* Static FDPIC link: no dynamic relocations, so record both
	     descriptor words as read-only fixups to be adjusted by the
	     loader, and store the final values directly.  The second
	     word is the GOT value taken from the _GLOBAL_OFFSET_TABLE_
	     symbol (hgot).  */
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
			       sgot->output_section->vma + sgot->output_offset
			       + offset);
	  arm_elf_add_rofixup (output_bfd, globals->srofixup,
			       sgot->output_section->vma + sgot->output_offset
			       + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Latch: mark the descriptor as written.  */
      *funcdesc_offset |= 1;
    }
}
3536
3537 /* Create an entry in an ARM ELF linker hash table. */
3538
3539 static struct bfd_hash_entry *
3540 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3541 struct bfd_hash_table * table,
3542 const char * string)
3543 {
3544 struct elf32_arm_link_hash_entry * ret =
3545 (struct elf32_arm_link_hash_entry *) entry;
3546
3547 /* Allocate the structure if it has not already been allocated by a
3548 subclass. */
3549 if (ret == NULL)
3550 ret = (struct elf32_arm_link_hash_entry *)
3551 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3552 if (ret == NULL)
3553 return (struct bfd_hash_entry *) ret;
3554
3555 /* Call the allocation method of the superclass. */
3556 ret = ((struct elf32_arm_link_hash_entry *)
3557 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3558 table, string));
3559 if (ret != NULL)
3560 {
3561 ret->tls_type = GOT_UNKNOWN;
3562 ret->tlsdesc_got = (bfd_vma) -1;
3563 ret->plt.thumb_refcount = 0;
3564 ret->plt.maybe_thumb_refcount = 0;
3565 ret->plt.noncall_refcount = 0;
3566 ret->plt.got_offset = -1;
3567 ret->is_iplt = false;
3568 ret->export_glue = NULL;
3569
3570 ret->stub_cache = NULL;
3571
3572 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3573 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3574 ret->fdpic_cnts.funcdesc_cnt = 0;
3575 ret->fdpic_cnts.funcdesc_offset = -1;
3576 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3577 }
3578
3579 return (struct bfd_hash_entry *) ret;
3580 }
3581
/* Ensure that we have allocated bookkeeping structures for ABFD's local
   symbols.  Idempotent: elf_local_got_refcounts non-NULL means the work
   has already been done.  Returns false on allocation failure, in which
   case elf32_arm_num_entries stays 0 so later bounds checks fail safely.  */

static bool
elf32_arm_allocate_local_sym_info (bfd *abfd)
{
  if (elf_local_got_refcounts (abfd) == NULL)
    {
      bfd_size_type num_syms;

      /* Cleared now, set to NUM_SYMS only once every array below has
	 been successfully allocated.  */
      elf32_arm_num_entries (abfd) = 0;

      /* Whilst it might be tempting to allocate a single block of memory and
	 then divide it up amongst the arrays in the elf_arm_obj_tdata
	 structure, this interferes with the work of memory checkers looking
	 for buffer overruns.  So allocate each array individually.  */

      num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;

      elf_local_got_refcounts (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf_local_got_refcounts (abfd)));

      if (elf_local_got_refcounts (abfd) == NULL)
	return false;

      elf32_arm_local_tlsdesc_gotent (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_tlsdesc_gotent (abfd)));

      if (elf32_arm_local_tlsdesc_gotent (abfd) == NULL)
	return false;

      elf32_arm_local_iplt (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_iplt (abfd)));

      if (elf32_arm_local_iplt (abfd) == NULL)
	return false;

      elf32_arm_local_fdpic_cnts (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_fdpic_cnts (abfd)));

      if (elf32_arm_local_fdpic_cnts (abfd) == NULL)
	return false;

      elf32_arm_local_got_tls_type (abfd) = bfd_zalloc
	(abfd, num_syms * sizeof (* elf32_arm_local_got_tls_type (abfd)));

      if (elf32_arm_local_got_tls_type (abfd) == NULL)
	return false;

      elf32_arm_num_entries (abfd) = num_syms;

      /* The asserts below pin the allocation order above: each array's
	 alignment requirement must not exceed the previous one's, which
	 is what makes the individual bfd_zalloc calls equivalent to a
	 single partitioned block.  Do not reorder the allocations
	 without updating these.  */
#if GCC_VERSION >= 3000
      BFD_ASSERT (__alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd))
		  <= __alignof__ (*elf_local_got_refcounts (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_iplt (abfd))
		  <= __alignof__ (*elf32_arm_local_tlsdesc_gotent (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_fdpic_cnts (abfd))
		  <= __alignof__ (*elf32_arm_local_iplt (abfd)));
      BFD_ASSERT (__alignof__ (*elf32_arm_local_got_tls_type (abfd))
		  <= __alignof__ (*elf32_arm_local_fdpic_cnts (abfd)));
#endif
    }
  return true;
}
3646
3647 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3648 to input bfd ABFD. Create the information if it doesn't already exist.
3649 Return null if an allocation fails. */
3650
3651 static struct arm_local_iplt_info *
3652 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3653 {
3654 struct arm_local_iplt_info **ptr;
3655
3656 if (!elf32_arm_allocate_local_sym_info (abfd))
3657 return NULL;
3658
3659 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3660 BFD_ASSERT (r_symndx < elf32_arm_num_entries (abfd));
3661 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3662 if (*ptr == NULL)
3663 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3664 return *ptr;
3665 }
3666
3667 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3668 in ABFD's symbol table. If the symbol is global, H points to its
3669 hash table entry, otherwise H is null.
3670
3671 Return true if the symbol does have PLT information. When returning
3672 true, point *ROOT_PLT at the target-independent reference count/offset
3673 union and *ARM_PLT at the ARM-specific information. */
3674
3675 static bool
3676 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3677 struct elf32_arm_link_hash_entry *h,
3678 unsigned long r_symndx, union gotplt_union **root_plt,
3679 struct arm_plt_info **arm_plt)
3680 {
3681 struct arm_local_iplt_info *local_iplt;
3682
3683 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3684 return false;
3685
3686 if (h != NULL)
3687 {
3688 *root_plt = &h->root.plt;
3689 *arm_plt = &h->plt;
3690 return true;
3691 }
3692
3693 if (elf32_arm_local_iplt (abfd) == NULL)
3694 return false;
3695
3696 if (r_symndx >= elf32_arm_num_entries (abfd))
3697 return false;
3698
3699 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3700 if (local_iplt == NULL)
3701 return false;
3702
3703 *root_plt = &local_iplt->root;
3704 *arm_plt = &local_iplt->arm;
3705 return true;
3706 }
3707
3708 static bool using_thumb_only (struct elf32_arm_link_hash_table *globals);
3709
3710 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3711 before it. */
3712
3713 static bool
3714 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3715 struct arm_plt_info *arm_plt)
3716 {
3717 struct elf32_arm_link_hash_table *htab;
3718
3719 htab = elf32_arm_hash_table (info);
3720
3721 return (!using_thumb_only (htab) && (arm_plt->thumb_refcount != 0
3722 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3723 }
3724
3725 /* Return a pointer to the head of the dynamic reloc list that should
3726 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3727 ABFD's symbol table. Return null if an error occurs. */
3728
3729 static struct elf_dyn_relocs **
3730 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3731 Elf_Internal_Sym *isym)
3732 {
3733 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3734 {
3735 struct arm_local_iplt_info *local_iplt;
3736
3737 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3738 if (local_iplt == NULL)
3739 return NULL;
3740 return &local_iplt->dyn_relocs;
3741 }
3742 else
3743 {
3744 /* Track dynamic relocs needed for local syms too.
3745 We really need local syms available to do this
3746 easily. Oh well. */
3747 asection *s;
3748 void *vpp;
3749
3750 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3751 if (s == NULL)
3752 return NULL;
3753
3754 vpp = &elf_section_data (s)->local_dynrel;
3755 return (struct elf_dyn_relocs **) vpp;
3756 }
3757 }
3758
3759 /* Initialize an entry in the stub hash table. */
3760
3761 static struct bfd_hash_entry *
3762 stub_hash_newfunc (struct bfd_hash_entry *entry,
3763 struct bfd_hash_table *table,
3764 const char *string)
3765 {
3766 /* Allocate the structure if it has not already been allocated by a
3767 subclass. */
3768 if (entry == NULL)
3769 {
3770 entry = (struct bfd_hash_entry *)
3771 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3772 if (entry == NULL)
3773 return entry;
3774 }
3775
3776 /* Call the allocation method of the superclass. */
3777 entry = bfd_hash_newfunc (entry, table, string);
3778 if (entry != NULL)
3779 {
3780 struct elf32_arm_stub_hash_entry *eh;
3781
3782 /* Initialize the local fields. */
3783 eh = (struct elf32_arm_stub_hash_entry *) entry;
3784 eh->stub_sec = NULL;
3785 eh->stub_offset = (bfd_vma) -1;
3786 eh->source_value = 0;
3787 eh->target_value = 0;
3788 eh->target_section = NULL;
3789 eh->orig_insn = 0;
3790 eh->stub_type = arm_stub_none;
3791 eh->stub_size = 0;
3792 eh->stub_template = NULL;
3793 eh->stub_template_size = -1;
3794 eh->h = NULL;
3795 eh->id_sec = NULL;
3796 eh->output_name = NULL;
3797 }
3798
3799 return entry;
3800 }
3801
3802 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3803 shortcuts to them in our hash table. */
3804
3805 static bool
3806 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3807 {
3808 struct elf32_arm_link_hash_table *htab;
3809
3810 htab = elf32_arm_hash_table (info);
3811 if (htab == NULL)
3812 return false;
3813
3814 if (! _bfd_elf_create_got_section (dynobj, info))
3815 return false;
3816
3817 /* Also create .rofixup. */
3818 if (htab->fdpic_p)
3819 {
3820 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3821 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3822 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3823 if (htab->srofixup == NULL
3824 || !bfd_set_section_alignment (htab->srofixup, 2))
3825 return false;
3826 }
3827
3828 return true;
3829 }
3830
3831 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3832
3833 static bool
3834 create_ifunc_sections (struct bfd_link_info *info)
3835 {
3836 struct elf32_arm_link_hash_table *htab;
3837 const struct elf_backend_data *bed;
3838 bfd *dynobj;
3839 asection *s;
3840 flagword flags;
3841
3842 htab = elf32_arm_hash_table (info);
3843 dynobj = htab->root.dynobj;
3844 bed = get_elf_backend_data (dynobj);
3845 flags = bed->dynamic_sec_flags;
3846
3847 if (htab->root.iplt == NULL)
3848 {
3849 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3850 flags | SEC_READONLY | SEC_CODE);
3851 if (s == NULL
3852 || !bfd_set_section_alignment (s, bed->plt_alignment))
3853 return false;
3854 htab->root.iplt = s;
3855 }
3856
3857 if (htab->root.irelplt == NULL)
3858 {
3859 s = bfd_make_section_anyway_with_flags (dynobj,
3860 RELOC_SECTION (htab, ".iplt"),
3861 flags | SEC_READONLY);
3862 if (s == NULL
3863 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3864 return false;
3865 htab->root.irelplt = s;
3866 }
3867
3868 if (htab->root.igotplt == NULL)
3869 {
3870 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3871 if (s == NULL
3872 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3873 return false;
3874 htab->root.igotplt = s;
3875 }
3876 return true;
3877 }
3878
3879 /* Determine if we're dealing with a Thumb only architecture. */
3880
3881 static bool
3882 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3883 {
3884 int arch;
3885 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3886 Tag_CPU_arch_profile);
3887
3888 if (profile)
3889 return profile == 'M';
3890
3891 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3892
3893 /* Force return logic to be reviewed for each new architecture. */
3894 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3895
3896 if (arch == TAG_CPU_ARCH_V6_M
3897 || arch == TAG_CPU_ARCH_V6S_M
3898 || arch == TAG_CPU_ARCH_V7E_M
3899 || arch == TAG_CPU_ARCH_V8M_BASE
3900 || arch == TAG_CPU_ARCH_V8M_MAIN
3901 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3902 return true;
3903
3904 return false;
3905 }
3906
3907 /* Determine if we're dealing with a Thumb-2 object. */
3908
3909 static bool
3910 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3911 {
3912 int arch;
3913 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3914 Tag_THUMB_ISA_use);
3915
3916 /* No use of thumb permitted, or a legacy thumb-1/2 definition. */
3917 if (thumb_isa < 3)
3918 return thumb_isa == 2;
3919
3920 /* Variant of thumb is described by the architecture tag. */
3921 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3922
3923 /* Force return logic to be reviewed for each new architecture. */
3924 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3925
3926 return (arch == TAG_CPU_ARCH_V6T2
3927 || arch == TAG_CPU_ARCH_V7
3928 || arch == TAG_CPU_ARCH_V7E_M
3929 || arch == TAG_CPU_ARCH_V8
3930 || arch == TAG_CPU_ARCH_V8R
3931 || arch == TAG_CPU_ARCH_V8M_MAIN
3932 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3933 }
3934
3935 /* Determine whether Thumb-2 BL instruction is available. */
3936
3937 static bool
3938 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3939 {
3940 int arch =
3941 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3942
3943 /* Force return logic to be reviewed for each new architecture. */
3944 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
3945
3946 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3947 return (arch == TAG_CPU_ARCH_V6T2
3948 || arch >= TAG_CPU_ARCH_V7);
3949 }
3950
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also fixes the PLT header/entry sizes for target
   variants (VxWorks, Thumb-only, FDPIC).  Returns FALSE on failure.  */

static bool
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  /* Make sure the GOT (and, for FDPIC, .rofixup) exists first.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return false;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return false;

  if (htab->root.target_os == is_vxworks)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return false;

      /* VxWorks shared and executable links use different PLT layouts.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the hash table at DYNOBJ so using_thumb_only()
	 reads its build attributes; restore afterwards.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC PLTs have no header; with BIND_NOW the lazy-binding tail of
     each entry (5 words) is dropped.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE (elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry);
  }

  /* The generic creator must have produced these; anything else is a
     linker-internal error.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return true;
}
4025
/* Copy the extra info we tack onto an elf_link_hash_entry.  Called
   when IND becomes an indirect or weak alias of DIR, so the ARM
   per-symbol counters follow the symbol they now refer to.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  Counts are moved: added to DIR and
	 zeroed on IND, never duplicated.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only inherit IND's TLS access model when DIR has no GOT
	 references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  /* Let the generic ELF code copy the rest.  */
  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4066
4067 /* Destroy an ARM elf linker hash table. */
4068
4069 static void
4070 elf32_arm_link_hash_table_free (bfd *obfd)
4071 {
4072 struct elf32_arm_link_hash_table *ret
4073 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4074
4075 bfd_hash_table_free (&ret->stub_hash_table);
4076 _bfd_elf_link_hash_table_free (obfd);
4077 }
4078
/* Create an ARM elf linker hash table.  Returns the embedded generic
   hash table root, or NULL on allocation failure.  */

static struct bfd_link_hash_table *
elf32_arm_link_hash_table_create (bfd *abfd)
{
  struct elf32_arm_link_hash_table *ret;
  size_t amt = sizeof (struct elf32_arm_link_hash_table);

  /* bfd_zmalloc zero-fills, so fields not assigned below start 0/NULL.  */
  ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
  if (ret == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
				      elf32_arm_link_hash_newfunc,
				      sizeof (struct elf32_arm_link_hash_entry),
				      ARM_ELF_DATA))
    {
      free (ret);
      return NULL;
    }

  ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
  ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
#ifdef FOUR_WORD_PLT
  ret->plt_header_size = 16;
  ret->plt_entry_size = 16;
#else
  ret->plt_header_size = 20;
  ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
#endif
  /* ARM defaults to REL relocations; RELA targets override later.  */
  ret->use_rel = true;
  ret->obfd = abfd;
  ret->fdpic_p = 0;

  if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
			    sizeof (struct elf32_arm_stub_hash_entry)))
    {
      /* This also frees RET, which the init above linked into ABFD.  */
      _bfd_elf_link_hash_table_free (abfd);
      return NULL;
    }
  /* Ensure the stub hash table is torn down with the rest.  */
  ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;

  return &ret->root.root;
}
4123
4124 /* Determine what kind of NOPs are available. */
4125
4126 static bool
4127 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4128 {
4129 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4130 Tag_CPU_arch);
4131
4132 /* Force return logic to be reviewed for each new architecture. */
4133 BFD_ASSERT (arch <= TAG_CPU_ARCH_V9);
4134
4135 return (arch == TAG_CPU_ARCH_V6T2
4136 || arch == TAG_CPU_ARCH_V6K
4137 || arch == TAG_CPU_ARCH_V7
4138 || arch == TAG_CPU_ARCH_V8
4139 || arch == TAG_CPU_ARCH_V8R
4140 || arch == TAG_CPU_ARCH_V9);
4141 }
4142
4143 static bool
4144 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4145 {
4146 switch (stub_type)
4147 {
4148 case arm_stub_long_branch_thumb_only:
4149 case arm_stub_long_branch_thumb2_only:
4150 case arm_stub_long_branch_thumb2_only_pure:
4151 case arm_stub_long_branch_v4t_thumb_arm:
4152 case arm_stub_short_branch_v4t_thumb_arm:
4153 case arm_stub_long_branch_v4t_thumb_arm_pic:
4154 case arm_stub_long_branch_v4t_thumb_tls_pic:
4155 case arm_stub_long_branch_thumb_only_pic:
4156 case arm_stub_cmse_branch_thumb_only:
4157 return true;
4158 case arm_stub_none:
4159 BFD_FAIL ();
4160 return false;
4161 break;
4162 default:
4163 return false;
4164 }
4165 }
4166
/* Determine the type of stub needed, if any, for a call.

   INPUT_SEC and REL identify the branch being resolved in INPUT_BFD;
   ST_TYPE and HASH describe the target symbol (HASH may be NULL for
   local symbols); DESTINATION is the target address and SYM_SEC its
   section; NAME is used for diagnostics.  On entry *ACTUAL_BRANCH_TYPE
   is the requested branch type; when a stub is required it is updated
   to the branch type the stub must implement.  Returns arm_stub_none
   when no veneer is needed.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bool thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  /* Branch now resolves to the PLT entry, not the symbol.  */
	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
			   : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->root.target_os == is_nacl
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->root.target_os == is_nacl
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4511
4512 /* Build a name for an entry in the stub hash table. */
4513
4514 static char *
4515 elf32_arm_stub_name (const asection *input_section,
4516 const asection *sym_sec,
4517 const struct elf32_arm_link_hash_entry *hash,
4518 const Elf_Internal_Rela *rel,
4519 enum elf32_arm_stub_type stub_type)
4520 {
4521 char *stub_name;
4522 bfd_size_type len;
4523
4524 if (hash)
4525 {
4526 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4527 stub_name = (char *) bfd_malloc (len);
4528 if (stub_name != NULL)
4529 sprintf (stub_name, "%08x_%s+%x_%d",
4530 input_section->id & 0xffffffff,
4531 hash->root.root.root.string,
4532 (int) rel->r_addend & 0xffffffff,
4533 (int) stub_type);
4534 }
4535 else
4536 {
4537 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4538 stub_name = (char *) bfd_malloc (len);
4539 if (stub_name != NULL)
4540 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4541 input_section->id & 0xffffffff,
4542 sym_sec->id & 0xffffffff,
4543 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4544 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4545 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4546 (int) rel->r_addend & 0xffffffff,
4547 (int) stub_type);
4548 }
4549
4550 return stub_name;
4551 }
4552
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL when the
   input section cannot hold stubs (not SEC_CODE) or no matching entry
   exists.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach its final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen (CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
			  CMSE_STUB_NAME,
			  (uint64_t)out_sec->output_section->vma
			    + out_sec->output_offset,
			  (uint64_t)sym_sec->output_section->vma
			    + sym_sec->output_offset
			    + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
	 relocations.  */
      xexit (1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the last entry found for this symbol when the
     section group and stub type match.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, false, false);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
4625
4626 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4627 section. */
4628
4629 static bool
4630 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4631 {
4632 if (stub_type >= max_stub_type)
4633 abort (); /* Should be unreachable. */
4634
4635 switch (stub_type)
4636 {
4637 case arm_stub_cmse_branch_thumb_only:
4638 return true;
4639
4640 default:
4641 return false;
4642 }
4643
4644 abort (); /* Should be unreachable. */
4645 }
4646
4647 /* Required alignment (as a power of 2) for the dedicated section holding
4648 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4649 with input sections. */
4650
4651 static int
4652 arm_dedicated_stub_output_section_required_alignment
4653 (enum elf32_arm_stub_type stub_type)
4654 {
4655 if (stub_type >= max_stub_type)
4656 abort (); /* Should be unreachable. */
4657
4658 switch (stub_type)
4659 {
4660 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4661 boundary. */
4662 case arm_stub_cmse_branch_thumb_only:
4663 return 5;
4664
4665 default:
4666 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4667 return 0;
4668 }
4669
4670 abort (); /* Should be unreachable. */
4671 }
4672
4673 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4674 NULL if veneers of this type are interspersed with input sections. */
4675
4676 static const char *
4677 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4678 {
4679 if (stub_type >= max_stub_type)
4680 abort (); /* Should be unreachable. */
4681
4682 switch (stub_type)
4683 {
4684 case arm_stub_cmse_branch_thumb_only:
4685 return CMSE_STUB_NAME;
4686
4687 default:
4688 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4689 return NULL;
4690 }
4691
4692 abort (); /* Should be unreachable. */
4693 }
4694
4695 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4696 returns the address of the hash table field in HTAB holding a pointer to the
4697 corresponding input section. Otherwise, returns NULL. */
4698
4699 static asection **
4700 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4701 enum elf32_arm_stub_type stub_type)
4702 {
4703 if (stub_type >= max_stub_type)
4704 abort (); /* Should be unreachable. */
4705
4706 switch (stub_type)
4707 {
4708 case arm_stub_cmse_branch_thumb_only:
4709 return &htab->cmse_stub_sec;
4710
4711 default:
4712 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4713 return NULL;
4714 }
4715
4716 abort (); /* Should be unreachable. */
4717 }
4718
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL on failure.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bool dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* CMSE-style veneers: the stub input section lives in its own
	 named output section, which the linker script must place.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Ordinary veneers: one stub section is shared per group of
	 input sections and attached to the group's head section.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->root.target_os == is_nacl ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Stub section name is the prefix with STUB_SUFFIX appended.  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Also cache the stub section under the branching section's own id.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4798
4799 /* Add a new stub entry to the stub hash. Not all fields of the new
4800 stub entry are initialised. */
4801
4802 static struct elf32_arm_stub_hash_entry *
4803 elf32_arm_add_stub (const char *stub_name, asection *section,
4804 struct elf32_arm_link_hash_table *htab,
4805 enum elf32_arm_stub_type stub_type)
4806 {
4807 asection *link_sec;
4808 asection *stub_sec;
4809 struct elf32_arm_stub_hash_entry *stub_entry;
4810
4811 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4812 stub_type);
4813 if (stub_sec == NULL)
4814 return NULL;
4815
4816 /* Enter this entry into the linker stub hash table. */
4817 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4818 true, false);
4819 if (stub_entry == NULL)
4820 {
4821 if (section == NULL)
4822 section = stub_sec;
4823 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4824 section->owner, stub_name);
4825 return NULL;
4826 }
4827
4828 stub_entry->stub_sec = stub_sec;
4829 stub_entry->stub_offset = (bfd_vma) -1;
4830 stub_entry->id_sec = link_sec;
4831
4832 return stub_entry;
4833 }
4834
4835 /* Store an Arm insn into an output section not processed by
4836 elf32_arm_write_section. */
4837
4838 static void
4839 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4840 bfd * output_bfd, bfd_vma val, void * ptr)
4841 {
4842 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4843 bfd_putl32 (val, ptr);
4844 else
4845 bfd_putb32 (val, ptr);
4846 }
4847
4848 /* Store a 16-bit Thumb insn into an output section not processed by
4849 elf32_arm_write_section. */
4850
4851 static void
4852 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4853 bfd * output_bfd, bfd_vma val, void * ptr)
4854 {
4855 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4856 bfd_putl16 (val, ptr);
4857 else
4858 bfd_putb16 (val, ptr);
4859 }
4860
4861 /* Store a Thumb2 insn into an output section not processed by
4862 elf32_arm_write_section. */
4863
4864 static void
4865 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4866 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4867 {
4868 /* T2 instructions are 16-bit streamed. */
4869 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4870 {
4871 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4872 bfd_putl16 ((val & 0xffff), ptr + 2);
4873 }
4874 else
4875 {
4876 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4877 bfd_putb16 ((val & 0xffff), ptr + 2);
4878 }
4879 }
4880
4881 /* If it's possible to change R_TYPE to a more efficient access
4882 model, return the new reloc type. */
4883
4884 static unsigned
4885 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4886 struct elf_link_hash_entry *h)
4887 {
4888 int is_local = (h == NULL);
4889
4890 if (bfd_link_dll (info)
4891 || (h && h->root.type == bfd_link_hash_undefweak))
4892 return r_type;
4893
4894 /* We do not support relaxations for Old TLS models. */
4895 switch (r_type)
4896 {
4897 case R_ARM_TLS_GOTDESC:
4898 case R_ARM_TLS_CALL:
4899 case R_ARM_THM_TLS_CALL:
4900 case R_ARM_TLS_DESCSEQ:
4901 case R_ARM_THM_TLS_DESCSEQ:
4902 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4903 }
4904
4905 return r_type;
4906 }
4907
4908 static bfd_reloc_status_type elf32_arm_final_link_relocate
4909 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4910 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4911 const char *, unsigned char, enum arm_st_branch_type,
4912 struct elf_link_hash_entry *, bool *, char **);
4913
/* Return the alignment required for stubs of type STUB_TYPE.  The value
   doubles as a classifier in arm_build_one_stub: a result of 2 identifies
   the Cortex-A8 veneers, which must be processed after all other stubs.
   NOTE(review): values appear to be byte alignments (2 for the A8 Thumb
   veneers, 4 for the rest, 16 presumably matching NaCl bundle size) —
   confirm against add_stub_section callers.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* Cortex-A8 erratum veneers.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* All remaining stub kinds, including the A8 BLX veneer (which is
       ARM-mode code, hence grouped here rather than above).  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* NaCl stubs.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4952
4953 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4954 veneering (TRUE) or have their own symbol (FALSE). */
4955
4956 static bool
4957 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4958 {
4959 if (stub_type >= max_stub_type)
4960 abort (); /* Should be unreachable. */
4961
4962 switch (stub_type)
4963 {
4964 case arm_stub_cmse_branch_thumb_only:
4965 return true;
4966
4967 default:
4968 return false;
4969 }
4970
4971 abort (); /* Should be unreachable. */
4972 }
4973
4974 /* Returns the padding needed for the dedicated section used stubs of type
4975 STUB_TYPE. */
4976
4977 static int
4978 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4979 {
4980 if (stub_type >= max_stub_type)
4981 abort (); /* Should be unreachable. */
4982
4983 switch (stub_type)
4984 {
4985 case arm_stub_cmse_branch_thumb_only:
4986 return 32;
4987
4988 default:
4989 return 0;
4990 }
4991
4992 abort (); /* Should be unreachable. */
4993 }
4994
4995 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4996 returns the address of the hash table field in HTAB holding the offset at
4997 which new veneers should be layed out in the stub section. */
4998
4999 static bfd_vma*
5000 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5001 enum elf32_arm_stub_type stub_type)
5002 {
5003 switch (stub_type)
5004 {
5005 case arm_stub_cmse_branch_thumb_only:
5006 return &htab->new_cmse_stub_offset;
5007
5008 default:
5009 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5010 return NULL;
5011 }
5012 }
5013
5014 static bool
5015 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5016 void * in_arg)
5017 {
5018 #define MAXRELOCS 3
5019 bool removed_sg_veneer;
5020 struct elf32_arm_stub_hash_entry *stub_entry;
5021 struct elf32_arm_link_hash_table *globals;
5022 struct bfd_link_info *info;
5023 asection *stub_sec;
5024 bfd *stub_bfd;
5025 bfd_byte *loc;
5026 bfd_vma sym_value;
5027 int template_size;
5028 int size;
5029 const insn_sequence *template_sequence;
5030 int i;
5031 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5032 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5033 int nrelocs = 0;
5034 int just_allocated = 0;
5035
5036 /* Massage our args to the form they really have. */
5037 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5038 info = (struct bfd_link_info *) in_arg;
5039
5040 /* Fail if the target section could not be assigned to an output
5041 section. The user should fix his linker script. */
5042 if (stub_entry->target_section->output_section == NULL
5043 && info->non_contiguous_regions)
5044 info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
5045 "Retry without --enable-non-contiguous-regions.\n"),
5046 stub_entry->target_section);
5047
5048 globals = elf32_arm_hash_table (info);
5049 if (globals == NULL)
5050 return false;
5051
5052 stub_sec = stub_entry->stub_sec;
5053
5054 if ((globals->fix_cortex_a8 < 0)
5055 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5056 /* We have to do less-strictly-aligned fixes last. */
5057 return true;
5058
5059 /* Assign a slot at the end of section if none assigned yet. */
5060 if (stub_entry->stub_offset == (bfd_vma) -1)
5061 {
5062 stub_entry->stub_offset = stub_sec->size;
5063 just_allocated = 1;
5064 }
5065 loc = stub_sec->contents + stub_entry->stub_offset;
5066
5067 stub_bfd = stub_sec->owner;
5068
5069 /* This is the address of the stub destination. */
5070 sym_value = (stub_entry->target_value
5071 + stub_entry->target_section->output_offset
5072 + stub_entry->target_section->output_section->vma);
5073
5074 template_sequence = stub_entry->stub_template;
5075 template_size = stub_entry->stub_template_size;
5076
5077 size = 0;
5078 for (i = 0; i < template_size; i++)
5079 {
5080 switch (template_sequence[i].type)
5081 {
5082 case THUMB16_TYPE:
5083 {
5084 bfd_vma data = (bfd_vma) template_sequence[i].data;
5085 if (template_sequence[i].reloc_addend != 0)
5086 {
5087 /* We've borrowed the reloc_addend field to mean we should
5088 insert a condition code into this (Thumb-1 branch)
5089 instruction. See THUMB16_BCOND_INSN. */
5090 BFD_ASSERT ((data & 0xff00) == 0xd000);
5091 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5092 }
5093 bfd_put_16 (stub_bfd, data, loc + size);
5094 size += 2;
5095 }
5096 break;
5097
5098 case THUMB32_TYPE:
5099 bfd_put_16 (stub_bfd,
5100 (template_sequence[i].data >> 16) & 0xffff,
5101 loc + size);
5102 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5103 loc + size + 2);
5104 if (template_sequence[i].r_type != R_ARM_NONE)
5105 {
5106 stub_reloc_idx[nrelocs] = i;
5107 stub_reloc_offset[nrelocs++] = size;
5108 }
5109 size += 4;
5110 break;
5111
5112 case ARM_TYPE:
5113 bfd_put_32 (stub_bfd, template_sequence[i].data,
5114 loc + size);
5115 /* Handle cases where the target is encoded within the
5116 instruction. */
5117 if (template_sequence[i].r_type == R_ARM_JUMP24)
5118 {
5119 stub_reloc_idx[nrelocs] = i;
5120 stub_reloc_offset[nrelocs++] = size;
5121 }
5122 size += 4;
5123 break;
5124
5125 case DATA_TYPE:
5126 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5127 stub_reloc_idx[nrelocs] = i;
5128 stub_reloc_offset[nrelocs++] = size;
5129 size += 4;
5130 break;
5131
5132 default:
5133 BFD_FAIL ();
5134 return false;
5135 }
5136 }
5137
5138 if (just_allocated)
5139 stub_sec->size += size;
5140
5141 /* Stub size has already been computed in arm_size_one_stub. Check
5142 consistency. */
5143 BFD_ASSERT (size == stub_entry->stub_size);
5144
5145 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5146 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5147 sym_value |= 1;
5148
5149 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5150 to relocate in each stub. */
5151 removed_sg_veneer =
5152 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5153 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5154
5155 for (i = 0; i < nrelocs; i++)
5156 {
5157 Elf_Internal_Rela rel;
5158 bool unresolved_reloc;
5159 char *error_message;
5160 bfd_vma points_to =
5161 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5162
5163 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5164 rel.r_info = ELF32_R_INFO (0,
5165 template_sequence[stub_reloc_idx[i]].r_type);
5166 rel.r_addend = 0;
5167
5168 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5169 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5170 template should refer back to the instruction after the original
5171 branch. We use target_section as Cortex-A8 erratum workaround stubs
5172 are only generated when both source and target are in the same
5173 section. */
5174 points_to = stub_entry->target_section->output_section->vma
5175 + stub_entry->target_section->output_offset
5176 + stub_entry->source_value;
5177
5178 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5179 (template_sequence[stub_reloc_idx[i]].r_type),
5180 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5181 points_to, info, stub_entry->target_section, "", STT_FUNC,
5182 stub_entry->branch_type,
5183 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5184 &error_message);
5185 }
5186
5187 return true;
5188 #undef MAXRELOCS
5189 }
5190
5191 /* Calculate the template, template size and instruction size for a stub.
5192 Return value is the instruction size. */
5193
5194 static unsigned int
5195 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5196 const insn_sequence **stub_template,
5197 int *stub_template_size)
5198 {
5199 const insn_sequence *template_sequence = NULL;
5200 int template_size = 0, i;
5201 unsigned int size;
5202
5203 template_sequence = stub_definitions[stub_type].template_sequence;
5204 if (stub_template)
5205 *stub_template = template_sequence;
5206
5207 template_size = stub_definitions[stub_type].template_size;
5208 if (stub_template_size)
5209 *stub_template_size = template_size;
5210
5211 size = 0;
5212 for (i = 0; i < template_size; i++)
5213 {
5214 switch (template_sequence[i].type)
5215 {
5216 case THUMB16_TYPE:
5217 size += 2;
5218 break;
5219
5220 case ARM_TYPE:
5221 case THUMB32_TYPE:
5222 case DATA_TYPE:
5223 size += 4;
5224 break;
5225
5226 default:
5227 BFD_FAIL ();
5228 return 0;
5229 }
5230 }
5231
5232 return size;
5233 }
5234
5235 /* As above, but don't actually build the stub. Just bump offset so
5236 we know stub section sizes. */
5237
5238 static bool
5239 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5240 void *in_arg ATTRIBUTE_UNUSED)
5241 {
5242 struct elf32_arm_stub_hash_entry *stub_entry;
5243 const insn_sequence *template_sequence;
5244 int template_size, size;
5245
5246 /* Massage our args to the form they really have. */
5247 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5248
5249 BFD_ASSERT ((stub_entry->stub_type > arm_stub_none)
5250 && stub_entry->stub_type < ARRAY_SIZE (stub_definitions));
5251
5252 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5253 &template_size);
5254
5255 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5256 if (stub_entry->stub_template_size)
5257 {
5258 stub_entry->stub_size = size;
5259 stub_entry->stub_template = template_sequence;
5260 stub_entry->stub_template_size = template_size;
5261 }
5262
5263 /* Already accounted for. */
5264 if (stub_entry->stub_offset != (bfd_vma) -1)
5265 return true;
5266
5267 size = (size + 7) & ~7;
5268 stub_entry->stub_sec->size += size;
5269
5270 return true;
5271 }
5272
5273 /* External entry points for sizing and building linker stubs. */
5274
5275 /* Set up various things so that we can make a list of input sections
5276 for each output section included in the link. Returns -1 on error,
5277 0 when no stubs will be needed, and 1 on success. */
5278
int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One zeroed map_stub entry per input section id, indexed directly by
     section->id.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  This do/while walks from the last entry
     back down to input_list[0] inclusive, setting every slot.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL entry instead: these are the lists
     elf32_arm_next_input_section will populate.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
5351
5352 /* The linker repeatedly calls this function for each input section,
5353 in the order that input sections are linked into output sections.
5354 Build lists of input sections to determine groupings between which
5355 we may insert linker stubs. */
5356
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* Entries for non-code output sections were set to
	 bfd_abs_section_ptr by elf32_arm_setup_section_lists; only code
	 sections are linked into a list here.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  The macro stays
	     defined past this function: group_sections uses it and
	     #undefs it there.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5381
5382 /* See whether we can group stub sections together. Grouping stub
5383 sections may result in fewer stubs. More importantly, we need to
5384 put all .init* and .fini* stubs at the end of the .init or
5385 .fini output sections respectively, because glibc splits the
5386 _init and _fini functions into multiple parts. Putting a stub in
5387 the middle of a function is not a good idea. */
5388
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bool stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every per-output-section list built by
     elf32_arm_next_input_section.  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks output sections with no code.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group [head..curr] while the span from the group
	     start to the end of the next section stays within
	     stub_group_size.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The lists are no longer needed once grouping is decided.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5482
5483 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5484 erratum fix. */
5485
5486 static int
5487 a8_reloc_compare (const void *a, const void *b)
5488 {
5489 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5490 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5491
5492 if (ra->from < rb->from)
5493 return -1;
5494 else if (ra->from > rb->from)
5495 return 1;
5496 else
5497 return 0;
5498 }
5499
5500 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5501 const char *, char **);
5502
5503 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5504 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5505 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5506 otherwise. */
5507
static bool
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bool *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  /* Local copies of the in/out table parameters; written back at the
     end (the table may be reallocated while scanning).  */
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return false;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan executable PROGBITS sections that actually make it
	 into the output.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Use cached contents when available, otherwise read the section
	 (freed at the bottom of the loop in that case).  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return true;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb ('t') spans can
	 contain the affected 32-bit Thumb-2 branches.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bool last_was_32bit = false, last_was_branch = false;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bool insn_32bit = false, is_blx = false, is_b = false;
	      bool is_bl = false, is_bcc = false, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = true;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first halfword
		 sits at the last halfword of a 4KB page, preceded by a
		 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bool force_target_arm = false;
		  bool force_target_thumb = false;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bool use_plt = false;

		  /* Look up a relocation recorded at this branch, if any
		     (A8_RELOCS is sorted by `from', see a8_reloc_compare).  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = true;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = true;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = true;
			  else
			    force_target_thumb = true;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (T3) branch offset, then
			 sign-extend from bit 20.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B.W/BL/BLX (T4/T1/T2) offset, using
			 the I1/I2 bits derived from S, J1, J2; then
			 sign-extend from bit 24.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      /* BLX targets are word-aligned.  */
		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = true;
			  is_bl = false;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = false;
			  is_bl = true;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only branches landing in the same 4KB page as their
			 first halfword trigger the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Grow the fix table when full.
			     NOTE(review): bfd_realloc's result is not
			     checked and overwrites a8_fixes directly; on
			     allocation failure this dereferences NULL and
			     leaks the old table — consider using a
			     temporary and bailing out.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = true;
				}
			    }

			  if (!stub_name)
			    {
			      /* 8+1+8+1: two 32-bit hex values, a colon
				 and the terminating NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free section contents only if we read them ourselves.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Write the (possibly reallocated/grown) table back to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return false;
}
5813
5814 /* Create or update a stub entry depending on whether the stub can already be
5815 found in HTAB. The stub is identified by:
5816 - its type STUB_TYPE
5817 - its source branch (note that several can share the same stub) whose
5818 section and relocation (if any) are given by SECTION and IRELA
5819 respectively
5820 - its target symbol whose input section, hash, name, value and branch type
5821 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5822 respectively
5823
5824 If found, the value of the stub's target symbol is updated from SYM_VALUE
5825 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5826 TRUE and the stub entry is initialized.
5827
5828 Returns the stub that was created or updated, or NULL if an error
5829 occurred. */
5830
static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bool *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  /* Stubs whose name is claimed (e.g. CMSE secure gateway veneers) use
     SYM_NAME directly as the hash-table key instead of a freshly
     allocated synthetic name.  */
  bool sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = false;

  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, false,
				     false);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      /* STUB_NAME was only needed for the lookup; it is owned by the hash
	 table entry when claimed, otherwise it is ours to free.  */
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      /* Local symbols may have no name; use a placeholder so the
	 sprintf calls below always have a valid string.  */
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* THUMB2ARM_GLUE_ENTRY_NAME is the longest of the three name
	 templates used below, so sizing the buffer from it is safe for
	 all branches.  */
      stub_entry->output_name = (char *)
	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				   + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = true;
  return stub_entry;
}
5925
5926 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5927 gateway veneer to transition from non secure to secure state and create them
5928 accordingly.
5929
5930 "ARMv8-M Security Extensions: Requirements on Development Tools" document
5931 defines the conditions that govern Secure Gateway veneer creation for a
5932 given symbol <SYM> as follows:
5933 - it has function type
5934 - it has non local binding
5935 - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5936 same type, binding and value as <SYM> (called normal symbol).
5937 An entry function can handle secure state transition itself in which case
5938 its special symbol would have a different value from the normal symbol.
5939
5940 OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5941 entry mapping while HTAB gives the name to hash entry mapping.
5942 *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5943 created.
5944
5945 The return value gives whether a stub failed to be allocated. */
5946
static bool
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bool is_v8m, new_stub, cmse_invalid, ret = true;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  /* Symbol indices below EXT_START are local; at or above are global.  */
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse cached local symbols if available, otherwise read them in.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return false;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = false;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  if (!sym_name || !startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol with local binding.  */
	  cmse_invalid = true;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;
	  if (!startswith (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = true;
	}

      /* Any __acle_se_* symbol on a pre-v8-M target is an error; warn
	 once only (IS_V8M is forced true after the first report).  */
      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = true; /* Avoid multiple warning.  */
	  ret = false;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = false;
	  /* For local special symbols there is nothing further to check;
	     for globals keep going to diagnose the normal symbol too.  */
	  if (i < ext_start)
	    continue;
	}

      /* Strip the __acle_se_ prefix to find the normal symbol.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = false;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* CMSE_HASH is necessarily set here: the local-binding error path
	 above did `continue' before reaching this point.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = false;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = false;
	}

      /* Don't bother creating stubs once an error has been found.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = false;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Free the symbols only if we read them ourselves (i.e. they were not
     cached in SYMTAB_HDR->contents).  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6122
6123 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6124 code entry function, ie can be called from non secure code without using a
6125 veneer. */
6126
6127 static bool
6128 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6129 {
6130 bfd_byte contents[4];
6131 uint32_t first_insn;
6132 asection *section;
6133 file_ptr offset;
6134 bfd *abfd;
6135
6136 /* Defined symbol of function type. */
6137 if (hash->root.root.type != bfd_link_hash_defined
6138 && hash->root.root.type != bfd_link_hash_defweak)
6139 return false;
6140 if (hash->root.type != STT_FUNC)
6141 return false;
6142
6143 /* Read first instruction. */
6144 section = hash->root.root.u.def.section;
6145 abfd = section->owner;
6146 offset = hash->root.root.u.def.value - section->vma;
6147 if (!bfd_get_section_contents (abfd, section, contents, offset,
6148 sizeof (contents)))
6149 return false;
6150
6151 first_insn = bfd_get_32 (abfd, contents);
6152
6153 /* Starts by SG instruction. */
6154 return first_insn == 0xe97fe97f;
6155 }
6156
/* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
   secure gateway veneer (ie. the veneer was not in the input import library)
   and there is no output import library (GEN_INFO->out_implib_bfd is
   NULL).  */
6160
6161 static bool
6162 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6163 {
6164 struct elf32_arm_stub_hash_entry *stub_entry;
6165 struct bfd_link_info *info;
6166
6167 /* Massage our args to the form they really have. */
6168 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6169 info = (struct bfd_link_info *) gen_info;
6170
6171 if (info->out_implib_bfd)
6172 return true;
6173
6174 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6175 return true;
6176
6177 if (stub_entry->stub_offset == (bfd_vma) -1)
6178 _bfd_error_handler (" %s", stub_entry->output_name);
6179
6180 return true;
6181 }
6182
/* Set the offset of each secure gateway veneer so that its address remains
   identical to the one in the input import library referred to by
   HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
   (present in the input import library but absent from the executable being
   linked) or if new veneers appeared and there is no output import library
   (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
   number of secure gateway veneers found in the input import library).

   The function returns whether an error occurred.  If no error occurred,
   *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
   and this function and HTAB->new_cmse_stub_offset is set to the biggest
   veneer offset observed, so that new veneers can be laid out after.  */
6195
6196 static bool
6197 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6198 struct elf32_arm_link_hash_table *htab,
6199 int *cmse_stub_created)
6200 {
6201 long symsize;
6202 char *sym_name;
6203 flagword flags;
6204 long i, symcount;
6205 bfd *in_implib_bfd;
6206 asection *stub_out_sec;
6207 bool ret = true;
6208 Elf_Internal_Sym *intsym;
6209 const char *out_sec_name;
6210 bfd_size_type cmse_stub_size;
6211 asymbol **sympp = NULL, *sym;
6212 struct elf32_arm_link_hash_entry *hash;
6213 const insn_sequence *cmse_stub_template;
6214 struct elf32_arm_stub_hash_entry *stub_entry;
6215 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6216 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6217 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6218
6219 /* No input secure gateway import library. */
6220 if (!htab->in_implib_bfd)
6221 return true;
6222
6223 in_implib_bfd = htab->in_implib_bfd;
6224 if (!htab->cmse_implib)
6225 {
6226 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6227 "Gateway import libraries"), in_implib_bfd);
6228 return false;
6229 }
6230
6231 /* Get symbol table size. */
6232 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6233 if (symsize < 0)
6234 return false;
6235
6236 /* Read in the input secure gateway import library's symbol table. */
6237 sympp = (asymbol **) bfd_malloc (symsize);
6238 if (sympp == NULL)
6239 return false;
6240
6241 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6242 if (symcount < 0)
6243 {
6244 ret = false;
6245 goto free_sym_buf;
6246 }
6247
6248 htab->new_cmse_stub_offset = 0;
6249 cmse_stub_size =
6250 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6251 &cmse_stub_template,
6252 &cmse_stub_template_size);
6253 out_sec_name =
6254 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6255 stub_out_sec =
6256 bfd_get_section_by_name (htab->obfd, out_sec_name);
6257 if (stub_out_sec != NULL)
6258 cmse_stub_sec_vma = stub_out_sec->vma;
6259
6260 /* Set addresses of veneers mentionned in input secure gateway import
6261 library's symbol table. */
6262 for (i = 0; i < symcount; i++)
6263 {
6264 sym = sympp[i];
6265 flags = sym->flags;
6266 sym_name = (char *) bfd_asymbol_name (sym);
6267 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6268
6269 if (sym->section != bfd_abs_section_ptr
6270 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6271 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6272 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6273 != ST_BRANCH_TO_THUMB))
6274 {
6275 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6276 "symbol should be absolute, global and "
6277 "refer to Thumb functions"),
6278 in_implib_bfd, sym_name);
6279 ret = false;
6280 continue;
6281 }
6282
6283 veneer_value = bfd_asymbol_value (sym);
6284 stub_offset = veneer_value - cmse_stub_sec_vma;
6285 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6286 false, false);
6287 hash = (struct elf32_arm_link_hash_entry *)
6288 elf_link_hash_lookup (&(htab)->root, sym_name, false, false, true);
6289
6290 /* Stub entry should have been created by cmse_scan or the symbol be of
6291 a secure function callable from non secure code. */
6292 if (!stub_entry && !hash)
6293 {
6294 bool new_stub;
6295
6296 _bfd_error_handler
6297 (_("entry function `%s' disappeared from secure code"), sym_name);
6298 hash = (struct elf32_arm_link_hash_entry *)
6299 elf_link_hash_lookup (&(htab)->root, sym_name, true, true, true);
6300 stub_entry
6301 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6302 NULL, NULL, bfd_abs_section_ptr, hash,
6303 sym_name, veneer_value,
6304 ST_BRANCH_TO_THUMB, &new_stub);
6305 if (stub_entry == NULL)
6306 ret = false;
6307 else
6308 {
6309 BFD_ASSERT (new_stub);
6310 new_cmse_stubs_created++;
6311 (*cmse_stub_created)++;
6312 }
6313 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6314 stub_entry->stub_offset = stub_offset;
6315 }
6316 /* Symbol found is not callable from non secure code. */
6317 else if (!stub_entry)
6318 {
6319 if (!cmse_entry_fct_p (hash))
6320 {
6321 _bfd_error_handler (_("`%s' refers to a non entry function"),
6322 sym_name);
6323 ret = false;
6324 }
6325 continue;
6326 }
6327 else
6328 {
6329 /* Only stubs for SG veneers should have been created. */
6330 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6331
6332 /* Check visibility hasn't changed. */
6333 if (!!(flags & BSF_GLOBAL)
6334 != (hash->root.root.type == bfd_link_hash_defined))
6335 _bfd_error_handler
6336 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6337 sym_name);
6338
6339 stub_entry->stub_offset = stub_offset;
6340 }
6341
6342 /* Size should match that of a SG veneer. */
6343 if (intsym->st_size != cmse_stub_size)
6344 {
6345 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6346 in_implib_bfd, sym_name);
6347 ret = false;
6348 }
6349
6350 /* Previous veneer address is before current SG veneer section. */
6351 if (veneer_value < cmse_stub_sec_vma)
6352 {
6353 /* Avoid offset underflow. */
6354 if (stub_entry)
6355 stub_entry->stub_offset = 0;
6356 stub_offset = 0;
6357 ret = false;
6358 }
6359
6360 /* Complain if stub offset not a multiple of stub size. */
6361 if (stub_offset % cmse_stub_size)
6362 {
6363 _bfd_error_handler
6364 (_("offset of veneer for entry function `%s' not a multiple of "
6365 "its size"), sym_name);
6366 ret = false;
6367 }
6368
6369 if (!ret)
6370 continue;
6371
6372 new_cmse_stubs_created--;
6373 if (veneer_value < cmse_stub_array_start)
6374 cmse_stub_array_start = veneer_value;
6375 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6376 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6377 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6378 }
6379
6380 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6381 {
6382 BFD_ASSERT (new_cmse_stubs_created > 0);
6383 _bfd_error_handler
6384 (_("new entry function(s) introduced but no output import library "
6385 "specified:"));
6386 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6387 }
6388
6389 if (cmse_stub_array_start != cmse_stub_sec_vma)
6390 {
6391 _bfd_error_handler
6392 (_("start address of `%s' is different from previous link"),
6393 out_sec_name);
6394 ret = false;
6395 }
6396
6397 free_sym_buf:
6398 free (sympp);
6399 return ret;
6400 }
6401
6402 /* Determine and set the size of the stub section for a final link.
6403
6404 The basic idea here is to examine all the relocations looking for
6405 PC-relative calls to a target that is unreachable with a "bl"
6406 instruction. */
6407
6408 bool
6409 elf32_arm_size_stubs (bfd *output_bfd,
6410 bfd *stub_bfd,
6411 struct bfd_link_info *info,
6412 bfd_signed_vma group_size,
6413 asection * (*add_stub_section) (const char *, asection *,
6414 asection *,
6415 unsigned int),
6416 void (*layout_sections_again) (void))
6417 {
6418 bool ret = true;
6419 obj_attribute *out_attr;
6420 int cmse_stub_created = 0;
6421 bfd_size_type stub_group_size;
6422 bool m_profile, stubs_always_after_branch, first_veneer_scan = true;
6423 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6424 struct a8_erratum_fix *a8_fixes = NULL;
6425 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6426 struct a8_erratum_reloc *a8_relocs = NULL;
6427 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6428
6429 if (htab == NULL)
6430 return false;
6431
6432 if (htab->fix_cortex_a8)
6433 {
6434 a8_fixes = (struct a8_erratum_fix *)
6435 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6436 a8_relocs = (struct a8_erratum_reloc *)
6437 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6438 }
6439
6440 /* Propagate mach to stub bfd, because it may not have been
6441 finalized when we created stub_bfd. */
6442 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6443 bfd_get_mach (output_bfd));
6444
6445 /* Stash our params away. */
6446 htab->stub_bfd = stub_bfd;
6447 htab->add_stub_section = add_stub_section;
6448 htab->layout_sections_again = layout_sections_again;
6449 stubs_always_after_branch = group_size < 0;
6450
6451 out_attr = elf_known_obj_attributes_proc (output_bfd);
6452 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6453
6454 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6455 as the first half of a 32-bit branch straddling two 4K pages. This is a
6456 crude way of enforcing that. */
6457 if (htab->fix_cortex_a8)
6458 stubs_always_after_branch = 1;
6459
6460 if (group_size < 0)
6461 stub_group_size = -group_size;
6462 else
6463 stub_group_size = group_size;
6464
6465 if (stub_group_size == 1)
6466 {
6467 /* Default values. */
6468 /* Thumb branch range is +-4MB has to be used as the default
6469 maximum size (a given section can contain both ARM and Thumb
6470 code, so the worst case has to be taken into account).
6471
6472 This value is 24K less than that, which allows for 2025
6473 12-byte stubs. If we exceed that, then we will fail to link.
6474 The user will have to relink with an explicit group size
6475 option. */
6476 stub_group_size = 4170000;
6477 }
6478
6479 group_sections (htab, stub_group_size, stubs_always_after_branch);
6480
6481 /* If we're applying the cortex A8 fix, we need to determine the
6482 program header size now, because we cannot change it later --
6483 that could alter section placements. Notice the A8 erratum fix
6484 ends up requiring the section addresses to remain unchanged
6485 modulo the page size. That's something we cannot represent
6486 inside BFD, and we don't want to force the section alignment to
6487 be the page size. */
6488 if (htab->fix_cortex_a8)
6489 (*htab->layout_sections_again) ();
6490
6491 while (1)
6492 {
6493 bfd *input_bfd;
6494 unsigned int bfd_indx;
6495 asection *stub_sec;
6496 enum elf32_arm_stub_type stub_type;
6497 bool stub_changed = false;
6498 unsigned prev_num_a8_fixes = num_a8_fixes;
6499
6500 num_a8_fixes = 0;
6501 for (input_bfd = info->input_bfds, bfd_indx = 0;
6502 input_bfd != NULL;
6503 input_bfd = input_bfd->link.next, bfd_indx++)
6504 {
6505 Elf_Internal_Shdr *symtab_hdr;
6506 asection *section;
6507 Elf_Internal_Sym *local_syms = NULL;
6508
6509 if (!is_arm_elf (input_bfd))
6510 continue;
6511 if ((input_bfd->flags & DYNAMIC) != 0
6512 && (elf_sym_hashes (input_bfd) == NULL
6513 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6514 continue;
6515
6516 num_a8_relocs = 0;
6517
6518 /* We'll need the symbol table in a second. */
6519 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6520 if (symtab_hdr->sh_info == 0)
6521 continue;
6522
6523 /* Limit scan of symbols to object file whose profile is
6524 Microcontroller to not hinder performance in the general case. */
6525 if (m_profile && first_veneer_scan)
6526 {
6527 struct elf_link_hash_entry **sym_hashes;
6528
6529 sym_hashes = elf_sym_hashes (input_bfd);
6530 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6531 &cmse_stub_created))
6532 goto error_ret_free_local;
6533
6534 if (cmse_stub_created != 0)
6535 stub_changed = true;
6536 }
6537
6538 /* Walk over each section attached to the input bfd. */
6539 for (section = input_bfd->sections;
6540 section != NULL;
6541 section = section->next)
6542 {
6543 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6544
6545 /* If there aren't any relocs, then there's nothing more
6546 to do. */
6547 if ((section->flags & SEC_RELOC) == 0
6548 || section->reloc_count == 0
6549 || (section->flags & SEC_CODE) == 0)
6550 continue;
6551
6552 /* If this section is a link-once section that will be
6553 discarded, then don't create any stubs. */
6554 if (section->output_section == NULL
6555 || section->output_section->owner != output_bfd)
6556 continue;
6557
6558 /* Get the relocs. */
6559 internal_relocs
6560 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6561 NULL, info->keep_memory);
6562 if (internal_relocs == NULL)
6563 goto error_ret_free_local;
6564
6565 /* Now examine each relocation. */
6566 irela = internal_relocs;
6567 irelaend = irela + section->reloc_count;
6568 for (; irela < irelaend; irela++)
6569 {
6570 unsigned int r_type, r_indx;
6571 asection *sym_sec;
6572 bfd_vma sym_value;
6573 bfd_vma destination;
6574 struct elf32_arm_link_hash_entry *hash;
6575 const char *sym_name;
6576 unsigned char st_type;
6577 enum arm_st_branch_type branch_type;
6578 bool created_stub = false;
6579
6580 r_type = ELF32_R_TYPE (irela->r_info);
6581 r_indx = ELF32_R_SYM (irela->r_info);
6582
6583 if (r_type >= (unsigned int) R_ARM_max)
6584 {
6585 bfd_set_error (bfd_error_bad_value);
6586 error_ret_free_internal:
6587 if (elf_section_data (section)->relocs == NULL)
6588 free (internal_relocs);
6589 /* Fall through. */
6590 error_ret_free_local:
6591 if (symtab_hdr->contents != (unsigned char *) local_syms)
6592 free (local_syms);
6593 return false;
6594 }
6595
6596 hash = NULL;
6597 if (r_indx >= symtab_hdr->sh_info)
6598 hash = elf32_arm_hash_entry
6599 (elf_sym_hashes (input_bfd)
6600 [r_indx - symtab_hdr->sh_info]);
6601
6602 /* Only look for stubs on branch instructions, or
6603 non-relaxed TLSCALL */
6604 if ((r_type != (unsigned int) R_ARM_CALL)
6605 && (r_type != (unsigned int) R_ARM_THM_CALL)
6606 && (r_type != (unsigned int) R_ARM_JUMP24)
6607 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6608 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6609 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6610 && (r_type != (unsigned int) R_ARM_PLT32)
6611 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6612 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6613 && r_type == (elf32_arm_tls_transition
6614 (info, r_type,
6615 (struct elf_link_hash_entry *) hash))
6616 && ((hash ? hash->tls_type
6617 : (elf32_arm_local_got_tls_type
6618 (input_bfd)[r_indx]))
6619 & GOT_TLS_GDESC) != 0))
6620 continue;
6621
6622 /* Now determine the call target, its name, value,
6623 section. */
6624 sym_sec = NULL;
6625 sym_value = 0;
6626 destination = 0;
6627 sym_name = NULL;
6628
6629 if (r_type == (unsigned int) R_ARM_TLS_CALL
6630 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6631 {
6632 /* A non-relaxed TLS call. The target is the
6633 plt-resident trampoline and nothing to do
6634 with the symbol. */
6635 BFD_ASSERT (htab->tls_trampoline > 0);
6636 sym_sec = htab->root.splt;
6637 sym_value = htab->tls_trampoline;
6638 hash = 0;
6639 st_type = STT_FUNC;
6640 branch_type = ST_BRANCH_TO_ARM;
6641 }
6642 else if (!hash)
6643 {
6644 /* It's a local symbol. */
6645 Elf_Internal_Sym *sym;
6646
6647 if (local_syms == NULL)
6648 {
6649 local_syms
6650 = (Elf_Internal_Sym *) symtab_hdr->contents;
6651 if (local_syms == NULL)
6652 local_syms
6653 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6654 symtab_hdr->sh_info, 0,
6655 NULL, NULL, NULL);
6656 if (local_syms == NULL)
6657 goto error_ret_free_internal;
6658 }
6659
6660 sym = local_syms + r_indx;
6661 if (sym->st_shndx == SHN_UNDEF)
6662 sym_sec = bfd_und_section_ptr;
6663 else if (sym->st_shndx == SHN_ABS)
6664 sym_sec = bfd_abs_section_ptr;
6665 else if (sym->st_shndx == SHN_COMMON)
6666 sym_sec = bfd_com_section_ptr;
6667 else
6668 sym_sec =
6669 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6670
6671 if (!sym_sec)
6672 /* This is an undefined symbol. It can never
6673 be resolved. */
6674 continue;
6675
6676 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6677 sym_value = sym->st_value;
6678 destination = (sym_value + irela->r_addend
6679 + sym_sec->output_offset
6680 + sym_sec->output_section->vma);
6681 st_type = ELF_ST_TYPE (sym->st_info);
6682 branch_type =
6683 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6684 sym_name
6685 = bfd_elf_string_from_elf_section (input_bfd,
6686 symtab_hdr->sh_link,
6687 sym->st_name);
6688 }
6689 else
6690 {
6691 /* It's an external symbol. */
6692 while (hash->root.root.type == bfd_link_hash_indirect
6693 || hash->root.root.type == bfd_link_hash_warning)
6694 hash = ((struct elf32_arm_link_hash_entry *)
6695 hash->root.root.u.i.link);
6696
6697 if (hash->root.root.type == bfd_link_hash_defined
6698 || hash->root.root.type == bfd_link_hash_defweak)
6699 {
6700 sym_sec = hash->root.root.u.def.section;
6701 sym_value = hash->root.root.u.def.value;
6702
6703 struct elf32_arm_link_hash_table *globals =
6704 elf32_arm_hash_table (info);
6705
6706 /* For a destination in a shared library,
6707 use the PLT stub as target address to
6708 decide whether a branch stub is
6709 needed. */
6710 if (globals != NULL
6711 && globals->root.splt != NULL
6712 && hash != NULL
6713 && hash->root.plt.offset != (bfd_vma) -1)
6714 {
6715 sym_sec = globals->root.splt;
6716 sym_value = hash->root.plt.offset;
6717 if (sym_sec->output_section != NULL)
6718 destination = (sym_value
6719 + sym_sec->output_offset
6720 + sym_sec->output_section->vma);
6721 }
6722 else if (sym_sec->output_section != NULL)
6723 destination = (sym_value + irela->r_addend
6724 + sym_sec->output_offset
6725 + sym_sec->output_section->vma);
6726 }
6727 else if ((hash->root.root.type == bfd_link_hash_undefined)
6728 || (hash->root.root.type == bfd_link_hash_undefweak))
6729 {
6730 /* For a shared library, use the PLT stub as
6731 target address to decide whether a long
6732 branch stub is needed.
6733 For absolute code, they cannot be handled. */
6734 struct elf32_arm_link_hash_table *globals =
6735 elf32_arm_hash_table (info);
6736
6737 if (globals != NULL
6738 && globals->root.splt != NULL
6739 && hash != NULL
6740 && hash->root.plt.offset != (bfd_vma) -1)
6741 {
6742 sym_sec = globals->root.splt;
6743 sym_value = hash->root.plt.offset;
6744 if (sym_sec->output_section != NULL)
6745 destination = (sym_value
6746 + sym_sec->output_offset
6747 + sym_sec->output_section->vma);
6748 }
6749 else
6750 continue;
6751 }
6752 else
6753 {
6754 bfd_set_error (bfd_error_bad_value);
6755 goto error_ret_free_internal;
6756 }
6757 st_type = hash->root.type;
6758 branch_type =
6759 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6760 sym_name = hash->root.root.root.string;
6761 }
6762
6763 do
6764 {
6765 bool new_stub;
6766 struct elf32_arm_stub_hash_entry *stub_entry;
6767
6768 /* Determine what (if any) linker stub is needed. */
6769 stub_type = arm_type_of_stub (info, section, irela,
6770 st_type, &branch_type,
6771 hash, destination, sym_sec,
6772 input_bfd, sym_name);
6773 if (stub_type == arm_stub_none)
6774 break;
6775
6776 /* We've either created a stub for this reloc already,
6777 or we are about to. */
6778 stub_entry =
6779 elf32_arm_create_stub (htab, stub_type, section, irela,
6780 sym_sec, hash,
6781 (char *) sym_name, sym_value,
6782 branch_type, &new_stub);
6783
6784 created_stub = stub_entry != NULL;
6785 if (!created_stub)
6786 goto error_ret_free_internal;
6787 else if (!new_stub)
6788 break;
6789 else
6790 stub_changed = true;
6791 }
6792 while (0);
6793
6794 /* Look for relocations which might trigger Cortex-A8
6795 erratum. */
6796 if (htab->fix_cortex_a8
6797 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6798 || r_type == (unsigned int) R_ARM_THM_JUMP19
6799 || r_type == (unsigned int) R_ARM_THM_CALL
6800 || r_type == (unsigned int) R_ARM_THM_XPC22))
6801 {
6802 bfd_vma from = section->output_section->vma
6803 + section->output_offset
6804 + irela->r_offset;
6805
6806 if ((from & 0xfff) == 0xffe)
6807 {
6808 /* Found a candidate. Note we haven't checked the
6809 destination is within 4K here: if we do so (and
6810 don't create an entry in a8_relocs) we can't tell
6811 that a branch should have been relocated when
6812 scanning later. */
6813 if (num_a8_relocs == a8_reloc_table_size)
6814 {
6815 a8_reloc_table_size *= 2;
6816 a8_relocs = (struct a8_erratum_reloc *)
6817 bfd_realloc (a8_relocs,
6818 sizeof (struct a8_erratum_reloc)
6819 * a8_reloc_table_size);
6820 }
6821
6822 a8_relocs[num_a8_relocs].from = from;
6823 a8_relocs[num_a8_relocs].destination = destination;
6824 a8_relocs[num_a8_relocs].r_type = r_type;
6825 a8_relocs[num_a8_relocs].branch_type = branch_type;
6826 a8_relocs[num_a8_relocs].sym_name = sym_name;
6827 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6828 a8_relocs[num_a8_relocs].hash = hash;
6829
6830 num_a8_relocs++;
6831 }
6832 }
6833 }
6834
6835 /* We're done with the internal relocs, free them. */
6836 if (elf_section_data (section)->relocs == NULL)
6837 free (internal_relocs);
6838 }
6839
6840 if (htab->fix_cortex_a8)
6841 {
6842 /* Sort relocs which might apply to Cortex-A8 erratum. */
6843 qsort (a8_relocs, num_a8_relocs,
6844 sizeof (struct a8_erratum_reloc),
6845 &a8_reloc_compare);
6846
6847 /* Scan for branches which might trigger Cortex-A8 erratum. */
6848 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6849 &num_a8_fixes, &a8_fix_table_size,
6850 a8_relocs, num_a8_relocs,
6851 prev_num_a8_fixes, &stub_changed)
6852 != 0)
6853 goto error_ret_free_local;
6854 }
6855
6856 if (local_syms != NULL
6857 && symtab_hdr->contents != (unsigned char *) local_syms)
6858 {
6859 if (!info->keep_memory)
6860 free (local_syms);
6861 else
6862 symtab_hdr->contents = (unsigned char *) local_syms;
6863 }
6864 }
6865
6866 if (first_veneer_scan
6867 && !set_cmse_veneer_addr_from_implib (info, htab,
6868 &cmse_stub_created))
6869 ret = false;
6870
6871 if (prev_num_a8_fixes != num_a8_fixes)
6872 stub_changed = true;
6873
6874 if (!stub_changed)
6875 break;
6876
6877 /* OK, we've added some stubs. Find out the new size of the
6878 stub sections. */
6879 for (stub_sec = htab->stub_bfd->sections;
6880 stub_sec != NULL;
6881 stub_sec = stub_sec->next)
6882 {
6883 /* Ignore non-stub sections. */
6884 if (!strstr (stub_sec->name, STUB_SUFFIX))
6885 continue;
6886
6887 stub_sec->size = 0;
6888 }
6889
6890 /* Add new SG veneers after those already in the input import
6891 library. */
6892 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6893 stub_type++)
6894 {
6895 bfd_vma *start_offset_p;
6896 asection **stub_sec_p;
6897
6898 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6899 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6900 if (start_offset_p == NULL)
6901 continue;
6902
6903 BFD_ASSERT (stub_sec_p != NULL);
6904 if (*stub_sec_p != NULL)
6905 (*stub_sec_p)->size = *start_offset_p;
6906 }
6907
6908 /* Compute stub section size, considering padding. */
6909 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6910 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6911 stub_type++)
6912 {
6913 int size, padding;
6914 asection **stub_sec_p;
6915
6916 padding = arm_dedicated_stub_section_padding (stub_type);
6917 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6918 /* Skip if no stub input section or no stub section padding
6919 required. */
6920 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6921 continue;
6922 /* Stub section padding required but no dedicated section. */
6923 BFD_ASSERT (stub_sec_p);
6924
6925 size = (*stub_sec_p)->size;
6926 size = (size + padding - 1) & ~(padding - 1);
6927 (*stub_sec_p)->size = size;
6928 }
6929
6930 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6931 if (htab->fix_cortex_a8)
6932 for (i = 0; i < num_a8_fixes; i++)
6933 {
6934 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6935 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6936
6937 if (stub_sec == NULL)
6938 return false;
6939
6940 stub_sec->size
6941 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6942 NULL);
6943 }
6944
6945
6946 /* Ask the linker to do its stuff. */
6947 (*htab->layout_sections_again) ();
6948 first_veneer_scan = false;
6949 }
6950
6951 /* Add stubs for Cortex-A8 erratum fixes now. */
6952 if (htab->fix_cortex_a8)
6953 {
6954 for (i = 0; i < num_a8_fixes; i++)
6955 {
6956 struct elf32_arm_stub_hash_entry *stub_entry;
6957 char *stub_name = a8_fixes[i].stub_name;
6958 asection *section = a8_fixes[i].section;
6959 unsigned int section_id = a8_fixes[i].section->id;
6960 asection *link_sec = htab->stub_group[section_id].link_sec;
6961 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6962 const insn_sequence *template_sequence;
6963 int template_size, size = 0;
6964
6965 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6966 true, false);
6967 if (stub_entry == NULL)
6968 {
6969 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6970 section->owner, stub_name);
6971 return false;
6972 }
6973
6974 stub_entry->stub_sec = stub_sec;
6975 stub_entry->stub_offset = (bfd_vma) -1;
6976 stub_entry->id_sec = link_sec;
6977 stub_entry->stub_type = a8_fixes[i].stub_type;
6978 stub_entry->source_value = a8_fixes[i].offset;
6979 stub_entry->target_section = a8_fixes[i].section;
6980 stub_entry->target_value = a8_fixes[i].target_offset;
6981 stub_entry->orig_insn = a8_fixes[i].orig_insn;
6982 stub_entry->branch_type = a8_fixes[i].branch_type;
6983
6984 size = find_stub_size_and_template (a8_fixes[i].stub_type,
6985 &template_sequence,
6986 &template_size);
6987
6988 stub_entry->stub_size = size;
6989 stub_entry->stub_template = template_sequence;
6990 stub_entry->stub_template_size = template_size;
6991 }
6992
6993 /* Stash the Cortex-A8 erratum fix array for use later in
6994 elf32_arm_write_section(). */
6995 htab->a8_erratum_fixes = a8_fixes;
6996 htab->num_a8_erratum_fixes = num_a8_fixes;
6997 }
6998 else
6999 {
7000 htab->a8_erratum_fixes = NULL;
7001 htab->num_a8_erratum_fixes = 0;
7002 }
7003 return ret;
7004 }
7005
7006 /* Build all the stubs associated with the current output file. The
7007 stubs are kept in a hash table attached to the main linker hash
7008 table. We also set up the .plt entries for statically linked PIC
7009 functions here. This function is called via arm_elf_finish in the
7010 linker. */
7011
7012 bool
7013 elf32_arm_build_stubs (struct bfd_link_info *info)
7014 {
7015 asection *stub_sec;
7016 struct bfd_hash_table *table;
7017 enum elf32_arm_stub_type stub_type;
7018 struct elf32_arm_link_hash_table *htab;
7019
7020 htab = elf32_arm_hash_table (info);
7021 if (htab == NULL)
7022 return false;
7023
7024 for (stub_sec = htab->stub_bfd->sections;
7025 stub_sec != NULL;
7026 stub_sec = stub_sec->next)
7027 {
7028 bfd_size_type size;
7029
7030 /* Ignore non-stub sections. */
7031 if (!strstr (stub_sec->name, STUB_SUFFIX))
7032 continue;
7033
7034 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7035 must at least be done for stub section requiring padding and for SG
7036 veneers to ensure that a non secure code branching to a removed SG
7037 veneer causes an error. */
7038 size = stub_sec->size;
7039 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7040 if (stub_sec->contents == NULL && size != 0)
7041 return false;
7042
7043 stub_sec->size = 0;
7044 }
7045
7046 /* Add new SG veneers after those already in the input import library. */
7047 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7048 {
7049 bfd_vma *start_offset_p;
7050 asection **stub_sec_p;
7051
7052 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7053 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7054 if (start_offset_p == NULL)
7055 continue;
7056
7057 BFD_ASSERT (stub_sec_p != NULL);
7058 if (*stub_sec_p != NULL)
7059 (*stub_sec_p)->size = *start_offset_p;
7060 }
7061
7062 /* Build the stubs as directed by the stub hash table. */
7063 table = &htab->stub_hash_table;
7064 bfd_hash_traverse (table, arm_build_one_stub, info);
7065 if (htab->fix_cortex_a8)
7066 {
7067 /* Place the cortex a8 stubs last. */
7068 htab->fix_cortex_a8 = -1;
7069 bfd_hash_traverse (table, arm_build_one_stub, info);
7070 }
7071
7072 return true;
7073 }
7074
7075 /* Locate the Thumb encoded calling stub for NAME. */
7076
7077 static struct elf_link_hash_entry *
7078 find_thumb_glue (struct bfd_link_info *link_info,
7079 const char *name,
7080 char **error_message)
7081 {
7082 char *tmp_name;
7083 struct elf_link_hash_entry *hash;
7084 struct elf32_arm_link_hash_table *hash_table;
7085
7086 /* We need a pointer to the armelf specific hash table. */
7087 hash_table = elf32_arm_hash_table (link_info);
7088 if (hash_table == NULL)
7089 return NULL;
7090
7091 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7092 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7093
7094 BFD_ASSERT (tmp_name);
7095
7096 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7097
7098 hash = elf_link_hash_lookup
7099 (&(hash_table)->root, tmp_name, false, false, true);
7100
7101 if (hash == NULL
7102 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7103 "Thumb", tmp_name, name) == -1)
7104 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7105
7106 free (tmp_name);
7107
7108 return hash;
7109 }
7110
7111 /* Locate the ARM encoded calling stub for NAME. */
7112
7113 static struct elf_link_hash_entry *
7114 find_arm_glue (struct bfd_link_info *link_info,
7115 const char *name,
7116 char **error_message)
7117 {
7118 char *tmp_name;
7119 struct elf_link_hash_entry *myh;
7120 struct elf32_arm_link_hash_table *hash_table;
7121
7122 /* We need a pointer to the elfarm specific hash table. */
7123 hash_table = elf32_arm_hash_table (link_info);
7124 if (hash_table == NULL)
7125 return NULL;
7126
7127 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7128 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7129 BFD_ASSERT (tmp_name);
7130
7131 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7132
7133 myh = elf_link_hash_lookup
7134 (&(hash_table)->root, tmp_name, false, false, true);
7135
7136 if (myh == NULL
7137 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7138 "ARM", tmp_name, name) == -1)
7139 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7140
7141 free (tmp_name);
7142
7143 return myh;
7144 }
7145
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;	/* ldr ip, [pc, #0]  */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx ip  */
/* Literal word for the target address; bit 0 set as for a Thumb
   destination (patched when the glue is output).  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4]  */
/* Literal word for the target address (see a2t3_func_addr_insn).  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr ip, [pc, #4]  */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add ip, ip, pc  */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx ip  */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
 __func_from_thumb:		    __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				    ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				    __func_addr:
					.word	     func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;	/* bx pc  */
static const insn16 t2a2_noop_insn = 0x46c0;	/* nop (mov r8, r8)  */
static const insn32 t2a3_b_insn = 0xea000000;	/* b <target> (offset patched)  */

#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

#define ARM_BX_VENEER_SIZE 12
/* tst rN, #1 -- register field presumably patched per veneer.  */
static const insn32 armbx1_tst_insn = 0xe3100001;
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN  */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN  */
7214
7215 #ifndef ELFARM_NABI_C_INCLUDED
7216 static void
7217 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7218 {
7219 asection * s;
7220 bfd_byte * contents;
7221
7222 if (size == 0)
7223 {
7224 /* Do not include empty glue sections in the output. */
7225 if (abfd != NULL)
7226 {
7227 s = bfd_get_linker_section (abfd, name);
7228 if (s != NULL)
7229 s->flags |= SEC_EXCLUDE;
7230 }
7231 return;
7232 }
7233
7234 BFD_ASSERT (abfd != NULL);
7235
7236 s = bfd_get_linker_section (abfd, name);
7237 BFD_ASSERT (s != NULL);
7238
7239 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7240
7241 BFD_ASSERT (s->size == size);
7242 s->contents = contents;
7243 }
7244
7245 bool
7246 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7247 {
7248 struct elf32_arm_link_hash_table * globals;
7249
7250 globals = elf32_arm_hash_table (info);
7251 BFD_ASSERT (globals != NULL);
7252
7253 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7254 globals->arm_glue_size,
7255 ARM2THUMB_GLUE_SECTION_NAME);
7256
7257 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7258 globals->thumb_glue_size,
7259 THUMB2ARM_GLUE_SECTION_NAME);
7260
7261 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7262 globals->vfp11_erratum_glue_size,
7263 VFP11_ERRATUM_VENEER_SECTION_NAME);
7264
7265 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7266 globals->stm32l4xx_erratum_glue_size,
7267 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7268
7269 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7270 globals->bx_glue_size,
7271 ARM_BX_GLUE_SECTION_NAME);
7272
7273 return true;
7274 }
7275
7276 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7277 returns the symbol identifying the stub. */
7278
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the glue symbol name from the target's name.
     NOTE(review): on bfd_malloc failure BFD_ASSERT does not stop
     execution, so sprintf below would write through NULL.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, true);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, true, false, &bh);

  /* Force the new symbol local so it does not escape the output
     symbol table as a global.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue sequence size: PIC glue when producing position
     independent output, otherwise the BLX-capable (v5) or plain
     static variant.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Account for the new stub in both the section and the running
     total used to place subsequent stubs.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7347
7348 /* Allocate space for ARMv4 BX veneers. */
7349
static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  /* A veneer for this register must not already have a symbol.  */
  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue section.  */
  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve space and record the veneer's offset for this register.
     NOTE(review): the low bits OR'd into bx_glue_offset appear to be a
     state marker (cf. the "+1 marks not output" trick used by the
     arm-to-thumb glue) -- confirm against the code that emits the
     veneer.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
7403
7404
7405 /* Add an entry to the code/data map for section SEC. */
7406
7407 static void
7408 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7409 {
7410 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7411 unsigned int newidx;
7412
7413 if (sec_data->map == NULL)
7414 {
7415 sec_data->map = (elf32_arm_section_map *)
7416 bfd_malloc (sizeof (elf32_arm_section_map));
7417 sec_data->mapcount = 0;
7418 sec_data->mapsize = 1;
7419 }
7420
7421 newidx = sec_data->mapcount++;
7422
7423 if (sec_data->mapcount > sec_data->mapsize)
7424 {
7425 sec_data->mapsize *= 2;
7426 sec_data->map = (elf32_arm_section_map *)
7427 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7428 * sizeof (elf32_arm_section_map));
7429 }
7430
7431 if (sec_data->map)
7432 {
7433 sec_data->map[newidx].vma = vma;
7434 sec_data->map[newidx].type = type;
7435 }
7436 }
7437
7438
7439 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7440 veneers are handled for now. */
7441
7442 static bfd_vma
7443 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7444 elf32_vfp11_erratum_list *branch,
7445 bfd *branch_bfd,
7446 asection *branch_sec,
7447 unsigned int offset)
7448 {
7449 asection *s;
7450 struct elf32_arm_link_hash_table *hash_table;
7451 char *tmp_name;
7452 struct elf_link_hash_entry *myh;
7453 struct bfd_link_hash_entry *bh;
7454 bfd_vma val;
7455 struct _arm_elf_section_data *sec_data;
7456 elf32_vfp11_erratum_list *newerr;
7457
7458 hash_table = elf32_arm_hash_table (link_info);
7459 BFD_ASSERT (hash_table != NULL);
7460 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7461
7462 s = bfd_get_linker_section
7463 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7464
7465 sec_data = elf32_arm_section_data (s);
7466
7467 BFD_ASSERT (s != NULL);
7468
7469 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7470 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7471 BFD_ASSERT (tmp_name);
7472
7473 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7474 hash_table->num_vfp11_fixes);
7475
7476 myh = elf_link_hash_lookup
7477 (&(hash_table)->root, tmp_name, false, false, false);
7478
7479 BFD_ASSERT (myh == NULL);
7480
7481 bh = NULL;
7482 val = hash_table->vfp11_erratum_glue_size;
7483 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7484 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7485 NULL, true, false, &bh);
7486
7487 myh = (struct elf_link_hash_entry *) bh;
7488 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7489 myh->forced_local = 1;
7490
7491 /* Link veneer back to calling location. */
7492 sec_data->erratumcount += 1;
7493 newerr = (elf32_vfp11_erratum_list *)
7494 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7495
7496 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7497 newerr->vma = -1;
7498 newerr->u.v.branch = branch;
7499 newerr->u.v.id = hash_table->num_vfp11_fixes;
7500 branch->u.b.veneer = newerr;
7501
7502 newerr->next = sec_data->erratumlist;
7503 sec_data->erratumlist = newerr;
7504
7505 /* A symbol for the return from the veneer. */
7506 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7507 hash_table->num_vfp11_fixes);
7508
7509 myh = elf_link_hash_lookup
7510 (&(hash_table)->root, tmp_name, false, false, false);
7511
7512 if (myh != NULL)
7513 abort ();
7514
7515 bh = NULL;
7516 val = offset + 4;
7517 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7518 branch_sec, val, NULL, true, false, &bh);
7519
7520 myh = (struct elf_link_hash_entry *) bh;
7521 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7522 myh->forced_local = 1;
7523
7524 free (tmp_name);
7525
7526 /* Generate a mapping symbol for the veneer section, and explicitly add an
7527 entry for that symbol to the code/data map for the section. */
7528 if (hash_table->vfp11_erratum_glue_size == 0)
7529 {
7530 bh = NULL;
7531 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7532 ever requires this erratum fix. */
7533 _bfd_generic_link_add_one_symbol (link_info,
7534 hash_table->bfd_of_glue_owner, "$a",
7535 BSF_LOCAL, s, 0, NULL,
7536 true, false, &bh);
7537
7538 myh = (struct elf_link_hash_entry *) bh;
7539 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7540 myh->forced_local = 1;
7541
7542 /* The elf32_arm_init_maps function only cares about symbols from input
7543 BFDs. We must make a note of this generated mapping symbol
7544 ourselves so that code byteswapping works properly in
7545 elf32_arm_write_section. */
7546 elf32_arm_section_map_add (s, 'a', 0);
7547 }
7548
7549 s->size += VFP11_ERRATUM_VENEER_SIZE;
7550 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7551 hash_table->num_vfp11_fixes++;
7552
7553 /* The offset of the veneer. */
7554 return val;
7555 }
7556
7557 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7558 veneers need to be handled because used only in Cortex-M. */
7559
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Build a unique veneer symbol name from the running fix count.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the veneer glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, false, false, false);

  if (myh != NULL)
    abort ();

  /* The return symbol lives in the branch's own section, just past the
     offending instruction.  */
  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, true, false, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Account for the veneer and advance the fix counter.  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  */
  return val;
}
7674
/* Section flags common to all linker-created ARM glue sections:
   loadable, allocated, read-only code generated by the linker.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7678
7679 /* Create a fake section for use by the ARM backend of the linker. */
7680
7681 static bool
7682 arm_make_glue_section (bfd * abfd, const char * name)
7683 {
7684 asection * sec;
7685
7686 sec = bfd_get_linker_section (abfd, name);
7687 if (sec != NULL)
7688 /* Already made. */
7689 return true;
7690
7691 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7692
7693 if (sec == NULL
7694 || !bfd_set_section_alignment (sec, 2))
7695 return false;
7696
7697 /* Set the gc mark to prevent the section from being removed by garbage
7698 collection, despite the fact that no relocs refer to this section. */
7699 sec->gc_mark = 1;
7700
7701 return true;
7702 }
7703
7704 /* Set size of .plt entries. This function is called from the
7705 linker scripts in ld/emultempl/{armelf}.em. */
7706
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Flag consulted when sizing .plt entries: select the long PLT
     entry format.  */
  elf32_arm_use_long_plt_entry = true;
}
7712
7713 /* Add the glue sections to ABFD. This function is called from the
7714 linker scripts in ld/emultempl/{armelf}.em. */
7715
7716 bool
7717 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7718 struct bfd_link_info *info)
7719 {
7720 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7721 bool dostm32l4xx = globals
7722 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7723 bool addglue;
7724
7725 /* If we are only performing a partial
7726 link do not bother adding the glue. */
7727 if (bfd_link_relocatable (info))
7728 return true;
7729
7730 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7731 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7732 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7733 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7734
7735 if (!dostm32l4xx)
7736 return addglue;
7737
7738 return addglue
7739 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7740 }
7741
7742 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7743 ensures they are not marked for deletion by
7744 strip_excluded_output_sections () when veneers are going to be created
7745 later. Not doing so would trigger assert on empty section size in
7746 lang_size_sections_1 (). */
7747
7748 void
7749 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7750 {
7751 enum elf32_arm_stub_type stub_type;
7752
7753 /* If we are only performing a partial
7754 link do not bother adding the glue. */
7755 if (bfd_link_relocatable (info))
7756 return;
7757
7758 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7759 {
7760 asection *out_sec;
7761 const char *out_sec_name;
7762
7763 if (!arm_dedicated_stub_output_section_required (stub_type))
7764 continue;
7765
7766 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7767 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7768 if (out_sec != NULL)
7769 out_sec->flags |= SEC_KEEP;
7770 }
7771 }
7772
7773 /* Select a BFD to be used to hold the sections used by the glue code.
7774 This function is called from the linker scripts in ld/emultempl/
7775 {armelf/pe}.em. */
7776
7777 bool
7778 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7779 {
7780 struct elf32_arm_link_hash_table *globals;
7781
7782 /* If we are only performing a partial link
7783 do not bother getting a bfd to hold the glue. */
7784 if (bfd_link_relocatable (info))
7785 return true;
7786
7787 /* Make sure we don't attach the glue sections to a dynamic object. */
7788 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7789
7790 globals = elf32_arm_hash_table (info);
7791 BFD_ASSERT (globals != NULL);
7792
7793 if (globals->bfd_of_glue_owner != NULL)
7794 return true;
7795
7796 /* Save the bfd for later use. */
7797 globals->bfd_of_glue_owner = abfd;
7798
7799 return true;
7800 }
7801
7802 static void
7803 check_use_blx (struct elf32_arm_link_hash_table *globals)
7804 {
7805 int cpu_arch;
7806
7807 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7808 Tag_CPU_arch);
7809
7810 if (globals->fix_arm1176)
7811 {
7812 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7813 globals->use_blx = 1;
7814 }
7815 else
7816 {
7817 if (cpu_arch > TAG_CPU_ARCH_V4T)
7818 globals->use_blx = 1;
7819 }
7820 }
7821
7822 bool
7823 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7824 struct bfd_link_info *link_info)
7825 {
7826 Elf_Internal_Shdr *symtab_hdr;
7827 Elf_Internal_Rela *internal_relocs = NULL;
7828 Elf_Internal_Rela *irel, *irelend;
7829 bfd_byte *contents = NULL;
7830
7831 asection *sec;
7832 struct elf32_arm_link_hash_table *globals;
7833
7834 /* If we are only performing a partial link do not bother
7835 to construct any glue. */
7836 if (bfd_link_relocatable (link_info))
7837 return true;
7838
7839 /* Here we have a bfd that is to be included on the link. We have a
7840 hook to do reloc rummaging, before section sizes are nailed down. */
7841 globals = elf32_arm_hash_table (link_info);
7842 BFD_ASSERT (globals != NULL);
7843
7844 check_use_blx (globals);
7845
7846 if (globals->byteswap_code && !bfd_big_endian (abfd))
7847 {
7848 _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7849 abfd);
7850 return false;
7851 }
7852
7853 /* PR 5398: If we have not decided to include any loadable sections in
7854 the output then we will not have a glue owner bfd. This is OK, it
7855 just means that there is nothing else for us to do here. */
7856 if (globals->bfd_of_glue_owner == NULL)
7857 return true;
7858
7859 /* Rummage around all the relocs and map the glue vectors. */
7860 sec = abfd->sections;
7861
7862 if (sec == NULL)
7863 return true;
7864
7865 for (; sec != NULL; sec = sec->next)
7866 {
7867 if (sec->reloc_count == 0)
7868 continue;
7869
7870 if ((sec->flags & SEC_EXCLUDE) != 0)
7871 continue;
7872
7873 symtab_hdr = & elf_symtab_hdr (abfd);
7874
7875 /* Load the relocs. */
7876 internal_relocs
7877 = _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, false);
7878
7879 if (internal_relocs == NULL)
7880 goto error_return;
7881
7882 irelend = internal_relocs + sec->reloc_count;
7883 for (irel = internal_relocs; irel < irelend; irel++)
7884 {
7885 long r_type;
7886 unsigned long r_index;
7887
7888 struct elf_link_hash_entry *h;
7889
7890 r_type = ELF32_R_TYPE (irel->r_info);
7891 r_index = ELF32_R_SYM (irel->r_info);
7892
7893 /* These are the only relocation types we care about. */
7894 if ( r_type != R_ARM_PC24
7895 && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7896 continue;
7897
7898 /* Get the section contents if we haven't done so already. */
7899 if (contents == NULL)
7900 {
7901 /* Get cached copy if it exists. */
7902 if (elf_section_data (sec)->this_hdr.contents != NULL)
7903 contents = elf_section_data (sec)->this_hdr.contents;
7904 else
7905 {
7906 /* Go get them off disk. */
7907 if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7908 goto error_return;
7909 }
7910 }
7911
7912 if (r_type == R_ARM_V4BX)
7913 {
7914 int reg;
7915
7916 reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7917 record_arm_bx_glue (link_info, reg);
7918 continue;
7919 }
7920
7921 /* If the relocation is not against a symbol it cannot concern us. */
7922 h = NULL;
7923
7924 /* We don't care about local symbols. */
7925 if (r_index < symtab_hdr->sh_info)
7926 continue;
7927
7928 /* This is an external symbol. */
7929 r_index -= symtab_hdr->sh_info;
7930 h = (struct elf_link_hash_entry *)
7931 elf_sym_hashes (abfd)[r_index];
7932
7933 /* If the relocation is against a static symbol it must be within
7934 the current section and so cannot be a cross ARM/Thumb relocation. */
7935 if (h == NULL)
7936 continue;
7937
7938 /* If the call will go through a PLT entry then we do not need
7939 glue. */
7940 if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7941 continue;
7942
7943 switch (r_type)
7944 {
7945 case R_ARM_PC24:
7946 /* This one is a call from arm code. We need to look up
7947 the target of the call. If it is a thumb target, we
7948 insert glue. */
7949 if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7950 == ST_BRANCH_TO_THUMB)
7951 record_arm_to_thumb_glue (link_info, h);
7952 break;
7953
7954 default:
7955 abort ();
7956 }
7957 }
7958
7959 if (elf_section_data (sec)->this_hdr.contents != contents)
7960 free (contents);
7961 contents = NULL;
7962
7963 if (elf_section_data (sec)->relocs != internal_relocs)
7964 free (internal_relocs);
7965 internal_relocs = NULL;
7966 }
7967
7968 return true;
7969
7970 error_return:
7971 if (elf_section_data (sec)->this_hdr.contents != contents)
7972 free (contents);
7973 if (elf_section_data (sec)->relocs != internal_relocs)
7974 free (internal_relocs);
7975
7976 return false;
7977 }
7978 #endif
7979
7980
7981 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7982
7983 void
7984 bfd_elf32_arm_init_maps (bfd *abfd)
7985 {
7986 Elf_Internal_Sym *isymbuf;
7987 Elf_Internal_Shdr *hdr;
7988 unsigned int i, localsyms;
7989
7990 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7991 if (! is_arm_elf (abfd))
7992 return;
7993
7994 if ((abfd->flags & DYNAMIC) != 0)
7995 return;
7996
7997 hdr = & elf_symtab_hdr (abfd);
7998 localsyms = hdr->sh_info;
7999
8000 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8001 should contain the number of local symbols, which should come before any
8002 global symbols. Mapping symbols are always local. */
8003 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8004 NULL);
8005
8006 /* No internal symbols read? Skip this BFD. */
8007 if (isymbuf == NULL)
8008 return;
8009
8010 for (i = 0; i < localsyms; i++)
8011 {
8012 Elf_Internal_Sym *isym = &isymbuf[i];
8013 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8014 const char *name;
8015
8016 if (sec != NULL
8017 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8018 {
8019 name = bfd_elf_string_from_elf_section (abfd,
8020 hdr->sh_link, isym->st_name);
8021
8022 if (bfd_is_arm_special_symbol_name (name,
8023 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8024 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8025 }
8026 }
8027 }
8028
8029
8030 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8031 say what they wanted. */
8032
8033 void
8034 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8035 {
8036 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8037 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8038
8039 if (globals == NULL)
8040 return;
8041
8042 if (globals->fix_cortex_a8 == -1)
8043 {
8044 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8045 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8046 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8047 || out_attr[Tag_CPU_arch_profile].i == 0))
8048 globals->fix_cortex_a8 = 1;
8049 else
8050 globals->fix_cortex_a8 = 0;
8051 }
8052 }
8053
8054
8055 void
8056 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8057 {
8058 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8059 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8060
8061 if (globals == NULL)
8062 return;
8063 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8064 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8065 {
8066 switch (globals->vfp11_fix)
8067 {
8068 case BFD_ARM_VFP11_FIX_DEFAULT:
8069 case BFD_ARM_VFP11_FIX_NONE:
8070 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8071 break;
8072
8073 default:
8074 /* Give a warning, but do as the user requests anyway. */
8075 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8076 "workaround is not necessary for target architecture"), obfd);
8077 }
8078 }
8079 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8080 /* For earlier architectures, we might need the workaround, but do not
8081 enable it by default. If users is running with broken hardware, they
8082 must enable the erratum fix explicitly. */
8083 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8084 }
8085
8086 void
8087 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8088 {
8089 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8090 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8091
8092 if (globals == NULL)
8093 return;
8094
8095 /* We assume only Cortex-M4 may require the fix. */
8096 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8097 || out_attr[Tag_CPU_arch_profile].i != 'M')
8098 {
8099 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8100 /* Give a warning, but do as the user requests anyway. */
8101 _bfd_error_handler
8102 (_("%pB: warning: selected STM32L4XX erratum "
8103 "workaround is not necessary for target architecture"), obfd);
8104 }
8105 }
8106
/* Classification of a VFP11 instruction by the execution pipeline it uses,
   as decided by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
8114
/* Return a VFP register number.  This is encoded as RX:X for single-precision
   registers, or X:RX for double-precision registers, where RX is the group of
   four bits in the instruction encoding and X is the single extension bit.
   RX and X fields are specified using their lowest (starting) bit.  The return
   value is:

     0...31: single-precision registers s0...s31
     32...63: double-precision registers d0...d31.

   Although X should be zero for VFP11 (encoding d0...d15 only), we might
   encounter VFP3 instructions, so we allow the full range for DP registers.  */

static unsigned int
bfd_arm_vfp11_regno (unsigned int insn, bool is_double, unsigned int rx,
		     unsigned int x)
{
  unsigned int group = (insn >> rx) & 0xf;	/* The four-bit RX field.  */
  unsigned int ext = (insn >> x) & 1;		/* The extension bit X.  */

  if (is_double)
    return ((ext << 4) | group) + 32;

  return (group << 1) | ext;
}
8136
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Single-precision registers use one bit each;
   double-precision d0-d15 set both corresponding SP bits.  Ignore d16-d31
   (REG >= 48).

   Use unsigned shift operands: the original "1 << reg" with reg == 31 and
   "3 << 30" left-shift into the sign bit of a signed int, which is
   undefined behaviour in C (C11 6.5.7).  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
8148
/* Return TRUE if the write mask WMASK overwrites any of the NUMREGS
   registers listed in REGS (registers encoded as by bfd_arm_vfp11_regno).  */

static bool
bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
{
  int n;

  for (n = 0; n < numregs; n++)
    {
      unsigned int reg = regs[n];

      /* Single-precision register: exactly one bit in the mask.  */
      if (reg < 32)
	{
	  if ((wmask & (1u << reg)) != 0)
	    return true;
	  continue;
	}

      /* Double-precision register: d0..d15 occupy two adjacent mask bits;
	 d16 and above are not tracked in the 32-bit mask.  */
      reg -= 32;
      if (reg < 16 && (wmask & (3u << (reg * 2))) != 0)
	return true;
    }

  return false;
}
8174
/* In this function, we're interested in two things: finding input registers
   for VFP data-processing instructions, and finding the set of registers which
   arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
   hold the written set, so FLDM etc. are easy to deal with (we're only
   interested in 32 SP registers or 16 dp registers, due to the VFP version
   implemented by the chip in question).  DP registers are marked by setting
   both SP registers in the write mask).

   On return, *DESTMASK has the written-register bits set, REGS[0..*NUMREGS-1]
   holds the input registers of an FMAC/DS-pipeline instruction, and the
   result classifies the instruction's pipeline (VFP11_BAD if unrecognised).  */

static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits [11:8] == 0xb selects the double-precision coprocessor (cp11).  */
  bool is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into one selector value.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* Accumulate ops read the destination register too.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers (a write).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Register-list load: the low byte holds the transfer length.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8366
8367
8368 static int elf32_arm_compare_mapping (const void * a, const void * b);
8369
8370
8371 /* Look for potentially-troublesome code sequences which might trigger the
8372 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8373 (available from ARM) for details of the erratum. A short version is
8374 described in ld.texinfo. */
8375
8376 bool
8377 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8378 {
8379 asection *sec;
8380 bfd_byte *contents = NULL;
8381 int state = 0;
8382 int regs[3], numregs = 0;
8383 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8384 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8385
8386 if (globals == NULL)
8387 return false;
8388
8389 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8390 The states transition as follows:
8391
8392 0 -> 1 (vector) or 0 -> 2 (scalar)
8393 A VFP FMAC-pipeline instruction has been seen. Fill
8394 regs[0]..regs[numregs-1] with its input operands. Remember this
8395 instruction in 'first_fmac'.
8396
8397 1 -> 2
8398 Any instruction, except for a VFP instruction which overwrites
8399 regs[*].
8400
8401 1 -> 3 [ -> 0 ] or
8402 2 -> 3 [ -> 0 ]
8403 A VFP instruction has been seen which overwrites any of regs[*].
8404 We must make a veneer! Reset state to 0 before examining next
8405 instruction.
8406
8407 2 -> 0
8408 If we fail to match anything in state 2, reset to state 0 and reset
8409 the instruction pointer to the instruction after 'first_fmac'.
8410
8411 If the VFP11 vector mode is in use, there must be at least two unrelated
8412 instructions between anti-dependent VFP11 instructions to properly avoid
8413 triggering the erratum, hence the use of the extra state 1. */
8414
8415 /* If we are only performing a partial link do not bother
8416 to construct any glue. */
8417 if (bfd_link_relocatable (link_info))
8418 return true;
8419
8420 /* Skip if this bfd does not correspond to an ELF image. */
8421 if (! is_arm_elf (abfd))
8422 return true;
8423
8424 /* We should have chosen a fix type by the time we get here. */
8425 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8426
8427 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8428 return true;
8429
8430 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8431 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8432 return true;
8433
8434 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8435 {
8436 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8437 struct _arm_elf_section_data *sec_data;
8438
8439 /* If we don't have executable progbits, we're not interested in this
8440 section. Also skip if section is to be excluded. */
8441 if (elf_section_type (sec) != SHT_PROGBITS
8442 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8443 || (sec->flags & SEC_EXCLUDE) != 0
8444 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8445 || sec->output_section == bfd_abs_section_ptr
8446 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8447 continue;
8448
8449 sec_data = elf32_arm_section_data (sec);
8450
8451 if (sec_data->mapcount == 0)
8452 continue;
8453
8454 if (elf_section_data (sec)->this_hdr.contents != NULL)
8455 contents = elf_section_data (sec)->this_hdr.contents;
8456 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8457 goto error_return;
8458
8459 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8460 elf32_arm_compare_mapping);
8461
8462 for (span = 0; span < sec_data->mapcount; span++)
8463 {
8464 unsigned int span_start = sec_data->map[span].vma;
8465 unsigned int span_end = (span == sec_data->mapcount - 1)
8466 ? sec->size : sec_data->map[span + 1].vma;
8467 char span_type = sec_data->map[span].type;
8468
8469 /* FIXME: Only ARM mode is supported at present. We may need to
8470 support Thumb-2 mode also at some point. */
8471 if (span_type != 'a')
8472 continue;
8473
8474 for (i = span_start; i < span_end;)
8475 {
8476 unsigned int next_i = i + 4;
8477 unsigned int insn = bfd_big_endian (abfd)
8478 ? (((unsigned) contents[i] << 24)
8479 | (contents[i + 1] << 16)
8480 | (contents[i + 2] << 8)
8481 | contents[i + 3])
8482 : (((unsigned) contents[i + 3] << 24)
8483 | (contents[i + 2] << 16)
8484 | (contents[i + 1] << 8)
8485 | contents[i]);
8486 unsigned int writemask = 0;
8487 enum bfd_arm_vfp11_pipe vpipe;
8488
8489 switch (state)
8490 {
8491 case 0:
8492 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8493 &numregs);
8494 /* I'm assuming the VFP11 erratum can trigger with denorm
8495 operands on either the FMAC or the DS pipeline. This might
8496 lead to slightly overenthusiastic veneer insertion. */
8497 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8498 {
8499 state = use_vector ? 1 : 2;
8500 first_fmac = i;
8501 veneer_of_insn = insn;
8502 }
8503 break;
8504
8505 case 1:
8506 {
8507 int other_regs[3], other_numregs;
8508 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8509 other_regs,
8510 &other_numregs);
8511 if (vpipe != VFP11_BAD
8512 && bfd_arm_vfp11_antidependency (writemask, regs,
8513 numregs))
8514 state = 3;
8515 else
8516 state = 2;
8517 }
8518 break;
8519
8520 case 2:
8521 {
8522 int other_regs[3], other_numregs;
8523 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8524 other_regs,
8525 &other_numregs);
8526 if (vpipe != VFP11_BAD
8527 && bfd_arm_vfp11_antidependency (writemask, regs,
8528 numregs))
8529 state = 3;
8530 else
8531 {
8532 state = 0;
8533 next_i = first_fmac + 4;
8534 }
8535 }
8536 break;
8537
8538 case 3:
8539 abort (); /* Should be unreachable. */
8540 }
8541
8542 if (state == 3)
8543 {
8544 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8545 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8546
8547 elf32_arm_section_data (sec)->erratumcount += 1;
8548
8549 newerr->u.b.vfp_insn = veneer_of_insn;
8550
8551 switch (span_type)
8552 {
8553 case 'a':
8554 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8555 break;
8556
8557 default:
8558 abort ();
8559 }
8560
8561 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8562 first_fmac);
8563
8564 newerr->vma = -1;
8565
8566 newerr->next = sec_data->erratumlist;
8567 sec_data->erratumlist = newerr;
8568
8569 state = 0;
8570 }
8571
8572 i = next_i;
8573 }
8574 }
8575
8576 if (elf_section_data (sec)->this_hdr.contents != contents)
8577 free (contents);
8578 contents = NULL;
8579 }
8580
8581 return true;
8582
8583 error_return:
8584 if (elf_section_data (sec)->this_hdr.contents != contents)
8585 free (contents);
8586
8587 return false;
8588 }
8589
8590 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8591 after sections have been laid out, using specially-named symbols. */
8592
8593 void
8594 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8595 struct bfd_link_info *link_info)
8596 {
8597 asection *sec;
8598 struct elf32_arm_link_hash_table *globals;
8599 char *tmp_name;
8600
8601 if (bfd_link_relocatable (link_info))
8602 return;
8603
8604 /* Skip if this bfd does not correspond to an ELF image. */
8605 if (! is_arm_elf (abfd))
8606 return;
8607
8608 globals = elf32_arm_hash_table (link_info);
8609 if (globals == NULL)
8610 return;
8611
8612 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8613 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8614 BFD_ASSERT (tmp_name);
8615
8616 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8617 {
8618 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8619 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8620
8621 for (; errnode != NULL; errnode = errnode->next)
8622 {
8623 struct elf_link_hash_entry *myh;
8624 bfd_vma vma;
8625
8626 switch (errnode->type)
8627 {
8628 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8629 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8630 /* Find veneer symbol. */
8631 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8632 errnode->u.b.veneer->u.v.id);
8633
8634 myh = elf_link_hash_lookup
8635 (&(globals)->root, tmp_name, false, false, true);
8636
8637 if (myh == NULL)
8638 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8639 abfd, "VFP11", tmp_name);
8640
8641 vma = myh->root.u.def.section->output_section->vma
8642 + myh->root.u.def.section->output_offset
8643 + myh->root.u.def.value;
8644
8645 errnode->u.b.veneer->vma = vma;
8646 break;
8647
8648 case VFP11_ERRATUM_ARM_VENEER:
8649 case VFP11_ERRATUM_THUMB_VENEER:
8650 /* Find return location. */
8651 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8652 errnode->u.v.id);
8653
8654 myh = elf_link_hash_lookup
8655 (&(globals)->root, tmp_name, false, false, true);
8656
8657 if (myh == NULL)
8658 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8659 abfd, "VFP11", tmp_name);
8660
8661 vma = myh->root.u.def.section->output_section->vma
8662 + myh->root.u.def.section->output_offset
8663 + myh->root.u.def.value;
8664
8665 errnode->u.v.branch->vma = vma;
8666 break;
8667
8668 default:
8669 abort ();
8670 }
8671 }
8672 }
8673
8674 free (tmp_name);
8675 }
8676
8677 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8678 return locations after sections have been laid out, using
8679 specially-named symbols. */
8680
8681 void
8682 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8683 struct bfd_link_info *link_info)
8684 {
8685 asection *sec;
8686 struct elf32_arm_link_hash_table *globals;
8687 char *tmp_name;
8688
8689 if (bfd_link_relocatable (link_info))
8690 return;
8691
8692 /* Skip if this bfd does not correspond to an ELF image. */
8693 if (! is_arm_elf (abfd))
8694 return;
8695
8696 globals = elf32_arm_hash_table (link_info);
8697 if (globals == NULL)
8698 return;
8699
8700 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8701 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8702 BFD_ASSERT (tmp_name);
8703
8704 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8705 {
8706 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8707 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8708
8709 for (; errnode != NULL; errnode = errnode->next)
8710 {
8711 struct elf_link_hash_entry *myh;
8712 bfd_vma vma;
8713
8714 switch (errnode->type)
8715 {
8716 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8717 /* Find veneer symbol. */
8718 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8719 errnode->u.b.veneer->u.v.id);
8720
8721 myh = elf_link_hash_lookup
8722 (&(globals)->root, tmp_name, false, false, true);
8723
8724 if (myh == NULL)
8725 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8726 abfd, "STM32L4XX", tmp_name);
8727
8728 vma = myh->root.u.def.section->output_section->vma
8729 + myh->root.u.def.section->output_offset
8730 + myh->root.u.def.value;
8731
8732 errnode->u.b.veneer->vma = vma;
8733 break;
8734
8735 case STM32L4XX_ERRATUM_VENEER:
8736 /* Find return location. */
8737 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8738 errnode->u.v.id);
8739
8740 myh = elf_link_hash_lookup
8741 (&(globals)->root, tmp_name, false, false, true);
8742
8743 if (myh == NULL)
8744 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8745 abfd, "STM32L4XX", tmp_name);
8746
8747 vma = myh->root.u.def.section->output_section->vma
8748 + myh->root.u.def.section->output_offset
8749 + myh->root.u.def.value;
8750
8751 errnode->u.v.branch->vma = vma;
8752 break;
8753
8754 default:
8755 abort ();
8756 }
8757 }
8758 }
8759
8760 free (tmp_name);
8761 }
8762
/* Return TRUE if INSN is the 32-bit Thumb-2 LDM (load multiple,
   increment-after) instruction.  */
static inline bool
is_thumb2_ldmia (const insn32 insn)
{
  /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
     1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe8900000;
}
8770
/* Return TRUE if INSN is the 32-bit Thumb-2 LDMDB (load multiple,
   decrement-before) instruction.  */
static inline bool
is_thumb2_ldmdb (const insn32 insn)
{
  /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
     1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
  return (insn & 0xffd02000) == 0xe9100000;
}
8778
/* Return TRUE if INSN is a 32-bit Thumb-2 VLDM/VPOP (extension register
   load multiple) instruction, in any of its addressing modes.  */
static inline bool
is_thumb2_vldm (const insn32 insn)
{
  /* A6.5 Extension register load or store instruction
     A7.7.229
     We look for SP 32-bit and DP 64-bit registers.
     Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 64-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
     Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
     <list> is consecutive 32-bit registers
     1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
     if P==0 && U==1 && W==1 && Rn=1101 VPOP
     if PUW=010 || PUW=011 || PUW=101 VLDM.
     The "(insn << 7) >> 28" shifts isolate the P, U and W bits so the
     three accepted PUW combinations can be tested against the 0xd mask.  */
  return
    (((insn & 0xfe100f00) == 0xec100b00) ||
     ((insn & 0xfe100f00) == 0xec100a00))
    && /* (IA without !).  */
    (((((insn << 7) >> 28) & 0xd) == 0x4)
     /* (IA with !), includes VPOP (when reg number is SP).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x5)
     /* (DB with !).  */
     || ((((insn << 7) >> 28) & 0xd) == 0x9));
}
8803
8804 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8805 VLDM opcode and:
8806 - computes the number and the mode of memory accesses
8807 - decides if the replacement should be done:
8808 . replaces only if > 8-word accesses
8809 . or (testing purposes only) replaces all accesses. */
8810
8811 static bool
8812 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8813 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8814 {
8815 int nb_words = 0;
8816
8817 /* The field encoding the register list is the same for both LDMIA
8818 and LDMDB encodings. */
8819 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8820 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8821 else if (is_thumb2_vldm (insn))
8822 nb_words = (insn & 0xff);
8823
8824 /* DEFAULT mode accounts for the real bug condition situation,
8825 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8826 return (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT
8827 ? nb_words > 8
8828 : stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL);
8829 }
8830
/* Look for potentially-troublesome code sequences which might trigger
   the STM STM32L4XX erratum.  Scans the Thumb code of every executable
   section of ABFD, recording a veneer for each affected LDM/VLDM.
   Returns FALSE only on error (e.g. unreadable section contents).  */

bool
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return false;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return true;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return true;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return true;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return true;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no code spans to scan.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bool insn_32bit = false;
	      bool is_ldm = false;
	      bool is_vldm = false;
	      bool is_not_last_in_it_block = false;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = true;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		   last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		   IT block, it will be controlled naturally by IT
		   with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		   tells that we are not on the last item of an IT
		   block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bool is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      /* The position of the lowest set mask bit encodes the
			 IT block length (1 to 4 instructions).  */
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      /* Advance past a 16- or 32-bit Thumb instruction.  */
	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return true;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return false;
}
9024
9025 /* Set target relocation values needed during linking. */
9026
9027 void
9028 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9029 struct bfd_link_info *link_info,
9030 struct elf32_arm_params *params)
9031 {
9032 struct elf32_arm_link_hash_table *globals;
9033
9034 globals = elf32_arm_hash_table (link_info);
9035 if (globals == NULL)
9036 return;
9037
9038 globals->target1_is_rel = params->target1_is_rel;
9039 if (globals->fdpic_p)
9040 globals->target2_reloc = R_ARM_GOT32;
9041 else if (strcmp (params->target2_type, "rel") == 0)
9042 globals->target2_reloc = R_ARM_REL32;
9043 else if (strcmp (params->target2_type, "abs") == 0)
9044 globals->target2_reloc = R_ARM_ABS32;
9045 else if (strcmp (params->target2_type, "got-rel") == 0)
9046 globals->target2_reloc = R_ARM_GOT_PREL;
9047 else
9048 {
9049 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9050 params->target2_type);
9051 }
9052 globals->fix_v4bx = params->fix_v4bx;
9053 globals->use_blx |= params->use_blx;
9054 globals->vfp11_fix = params->vfp11_denorm_fix;
9055 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9056 if (globals->fdpic_p)
9057 globals->pic_veneer = 1;
9058 else
9059 globals->pic_veneer = params->pic_veneer;
9060 globals->fix_cortex_a8 = params->fix_cortex_a8;
9061 globals->fix_arm1176 = params->fix_arm1176;
9062 globals->cmse_implib = params->cmse_implib;
9063 globals->in_implib_bfd = params->in_implib_bfd;
9064
9065 BFD_ASSERT (is_arm_elf (output_bfd));
9066 elf_arm_tdata (output_bfd)->no_enum_size_warning
9067 = params->no_enum_size_warning;
9068 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9069 = params->no_wchar_size_warning;
9070 }
9071
/* Replace the target offset of a Thumb bl or b.w instruction.
   OFFSET is the signed byte displacement to encode; INSN points at the
   first halfword of the 32-bit instruction in ABFD's byte order.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;	/* First (high) halfword of the instruction.  */
  bfd_vma lower;	/* Second (low) halfword of the instruction.  */
  int reloc_sign;

  /* Thumb branch targets are halfword aligned.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: keep the opcode bits, insert imm10 (offset bits
     [21:12]) and the sign bit S (bit 10).  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: insert J1 (bit 13), J2 (bit 11) and imm11.  The
     encoding stores J1 = NOT(I1) EOR S and J2 = NOT(I2) EOR S, where
     I1/I2 are offset bits 23/22.  Note the mask 0x2fff deliberately
     preserves bit 12, which distinguishes BL from BLX.  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
9096
/* Thumb code calling an ARM function.  Emit (once) the Thumb-to-ARM
   glue stub for symbol NAME and redirect the original BL at
   HIT_DATA/OFFSET in INPUT_SECTION to branch to the stub.  VAL is the
   destination address; ADDEND is the relocation addend.  Returns
   FALSE (with *ERROR_MESSAGE set by find_thumb_glue) on failure.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char *	       name,
			 bfd *		       input_bfd,
			 bfd *		       output_bfd,
			 asection *	       input_section,
			 bfd_byte *	       hit_data,
			 asection *	       sym_sec,
			 bfd_vma	       offset,
			 bfd_signed_vma	       addend,
			 bfd_vma	       val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return false;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* An odd offset marks a glue entry whose contents have not been
     written yet; clear the flag and emit the stub instructions.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return false;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: bx pc / nop (Thumb), then an ARM branch below.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return true;
}
9197
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL if
   no glue entry exists for NAME.  VAL is the Thumb destination address;
   S is the glue section owned by the glue BFD.  One of three stub
   flavours is emitted: PIC (pc-relative), BLX-capable, or plain v4t.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* An odd offset marks a glue entry that has not been written yet;
     clear the flag and fill in the stub.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn only; unlike the Thumb-to-ARM case this is not fatal.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9292
/* Arm code calling a Thumb function.  Create (or reuse) the
   ARM-to-Thumb glue stub for NAME and retarget the original ARM branch
   at HIT_DATA/OFFSET in INPUT_SECTION at it.  VAL is the Thumb
   destination address.  Returns FALSE if the stub symbol could not be
   created.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return false;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch; the
     24-bit displacement is replaced below.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return true;
}
9349
/* Populate Arm stub for an exported Thumb function.  This is an
   elf_link_hash_traverse callback: H is the symbol being visited and
   INF is the struct bfd_link_info.  Always returns TRUE so traversal
   continues.  */

static bool
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  Symbols
     without glue allocated need no stub.  */
  if (eh->export_glue == NULL)
    return true;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the glue symbol.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return true;
}
9393
/* Populate ARMv4 BX veneers.  Returns the absolute address of the
   veneer for register REG, writing its three instructions on first
   use.  */

static bfd_vma
elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
{
  bfd_byte *p;
  bfd_vma glue_addr;
  asection *s;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM_BX_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 1 of bx_glue_offset[reg] flags that a veneer slot was
     allocated for this register.  */
  BFD_ASSERT (globals->bx_glue_offset[reg] & 2);

  glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;

  /* Bit 0 flags that the veneer contents have already been written;
     emit the tst/moveq/bx sequence only once per register.  */
  if ((globals->bx_glue_offset[reg] & 1) == 0)
    {
      p = s->contents + glue_addr;
      bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
      bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
      bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
      globals->bx_glue_offset[reg] |= 1;
    }

  return glue_addr + s->output_section->vma + s->output_offset;
}
9429
9430 /* Generate Arm stubs for exported Thumb symbols. */
9431 static void
9432 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9433 struct bfd_link_info *link_info)
9434 {
9435 struct elf32_arm_link_hash_table * globals;
9436
9437 if (link_info == NULL)
9438 /* Ignore this if we are not called by the ELF backend linker. */
9439 return;
9440
9441 globals = elf32_arm_hash_table (link_info);
9442 if (globals == NULL)
9443 return;
9444
9445 /* If blx is available then exported Thumb symbols are OK and there is
9446 nothing to do. */
9447 if (globals->use_blx)
9448 return;
9449
9450 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9451 link_info);
9452 }
9453
9454 /* Reserve space for COUNT dynamic relocations in relocation selection
9455 SRELOC. */
9456
9457 static void
9458 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9459 bfd_size_type count)
9460 {
9461 struct elf32_arm_link_hash_table *htab;
9462
9463 htab = elf32_arm_hash_table (info);
9464 BFD_ASSERT (htab->root.dynamic_sections_created);
9465 if (sreloc == NULL)
9466 abort ();
9467 sreloc->size += RELOC_SIZE (htab) * count;
9468 }
9469
9470 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9471 dynamic, the relocations should go in SRELOC, otherwise they should
9472 go in the special .rel.iplt section. */
9473
9474 static void
9475 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9476 bfd_size_type count)
9477 {
9478 struct elf32_arm_link_hash_table *htab;
9479
9480 htab = elf32_arm_hash_table (info);
9481 if (!htab->root.dynamic_sections_created)
9482 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9483 else
9484 {
9485 BFD_ASSERT (sreloc != NULL);
9486 sreloc->size += RELOC_SIZE (htab) * count;
9487 }
9488 }
9489
9490 /* Add relocation REL to the end of relocation section SRELOC. */
9491
9492 static void
9493 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9494 asection *sreloc, Elf_Internal_Rela *rel)
9495 {
9496 bfd_byte *loc;
9497 struct elf32_arm_link_hash_table *htab;
9498
9499 htab = elf32_arm_hash_table (info);
9500 if (!htab->root.dynamic_sections_created
9501 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9502 sreloc = htab->root.irelplt;
9503 if (sreloc == NULL)
9504 abort ();
9505 loc = sreloc->contents;
9506 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9507 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9508 abort ();
9509 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9510 }
9511
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Also reserves the matching dynamic relocation and the
   .got.plt / .igot.plt slot.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bool is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->root.target_os == is_nacl && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* Record the offset of this entry; the Thumb stub, if any, precedes it.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  /* We also need to make an entry in the .got.plt section, which
     will be placed in the .got section by the linker script.  */
  if (is_iplt_entry)
    arm_plt->got_offset = sgotplt->size;
  else
    /* TLS descriptors occupy the start of .got.plt; discount them.  */
    arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
  if (htab->fdpic_p)
    /* Function descriptor takes 64 bits in GOT.  */
    sgotplt->size += 8;
  else
    sgotplt->size += 4;
}
9588
9589 static bfd_vma
9590 arm_movw_immediate (bfd_vma value)
9591 {
9592 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9593 }
9594
9595 static bfd_vma
9596 arm_movt_immediate (bfd_vma value)
9597 {
9598 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9599 }
9600
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   Returns FALSE if there was a problem.  */

static bool
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  bfd_vma got_offset, got_address, plt_address;
  bfd_vma got_displacement, initial_got_entry;
  bfd_byte * ptr;

  BFD_ASSERT (sgot != NULL);

  /* Get the offset into the .(i)got.plt table of the entry that
     corresponds to this function.  (The low bit of got_offset is a
     bookkeeping flag, hence the mask.)  */
  got_offset = (arm_plt->got_offset & -2);

  /* Get the index in the procedure linkage table which
     corresponds to this symbol.  This is the index of this symbol
     in all the symbols for which we are making plt entries.
     After the reserved .got.plt entries, all symbols appear in
     the same order as in .plt.  */
  if (htab->fdpic_p)
    /* Function descriptor takes 8 bytes.  */
    plt_index = (got_offset - got_header_size) / 8;
  else
    plt_index = (got_offset - got_header_size) / 4;

  /* Calculate the address of the GOT entry.  */
  got_address = (sgot->output_section->vma
		 + sgot->output_offset
		 + got_offset);

  /* ...and the address of the PLT entry.  */
  plt_address = (splt->output_section->vma
		 + splt->output_offset
		 + root_plt->offset);

  ptr = splt->contents + root_plt->offset;
  if (htab->root.target_os == is_vxworks && bfd_link_pic (info))
    {
      /* VxWorks shared-library PLT: words 2 and 5 carry data (GOT
	 offset and relocation index), the rest are instructions.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_shared_plt_entry[i];
	  if (i == 2)
	    val |= got_address - sgot->output_section->vma;
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}
    }
  else if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks executable PLT; word 4 holds a branch back to the
	 start of the PLT.  */
      unsigned int i;
      bfd_vma val;

      for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	{
	  val = elf32_arm_vxworks_exec_plt_entry[i];
	  if (i == 2)
	    val |= got_address;
	  if (i == 4)
	    val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	  if (i == 5)
	    val |= plt_index * RELOC_SIZE (htab);
	  if (i == 2 || i == 5)
	    bfd_put_32 (output_bfd, val, ptr);
	  else
	    put_arm_insn (htab, output_bfd, val, ptr);
	}

      loc = (htab->srelplt2->contents
	     + (plt_index * 2 + 1) * RELOC_SIZE (htab));

      /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	 referencing the GOT for this PLT entry.  */
      rel.r_offset = plt_address + 8;
      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
      rel.r_addend = got_offset;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
      loc += RELOC_SIZE (htab);

      /* Create the R_ARM_ABS32 relocation referencing the
	 beginning of the PLT for this GOT entry.  */
      rel.r_offset = got_address;
      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
      rel.r_addend = 0;
      SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
    }
  else if (htab->root.target_os == is_nacl)
    {
      /* Calculate the displacement between the PLT slot and the
	 common tail that's part of the special initial PLT slot.  */
      int32_t tail_displacement
	= ((splt->output_section->vma + splt->output_offset
	    + ARM_NACL_PLT_TAIL_OFFSET)
	   - (plt_address + htab->plt_entry_size + 4));
      BFD_ASSERT ((tail_displacement & 3) == 0);
      tail_displacement >>= 2;

      BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		  || (-tail_displacement & 0xff000000) == 0);

      /* Calculate the displacement between the PLT slot and the entry
	 in the GOT.  The offset accounts for the value produced by
	 adding to pc in the penultimate instruction of the PLT stub.  */
      got_displacement = (got_address
			  - (plt_address + htab->plt_entry_size));

      /* NaCl does not support interworking at all.  */
      BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

      /* movw/movt pair loading the GOT displacement, then the stub
	 tail branch.  */
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[0]
		    | arm_movw_immediate (got_displacement),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[1]
		    | arm_movt_immediate (got_displacement),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_nacl_plt_entry[3]
		    | (tail_displacement & 0x00ffffff),
		    ptr + 12);
    }
  else if (htab->fdpic_p)
    {
      const bfd_vma *plt_entry = using_thumb_only (htab)
	? elf32_arm_fdpic_thumb_plt_entry
	: elf32_arm_fdpic_plt_entry;

      /* Fill-up Thumb stub if needed.  It precedes the entry proper,
	 hence the negative offsets from PTR.  */
      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}
      /* As we are using 32 bit instructions even for the Thumb
	 version, we have to use 'put_arm_insn' instead of
	 'put_thumb_insn'.  */
      put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
      put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
      put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
      put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
      bfd_put_32 (output_bfd, got_offset, ptr + 16);

      if (!(info->flags & DF_BIND_NOW))
	{
	  /* funcdesc_value_reloc_offset.  */
	  bfd_put_32 (output_bfd,
		      htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
		      ptr + 20);
	  put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
	  put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
	  put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
	  put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
	}
    }
  else if (using_thumb_only (htab))
    {
      /* PR ld/16017: Generate thumb only PLT entries.  */
      if (!using_thumb2 (htab))
	{
	  /* FIXME: We ought to be able to generate thumb-1 PLT
	     instructions...  */
	  _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
			      output_bfd);
	  return false;
	}

      /* Calculate the displacement between the PLT slot and the entry in
	 the GOT.  The 12-byte offset accounts for the value produced by
	 adding to pc in the 3rd instruction of the PLT stub.  */
      got_displacement = got_address - (plt_address + 12);

      /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	 instead of 'put_thumb_insn'.  The displacement is scattered
	 into the Thumb-2 movw/movt immediate fields.  */
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[0]
		    | ((got_displacement & 0x000000ff) << 16)
		    | ((got_displacement & 0x00000700) << 20)
		    | ((got_displacement & 0x00000800) >> 1)
		    | ((got_displacement & 0x0000f000) >> 12),
		    ptr + 0);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[1]
		    | ((got_displacement & 0x00ff0000) )
		    | ((got_displacement & 0x07000000) << 4)
		    | ((got_displacement & 0x08000000) >> 17)
		    | ((got_displacement & 0xf0000000) >> 28),
		    ptr + 4);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[2],
		    ptr + 8);
      put_arm_insn (htab, output_bfd,
		    elf32_thumb2_plt_entry[3],
		    ptr + 12);
    }
  else
    {
      /* Calculate the displacement between the PLT slot and the
	 entry in the GOT.  The eight-byte offset accounts for the
	 value produced by adding to pc in the first instruction
	 of the PLT stub.  */
      got_displacement = got_address - (plt_address + 8);

      if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	{
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[0], ptr - 4);
	  put_thumb_insn (htab, output_bfd,
			  elf32_arm_plt_thumb_stub[1], ptr - 2);
	}

      if (!elf32_arm_use_long_plt_entry)
	{
	  /* The short entry can only reach a GOT within 256MB.  */
	  BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[0]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[1]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_short[2]
			| (got_displacement & 0x00000fff),
			ptr + 8);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	}
      else
	{
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[0]
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[1]
			| ((got_displacement & 0x0ff00000) >> 20),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[2]
			| ((got_displacement & 0x000ff000) >> 12),
			ptr+ 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_plt_entry_long[3]
			| (got_displacement & 0x00000fff),
			ptr + 12);
	}
    }

  /* Fill in the entry in the .rel(a).(i)plt section.  */
  rel.r_offset = got_address;
  rel.r_addend = 0;
  if (dynindx == -1)
    {
      /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	 The dynamic linker or static executable then calls SYM_VALUE
	 to determine the correct run-time value of the .igot.plt entry.  */
      rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
      initial_got_entry = sym_value;
    }
  else
    {
      /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	 used by PLT entry.  */
      if (htab->fdpic_p)
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  initial_got_entry = 0;
	}
      else
	{
	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	  initial_got_entry = (splt->output_section->vma
			       + splt->output_offset);

	  /* PR ld/16017
	     When thumb only we need to set the LSB for any address that
	     will be used with an interworking branch instruction.  */
	  if (using_thumb_only (htab))
	    initial_got_entry |= 1;
	}
    }

  /* Fill in the entry in the global offset table.  */
  bfd_put_32 (output_bfd, initial_got_entry,
	      sgot->contents + got_offset);

  if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
    {
      /* Setup initial funcdesc value.  */
      /* FIXME: we don't support lazy binding because there is a
	 race condition between both words getting written and
	 some other thread attempting to read them.  The ARM
	 architecture does not have an atomic 64 bit load/store
	 instruction that could be used to prevent it; it is
	 recommended that threaded FDPIC applications run with the
	 LD_BIND_NOW environment variable set.  */
      bfd_put_32 (output_bfd, plt_address + 0x18,
		  sgot->contents + got_offset);
      bfd_put_32 (output_bfd, -1 /*TODO*/,
		  sgot->contents + got_offset + 4);
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return true;
}
9986
9987 /* Some relocations map to different relocations depending on the
9988 target. Return the real relocation. */
9989
9990 static int
9991 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9992 int r_type)
9993 {
9994 switch (r_type)
9995 {
9996 case R_ARM_TARGET1:
9997 if (globals->target1_is_rel)
9998 return R_ARM_REL32;
9999 else
10000 return R_ARM_ABS32;
10001
10002 case R_ARM_TARGET2:
10003 return globals->target2_reloc;
10004
10005 default:
10006 return r_type;
10007 }
10008 }
10009
10010 /* Return the base VMA address which should be subtracted from real addresses
10011 when resolving @dtpoff relocation.
10012 This is PT_TLS segment p_vaddr. */
10013
10014 static bfd_vma
10015 dtpoff_base (struct bfd_link_info *info)
10016 {
10017 /* If tls_sec is NULL, we should have signalled an error already. */
10018 if (elf_hash_table (info)->tls_sec == NULL)
10019 return 0;
10020 return elf_hash_table (info)->tls_sec->vma;
10021 }
10022
10023 /* Return the relocation value for @tpoff relocation
10024 if STT_TLS virtual address is ADDRESS. */
10025
10026 static bfd_vma
10027 tpoff (struct bfd_link_info *info, bfd_vma address)
10028 {
10029 struct elf_link_hash_table *htab = elf_hash_table (info);
10030 bfd_vma base;
10031
10032 /* If tls_sec is NULL, we should have signalled an error already. */
10033 if (htab->tls_sec == NULL)
10034 return 0;
10035 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10036 return address - htab->tls_sec->vma + base;
10037 }
10038
10039 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10040 VALUE is the relocation value. */
10041
10042 static bfd_reloc_status_type
10043 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10044 {
10045 if (value > 0xfff)
10046 return bfd_reloc_overflow;
10047
10048 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10049 bfd_put_32 (abfd, value, data);
10050 return bfd_reloc_ok;
10051 }
10052
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.  */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the word at the relocated location: zero for a local
	 symbol, otherwise bias the stored value by the PC offset of the
	 access sequence (low bit set indicates Thumb => -5, else ARM
	 => -8).  The caller still applies final_link_relocate.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Patch whichever instruction of the TLS descriptor
	 sequence this relocation covers; unrecognized encodings are
	 reported and rejected.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same structure as the Thumb case above, using the
	 ARM encodings of the descriptor sequence.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Replace the 32-bit call with either a pair
	 of 16-bit instructions or a Thumb-2 wide nop.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10203
10204 /* For a given value of n, calculate the value of G_n as required to
10205 deal with group relocations. We return it in the form of an
10206 encoded constant-and-rotation, together with the final residual. If n is
10207 specified as less than zero, then final_residual is filled with the
10208 input value and no further action is performed. */
10209
10210 static bfd_vma
10211 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10212 {
10213 int current_n;
10214 bfd_vma g_n;
10215 bfd_vma encoded_g_n = 0;
10216 bfd_vma residual = value; /* Also known as Y_n. */
10217
10218 for (current_n = 0; current_n <= n; current_n++)
10219 {
10220 int shift;
10221
10222 /* Calculate which part of the value to mask. */
10223 if (residual == 0)
10224 shift = 0;
10225 else
10226 {
10227 int msb;
10228
10229 /* Determine the most significant bit in the residual and
10230 align the resulting value to a 2-bit boundary. */
10231 for (msb = 30; msb >= 0; msb -= 2)
10232 if (residual & (3u << msb))
10233 break;
10234
10235 /* The desired shift is now (msb - 6), or zero, whichever
10236 is the greater. */
10237 shift = msb - 6;
10238 if (shift < 0)
10239 shift = 0;
10240 }
10241
10242 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10243 g_n = residual & (0xff << shift);
10244 encoded_g_n = (g_n >> shift)
10245 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10246
10247 /* Calculate the residual for the next time around. */
10248 residual &= ~g_n;
10249 }
10250
10251 *final_residual = residual;
10252
10253 return encoded_g_n;
10254 }
10255
10256 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10257 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10258
10259 static int
10260 identify_add_or_sub (bfd_vma insn)
10261 {
10262 int opcode = insn & 0x1e00000;
10263
10264 if (opcode == 1 << 23) /* ADD */
10265 return 1;
10266
10267 if (opcode == 1 << 22) /* SUB */
10268 return -1;
10269
10270 return 0;
10271 }
10272
10273 /* Perform a relocation as part of a final link. */
10274
10275 static bfd_reloc_status_type
10276 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10277 bfd * input_bfd,
10278 bfd * output_bfd,
10279 asection * input_section,
10280 bfd_byte * contents,
10281 Elf_Internal_Rela * rel,
10282 bfd_vma value,
10283 struct bfd_link_info * info,
10284 asection * sym_sec,
10285 const char * sym_name,
10286 unsigned char st_type,
10287 enum arm_st_branch_type branch_type,
10288 struct elf_link_hash_entry * h,
10289 bool * unresolved_reloc_p,
10290 char ** error_message)
10291 {
10292 unsigned long r_type = howto->type;
10293 unsigned long r_symndx;
10294 bfd_byte * hit_data = contents + rel->r_offset;
10295 bfd_vma * local_got_offsets;
10296 bfd_vma * local_tlsdesc_gotents;
10297 asection * sgot;
10298 asection * splt;
10299 asection * sreloc = NULL;
10300 asection * srelgot;
10301 bfd_vma addend;
10302 bfd_signed_vma signed_addend;
10303 unsigned char dynreloc_st_type;
10304 bfd_vma dynreloc_value;
10305 struct elf32_arm_link_hash_table * globals;
10306 struct elf32_arm_link_hash_entry *eh;
10307 union gotplt_union *root_plt;
10308 struct arm_plt_info *arm_plt;
10309 bfd_vma plt_offset;
10310 bfd_vma gotplt_offset;
10311 bool has_iplt_entry;
10312 bool resolved_to_zero;
10313
10314 globals = elf32_arm_hash_table (info);
10315 if (globals == NULL)
10316 return bfd_reloc_notsupported;
10317
10318 BFD_ASSERT (is_arm_elf (input_bfd));
10319 BFD_ASSERT (howto != NULL);
10320
10321 /* Some relocation types map to different relocations depending on the
10322 target. We pick the right one here. */
10323 r_type = arm_real_reloc_type (globals, r_type);
10324
10325 /* It is possible to have linker relaxations on some TLS access
10326 models. Update our information here. */
10327 r_type = elf32_arm_tls_transition (info, r_type, h);
10328
10329 if (r_type != howto->type)
10330 howto = elf32_arm_howto_from_type (r_type);
10331
10332 eh = (struct elf32_arm_link_hash_entry *) h;
10333 sgot = globals->root.sgot;
10334 local_got_offsets = elf_local_got_offsets (input_bfd);
10335 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10336
10337 if (globals->root.dynamic_sections_created)
10338 srelgot = globals->root.srelgot;
10339 else
10340 srelgot = NULL;
10341
10342 r_symndx = ELF32_R_SYM (rel->r_info);
10343
10344 if (globals->use_rel)
10345 {
10346 bfd_vma sign;
10347
10348 switch (howto->size)
10349 {
10350 case 0: addend = bfd_get_8 (input_bfd, hit_data); break;
10351 case 1: addend = bfd_get_16 (input_bfd, hit_data); break;
10352 case 2: addend = bfd_get_32 (input_bfd, hit_data); break;
10353 default: addend = 0; break;
10354 }
10355 /* Note: the addend and signed_addend calculated here are
10356 incorrect for any split field. */
10357 addend &= howto->src_mask;
10358 sign = howto->src_mask & ~(howto->src_mask >> 1);
10359 signed_addend = (addend ^ sign) - sign;
10360 signed_addend = (bfd_vma) signed_addend << howto->rightshift;
10361 addend <<= howto->rightshift;
10362 }
10363 else
10364 addend = signed_addend = rel->r_addend;
10365
10366 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10367 are resolving a function call relocation. */
10368 if (using_thumb_only (globals)
10369 && (r_type == R_ARM_THM_CALL
10370 || r_type == R_ARM_THM_JUMP24)
10371 && branch_type == ST_BRANCH_TO_ARM)
10372 branch_type = ST_BRANCH_TO_THUMB;
10373
10374 /* Record the symbol information that should be used in dynamic
10375 relocations. */
10376 dynreloc_st_type = st_type;
10377 dynreloc_value = value;
10378 if (branch_type == ST_BRANCH_TO_THUMB)
10379 dynreloc_value |= 1;
10380
10381 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10382 VALUE appropriately for relocations that we resolve at link time. */
10383 has_iplt_entry = false;
10384 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10385 &arm_plt)
10386 && root_plt->offset != (bfd_vma) -1)
10387 {
10388 plt_offset = root_plt->offset;
10389 gotplt_offset = arm_plt->got_offset;
10390
10391 if (h == NULL || eh->is_iplt)
10392 {
10393 has_iplt_entry = true;
10394 splt = globals->root.iplt;
10395
10396 /* Populate .iplt entries here, because not all of them will
10397 be seen by finish_dynamic_symbol. The lower bit is set if
10398 we have already populated the entry. */
10399 if (plt_offset & 1)
10400 plt_offset--;
10401 else
10402 {
10403 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10404 -1, dynreloc_value))
10405 root_plt->offset |= 1;
10406 else
10407 return bfd_reloc_notsupported;
10408 }
10409
10410 /* Static relocations always resolve to the .iplt entry. */
10411 st_type = STT_FUNC;
10412 value = (splt->output_section->vma
10413 + splt->output_offset
10414 + plt_offset);
10415 branch_type = ST_BRANCH_TO_ARM;
10416
10417 /* If there are non-call relocations that resolve to the .iplt
10418 entry, then all dynamic ones must too. */
10419 if (arm_plt->noncall_refcount != 0)
10420 {
10421 dynreloc_st_type = st_type;
10422 dynreloc_value = value;
10423 }
10424 }
10425 else
10426 /* We populate the .plt entry in finish_dynamic_symbol. */
10427 splt = globals->root.splt;
10428 }
10429 else
10430 {
10431 splt = NULL;
10432 plt_offset = (bfd_vma) -1;
10433 gotplt_offset = (bfd_vma) -1;
10434 }
10435
10436 resolved_to_zero = (h != NULL
10437 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10438
10439 switch (r_type)
10440 {
10441 case R_ARM_NONE:
10442 /* We don't need to find a value for this symbol. It's just a
10443 marker. */
10444 *unresolved_reloc_p = false;
10445 return bfd_reloc_ok;
10446
10447 case R_ARM_ABS12:
10448 if (globals->root.target_os != is_vxworks)
10449 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10450 /* Fall through. */
10451
10452 case R_ARM_PC24:
10453 case R_ARM_ABS32:
10454 case R_ARM_ABS32_NOI:
10455 case R_ARM_REL32:
10456 case R_ARM_REL32_NOI:
10457 case R_ARM_CALL:
10458 case R_ARM_JUMP24:
10459 case R_ARM_XPC25:
10460 case R_ARM_PREL31:
10461 case R_ARM_PLT32:
10462 /* Handle relocations which should use the PLT entry. ABS32/REL32
10463 will use the symbol's value, which may point to a PLT entry, but we
10464 don't need to handle that here. If we created a PLT entry, all
10465 branches in this object should go to it, except if the PLT is too
10466 far away, in which case a long branch stub should be inserted. */
10467 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10468 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10469 && r_type != R_ARM_CALL
10470 && r_type != R_ARM_JUMP24
10471 && r_type != R_ARM_PLT32)
10472 && plt_offset != (bfd_vma) -1)
10473 {
10474 /* If we've created a .plt section, and assigned a PLT entry
10475 to this function, it must either be a STT_GNU_IFUNC reference
10476 or not be known to bind locally. In other cases, we should
10477 have cleared the PLT entry by now. */
10478 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10479
10480 value = (splt->output_section->vma
10481 + splt->output_offset
10482 + plt_offset);
10483 *unresolved_reloc_p = false;
10484 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10485 contents, rel->r_offset, value,
10486 rel->r_addend);
10487 }
10488
10489 /* When generating a shared object or relocatable executable, these
10490 relocations are copied into the output file to be resolved at
10491 run time. */
10492 if ((bfd_link_pic (info)
10493 || globals->root.is_relocatable_executable
10494 || globals->fdpic_p)
10495 && (input_section->flags & SEC_ALLOC)
10496 && !(globals->root.target_os == is_vxworks
10497 && strcmp (input_section->output_section->name,
10498 ".tls_vars") == 0)
10499 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10500 || !SYMBOL_CALLS_LOCAL (info, h))
10501 && !(input_bfd == globals->stub_bfd
10502 && strstr (input_section->name, STUB_SUFFIX))
10503 && (h == NULL
10504 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10505 && !resolved_to_zero)
10506 || h->root.type != bfd_link_hash_undefweak)
10507 && r_type != R_ARM_PC24
10508 && r_type != R_ARM_CALL
10509 && r_type != R_ARM_JUMP24
10510 && r_type != R_ARM_PREL31
10511 && r_type != R_ARM_PLT32)
10512 {
10513 Elf_Internal_Rela outrel;
10514 bool skip, relocate;
10515 int isrofixup = 0;
10516
10517 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10518 && !h->def_regular)
10519 {
10520 char *v = _("shared object");
10521
10522 if (bfd_link_executable (info))
10523 v = _("PIE executable");
10524
10525 _bfd_error_handler
10526 (_("%pB: relocation %s against external or undefined symbol `%s'"
10527 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10528 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10529 return bfd_reloc_notsupported;
10530 }
10531
10532 *unresolved_reloc_p = false;
10533
10534 if (sreloc == NULL && globals->root.dynamic_sections_created)
10535 {
10536 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10537 ! globals->use_rel);
10538
10539 if (sreloc == NULL)
10540 return bfd_reloc_notsupported;
10541 }
10542
10543 skip = false;
10544 relocate = false;
10545
10546 outrel.r_addend = addend;
10547 outrel.r_offset =
10548 _bfd_elf_section_offset (output_bfd, info, input_section,
10549 rel->r_offset);
10550 if (outrel.r_offset == (bfd_vma) -1)
10551 skip = true;
10552 else if (outrel.r_offset == (bfd_vma) -2)
10553 skip = true, relocate = true;
10554 outrel.r_offset += (input_section->output_section->vma
10555 + input_section->output_offset);
10556
10557 if (skip)
10558 memset (&outrel, 0, sizeof outrel);
10559 else if (h != NULL
10560 && h->dynindx != -1
10561 && (!bfd_link_pic (info)
10562 || !(bfd_link_pie (info)
10563 || SYMBOLIC_BIND (info, h))
10564 || !h->def_regular))
10565 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10566 else
10567 {
10568 int symbol;
10569
10570 /* This symbol is local, or marked to become local. */
10571 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10572 || (globals->fdpic_p && !bfd_link_pic (info)));
10573 /* On SVR4-ish systems, the dynamic loader cannot
10574 relocate the text and data segments independently,
10575 so the symbol does not matter. */
10576 symbol = 0;
10577 if (dynreloc_st_type == STT_GNU_IFUNC)
10578 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10579 to the .iplt entry. Instead, every non-call reference
10580 must use an R_ARM_IRELATIVE relocation to obtain the
10581 correct run-time address. */
10582 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10583 else if (globals->fdpic_p && !bfd_link_pic (info))
10584 isrofixup = 1;
10585 else
10586 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10587 if (globals->use_rel)
10588 relocate = true;
10589 else
10590 outrel.r_addend += dynreloc_value;
10591 }
10592
10593 if (isrofixup)
10594 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10595 else
10596 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10597
10598 /* If this reloc is against an external symbol, we do not want to
10599 fiddle with the addend. Otherwise, we need to include the symbol
10600 value so that it becomes an addend for the dynamic reloc. */
10601 if (! relocate)
10602 return bfd_reloc_ok;
10603
10604 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10605 contents, rel->r_offset,
10606 dynreloc_value, (bfd_vma) 0);
10607 }
10608 else switch (r_type)
10609 {
10610 case R_ARM_ABS12:
10611 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10612
10613 case R_ARM_XPC25: /* Arm BLX instruction. */
10614 case R_ARM_CALL:
10615 case R_ARM_JUMP24:
10616 case R_ARM_PC24: /* Arm B/BL instruction. */
10617 case R_ARM_PLT32:
10618 {
10619 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10620
10621 if (r_type == R_ARM_XPC25)
10622 {
10623 /* Check for Arm calling Arm function. */
10624 /* FIXME: Should we translate the instruction into a BL
10625 instruction instead ? */
10626 if (branch_type != ST_BRANCH_TO_THUMB)
10627 _bfd_error_handler
10628 (_("\%pB: warning: %s BLX instruction targets"
10629 " %s function '%s'"),
10630 input_bfd, "ARM",
10631 "ARM", h ? h->root.root.string : "(local)");
10632 }
10633 else if (r_type == R_ARM_PC24)
10634 {
10635 /* Check for Arm calling Thumb function. */
10636 if (branch_type == ST_BRANCH_TO_THUMB)
10637 {
10638 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10639 output_bfd, input_section,
10640 hit_data, sym_sec, rel->r_offset,
10641 signed_addend, value,
10642 error_message))
10643 return bfd_reloc_ok;
10644 else
10645 return bfd_reloc_dangerous;
10646 }
10647 }
10648
10649 /* Check if a stub has to be inserted because the
10650 destination is too far or we are changing mode. */
10651 if ( r_type == R_ARM_CALL
10652 || r_type == R_ARM_JUMP24
10653 || r_type == R_ARM_PLT32)
10654 {
10655 enum elf32_arm_stub_type stub_type = arm_stub_none;
10656 struct elf32_arm_link_hash_entry *hash;
10657
10658 hash = (struct elf32_arm_link_hash_entry *) h;
10659 stub_type = arm_type_of_stub (info, input_section, rel,
10660 st_type, &branch_type,
10661 hash, value, sym_sec,
10662 input_bfd, sym_name);
10663
10664 if (stub_type != arm_stub_none)
10665 {
10666 /* The target is out of reach, so redirect the
10667 branch to the local stub for this function. */
10668 stub_entry = elf32_arm_get_stub_entry (input_section,
10669 sym_sec, h,
10670 rel, globals,
10671 stub_type);
10672 {
10673 if (stub_entry != NULL)
10674 value = (stub_entry->stub_offset
10675 + stub_entry->stub_sec->output_offset
10676 + stub_entry->stub_sec->output_section->vma);
10677
10678 if (plt_offset != (bfd_vma) -1)
10679 *unresolved_reloc_p = false;
10680 }
10681 }
10682 else
10683 {
10684 /* If the call goes through a PLT entry, make sure to
10685 check distance to the right destination address. */
10686 if (plt_offset != (bfd_vma) -1)
10687 {
10688 value = (splt->output_section->vma
10689 + splt->output_offset
10690 + plt_offset);
10691 *unresolved_reloc_p = false;
10692 /* The PLT entry is in ARM mode, regardless of the
10693 target function. */
10694 branch_type = ST_BRANCH_TO_ARM;
10695 }
10696 }
10697 }
10698
10699 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10700 where:
10701 S is the address of the symbol in the relocation.
10702 P is address of the instruction being relocated.
10703 A is the addend (extracted from the instruction) in bytes.
10704
10705 S is held in 'value'.
10706 P is the base address of the section containing the
10707 instruction plus the offset of the reloc into that
10708 section, ie:
10709 (input_section->output_section->vma +
10710 input_section->output_offset +
10711 rel->r_offset).
10712 A is the addend, converted into bytes, ie:
10713 (signed_addend * 4)
10714
10715 Note: None of these operations have knowledge of the pipeline
10716 size of the processor, thus it is up to the assembler to
10717 encode this information into the addend. */
10718 value -= (input_section->output_section->vma
10719 + input_section->output_offset);
10720 value -= rel->r_offset;
10721 value += signed_addend;
10722
10723 signed_addend = value;
10724 signed_addend >>= howto->rightshift;
10725
10726 /* A branch to an undefined weak symbol is turned into a jump to
10727 the next instruction unless a PLT entry will be created.
10728 Do the same for local undefined symbols (but not for STN_UNDEF).
10729 The jump to the next instruction is optimized as a NOP depending
10730 on the architecture. */
10731 if (h ? (h->root.type == bfd_link_hash_undefweak
10732 && plt_offset == (bfd_vma) -1)
10733 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10734 {
10735 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10736
10737 if (arch_has_arm_nop (globals))
10738 value |= 0x0320f000;
10739 else
10740 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10741 }
10742 else
10743 {
10744 /* Perform a signed range check. */
10745 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10746 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10747 return bfd_reloc_overflow;
10748
10749 addend = (value & 2);
10750
10751 value = (signed_addend & howto->dst_mask)
10752 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10753
10754 if (r_type == R_ARM_CALL)
10755 {
10756 /* Set the H bit in the BLX instruction. */
10757 if (branch_type == ST_BRANCH_TO_THUMB)
10758 {
10759 if (addend)
10760 value |= (1 << 24);
10761 else
10762 value &= ~(bfd_vma)(1 << 24);
10763 }
10764
10765 /* Select the correct instruction (BL or BLX). */
10766 /* Only if we are not handling a BL to a stub. In this
10767 case, mode switching is performed by the stub. */
10768 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10769 value |= (1 << 28);
10770 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10771 {
10772 value &= ~(bfd_vma)(1 << 28);
10773 value |= (1 << 24);
10774 }
10775 }
10776 }
10777 }
10778 break;
10779
10780 case R_ARM_ABS32:
10781 value += addend;
10782 if (branch_type == ST_BRANCH_TO_THUMB)
10783 value |= 1;
10784 break;
10785
10786 case R_ARM_ABS32_NOI:
10787 value += addend;
10788 break;
10789
10790 case R_ARM_REL32:
10791 value += addend;
10792 if (branch_type == ST_BRANCH_TO_THUMB)
10793 value |= 1;
10794 value -= (input_section->output_section->vma
10795 + input_section->output_offset + rel->r_offset);
10796 break;
10797
10798 case R_ARM_REL32_NOI:
10799 value += addend;
10800 value -= (input_section->output_section->vma
10801 + input_section->output_offset + rel->r_offset);
10802 break;
10803
10804 case R_ARM_PREL31:
10805 value -= (input_section->output_section->vma
10806 + input_section->output_offset + rel->r_offset);
10807 value += signed_addend;
10808 if (! h || h->root.type != bfd_link_hash_undefweak)
10809 {
10810 /* Check for overflow. */
10811 if ((value ^ (value >> 1)) & (1 << 30))
10812 return bfd_reloc_overflow;
10813 }
10814 value &= 0x7fffffff;
10815 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10816 if (branch_type == ST_BRANCH_TO_THUMB)
10817 value |= 1;
10818 break;
10819 }
10820
10821 bfd_put_32 (input_bfd, value, hit_data);
10822 return bfd_reloc_ok;
10823
10824 case R_ARM_ABS8:
10825 value += addend;
10826
10827 /* There is no way to tell whether the user intended to use a signed or
10828 unsigned addend. When checking for overflow we accept either,
10829 as specified by the AAELF. */
10830 if ((long) value > 0xff || (long) value < -0x80)
10831 return bfd_reloc_overflow;
10832
10833 bfd_put_8 (input_bfd, value, hit_data);
10834 return bfd_reloc_ok;
10835
10836 case R_ARM_ABS16:
10837 value += addend;
10838
10839 /* See comment for R_ARM_ABS8. */
10840 if ((long) value > 0xffff || (long) value < -0x8000)
10841 return bfd_reloc_overflow;
10842
10843 bfd_put_16 (input_bfd, value, hit_data);
10844 return bfd_reloc_ok;
10845
10846 case R_ARM_THM_ABS5:
10847 /* Support ldr and str instructions for the thumb. */
10848 if (globals->use_rel)
10849 {
10850 /* Need to refetch addend. */
10851 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10852 /* ??? Need to determine shift amount from operand size. */
10853 addend >>= howto->rightshift;
10854 }
10855 value += addend;
10856
10857 /* ??? Isn't value unsigned? */
10858 if ((long) value > 0x1f || (long) value < -0x10)
10859 return bfd_reloc_overflow;
10860
10861 /* ??? Value needs to be properly shifted into place first. */
10862 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10863 bfd_put_16 (input_bfd, value, hit_data);
10864 return bfd_reloc_ok;
10865
10866 case R_ARM_THM_ALU_PREL_11_0:
10867 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10868 {
10869 bfd_vma insn;
10870 bfd_signed_vma relocation;
10871
10872 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10873 | bfd_get_16 (input_bfd, hit_data + 2);
10874
10875 if (globals->use_rel)
10876 {
10877 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10878 | ((insn & (1 << 26)) >> 15);
10879 if (insn & 0xf00000)
10880 signed_addend = -signed_addend;
10881 }
10882
10883 relocation = value + signed_addend;
10884 relocation -= Pa (input_section->output_section->vma
10885 + input_section->output_offset
10886 + rel->r_offset);
10887
10888 /* PR 21523: Use an absolute value. The user of this reloc will
10889 have already selected an ADD or SUB insn appropriately. */
10890 value = llabs (relocation);
10891
10892 if (value >= 0x1000)
10893 return bfd_reloc_overflow;
10894
10895 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10896 if (branch_type == ST_BRANCH_TO_THUMB)
10897 value |= 1;
10898
10899 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10900 | ((value & 0x700) << 4)
10901 | ((value & 0x800) << 15);
10902 if (relocation < 0)
10903 insn |= 0xa00000;
10904
10905 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10906 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10907
10908 return bfd_reloc_ok;
10909 }
10910
10911 case R_ARM_THM_PC8:
10912 /* PR 10073: This reloc is not generated by the GNU toolchain,
10913 but it is supported for compatibility with third party libraries
10914 generated by other compilers, specifically the ARM/IAR. */
10915 {
10916 bfd_vma insn;
10917 bfd_signed_vma relocation;
10918
10919 insn = bfd_get_16 (input_bfd, hit_data);
10920
10921 if (globals->use_rel)
10922 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10923
10924 relocation = value + addend;
10925 relocation -= Pa (input_section->output_section->vma
10926 + input_section->output_offset
10927 + rel->r_offset);
10928
10929 value = relocation;
10930
10931 /* We do not check for overflow of this reloc. Although strictly
10932 speaking this is incorrect, it appears to be necessary in order
10933 to work with IAR generated relocs. Since GCC and GAS do not
10934 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10935 a problem for them. */
10936 value &= 0x3fc;
10937
10938 insn = (insn & 0xff00) | (value >> 2);
10939
10940 bfd_put_16 (input_bfd, insn, hit_data);
10941
10942 return bfd_reloc_ok;
10943 }
10944
10945 case R_ARM_THM_PC12:
10946 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10947 {
10948 bfd_vma insn;
10949 bfd_signed_vma relocation;
10950
10951 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10952 | bfd_get_16 (input_bfd, hit_data + 2);
10953
10954 if (globals->use_rel)
10955 {
10956 signed_addend = insn & 0xfff;
10957 if (!(insn & (1 << 23)))
10958 signed_addend = -signed_addend;
10959 }
10960
10961 relocation = value + signed_addend;
10962 relocation -= Pa (input_section->output_section->vma
10963 + input_section->output_offset
10964 + rel->r_offset);
10965
10966 value = relocation;
10967
10968 if (value >= 0x1000)
10969 return bfd_reloc_overflow;
10970
10971 insn = (insn & 0xff7ff000) | value;
10972 if (relocation >= 0)
10973 insn |= (1 << 23);
10974
10975 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10976 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10977
10978 return bfd_reloc_ok;
10979 }
10980
10981 case R_ARM_THM_XPC22:
10982 case R_ARM_THM_CALL:
10983 case R_ARM_THM_JUMP24:
10984 /* Thumb BL (branch long instruction). */
10985 {
10986 bfd_vma relocation;
10987 bfd_vma reloc_sign;
10988 bool overflow = false;
10989 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10990 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10991 bfd_signed_vma reloc_signed_max;
10992 bfd_signed_vma reloc_signed_min;
10993 bfd_vma check;
10994 bfd_signed_vma signed_check;
10995 int bitsize;
10996 const int thumb2 = using_thumb2 (globals);
10997 const int thumb2_bl = using_thumb2_bl (globals);
10998
10999 /* A branch to an undefined weak symbol is turned into a jump to
11000 the next instruction unless a PLT entry will be created.
11001 The jump to the next instruction is optimized as a NOP.W for
11002 Thumb-2 enabled architectures. */
11003 if (h && h->root.type == bfd_link_hash_undefweak
11004 && plt_offset == (bfd_vma) -1)
11005 {
11006 if (thumb2)
11007 {
11008 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11009 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11010 }
11011 else
11012 {
11013 bfd_put_16 (input_bfd, 0xe000, hit_data);
11014 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11015 }
11016 return bfd_reloc_ok;
11017 }
11018
11019 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11020 with Thumb-1) involving the J1 and J2 bits. */
11021 if (globals->use_rel)
11022 {
11023 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11024 bfd_vma upper = upper_insn & 0x3ff;
11025 bfd_vma lower = lower_insn & 0x7ff;
11026 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11027 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11028 bfd_vma i1 = j1 ^ s ? 0 : 1;
11029 bfd_vma i2 = j2 ^ s ? 0 : 1;
11030
11031 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11032 /* Sign extend. */
11033 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11034
11035 signed_addend = addend;
11036 }
11037
11038 if (r_type == R_ARM_THM_XPC22)
11039 {
11040 /* Check for Thumb to Thumb call. */
11041 /* FIXME: Should we translate the instruction into a BL
11042 instruction instead ? */
11043 if (branch_type == ST_BRANCH_TO_THUMB)
11044 _bfd_error_handler
11045 (_("%pB: warning: %s BLX instruction targets"
11046 " %s function '%s'"),
11047 input_bfd, "Thumb",
11048 "Thumb", h ? h->root.root.string : "(local)");
11049 }
11050 else
11051 {
11052 /* If it is not a call to Thumb, assume call to Arm.
11053 If it is a call relative to a section name, then it is not a
11054 function call at all, but rather a long jump. Calls through
11055 the PLT do not require stubs. */
11056 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11057 {
11058 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11059 {
11060 /* Convert BL to BLX. */
11061 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11062 }
11063 else if (( r_type != R_ARM_THM_CALL)
11064 && (r_type != R_ARM_THM_JUMP24))
11065 {
11066 if (elf32_thumb_to_arm_stub
11067 (info, sym_name, input_bfd, output_bfd, input_section,
11068 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11069 error_message))
11070 return bfd_reloc_ok;
11071 else
11072 return bfd_reloc_dangerous;
11073 }
11074 }
11075 else if (branch_type == ST_BRANCH_TO_THUMB
11076 && globals->use_blx
11077 && r_type == R_ARM_THM_CALL)
11078 {
11079 /* Make sure this is a BL. */
11080 lower_insn |= 0x1800;
11081 }
11082 }
11083
11084 enum elf32_arm_stub_type stub_type = arm_stub_none;
11085 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11086 {
11087 /* Check if a stub has to be inserted because the destination
11088 is too far. */
11089 struct elf32_arm_stub_hash_entry *stub_entry;
11090 struct elf32_arm_link_hash_entry *hash;
11091
11092 hash = (struct elf32_arm_link_hash_entry *) h;
11093
11094 stub_type = arm_type_of_stub (info, input_section, rel,
11095 st_type, &branch_type,
11096 hash, value, sym_sec,
11097 input_bfd, sym_name);
11098
11099 if (stub_type != arm_stub_none)
11100 {
11101 /* The target is out of reach or we are changing modes, so
11102 redirect the branch to the local stub for this
11103 function. */
11104 stub_entry = elf32_arm_get_stub_entry (input_section,
11105 sym_sec, h,
11106 rel, globals,
11107 stub_type);
11108 if (stub_entry != NULL)
11109 {
11110 value = (stub_entry->stub_offset
11111 + stub_entry->stub_sec->output_offset
11112 + stub_entry->stub_sec->output_section->vma);
11113
11114 if (plt_offset != (bfd_vma) -1)
11115 *unresolved_reloc_p = false;
11116 }
11117
11118 /* If this call becomes a call to Arm, force BLX. */
11119 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11120 {
11121 if ((stub_entry
11122 && !arm_stub_is_thumb (stub_entry->stub_type))
11123 || branch_type != ST_BRANCH_TO_THUMB)
11124 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11125 }
11126 }
11127 }
11128
11129 /* Handle calls via the PLT. */
11130 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11131 {
11132 value = (splt->output_section->vma
11133 + splt->output_offset
11134 + plt_offset);
11135
11136 if (globals->use_blx
11137 && r_type == R_ARM_THM_CALL
11138 && ! using_thumb_only (globals))
11139 {
11140 /* If the Thumb BLX instruction is available, convert
11141 the BL to a BLX instruction to call the ARM-mode
11142 PLT entry. */
11143 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11144 branch_type = ST_BRANCH_TO_ARM;
11145 }
11146 else
11147 {
11148 if (! using_thumb_only (globals))
11149 /* Target the Thumb stub before the ARM PLT entry. */
11150 value -= PLT_THUMB_STUB_SIZE;
11151 branch_type = ST_BRANCH_TO_THUMB;
11152 }
11153 *unresolved_reloc_p = false;
11154 }
11155
11156 relocation = value + signed_addend;
11157
11158 relocation -= (input_section->output_section->vma
11159 + input_section->output_offset
11160 + rel->r_offset);
11161
11162 check = relocation >> howto->rightshift;
11163
11164 /* If this is a signed value, the rightshift just dropped
11165 leading 1 bits (assuming twos complement). */
11166 if ((bfd_signed_vma) relocation >= 0)
11167 signed_check = check;
11168 else
11169 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11170
 11174 11171 	  /* Calculate the permissible maximum and minimum values for
11172 this relocation according to whether we're relocating for
11173 Thumb-2 or not. */
11174 bitsize = howto->bitsize;
11175 if (!thumb2_bl)
11176 bitsize -= 2;
11177 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11178 reloc_signed_min = ~reloc_signed_max;
11179
11180 /* Assumes two's complement. */
11181 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11182 overflow = true;
11183
11184 if ((lower_insn & 0x5000) == 0x4000)
11185 /* For a BLX instruction, make sure that the relocation is rounded up
11186 to a word boundary. This follows the semantics of the instruction
11187 which specifies that bit 1 of the target address will come from bit
11188 1 of the base address. */
11189 relocation = (relocation + 2) & ~ 3;
11190
11191 /* Put RELOCATION back into the insn. Assumes two's complement.
11192 We use the Thumb-2 encoding, which is safe even if dealing with
11193 a Thumb-1 instruction by virtue of our overflow check above. */
11194 reloc_sign = (signed_check < 0) ? 1 : 0;
11195 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11196 | ((relocation >> 12) & 0x3ff)
11197 | (reloc_sign << 10);
11198 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11199 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11200 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11201 | ((relocation >> 1) & 0x7ff);
11202
11203 /* Put the relocated value back in the object file: */
11204 bfd_put_16 (input_bfd, upper_insn, hit_data);
11205 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11206
11207 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11208 }
11209 break;
11210
11211 case R_ARM_THM_JUMP19:
11212 /* Thumb32 conditional branch instruction. */
11213 {
11214 bfd_vma relocation;
11215 bool overflow = false;
11216 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11217 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11218 bfd_signed_vma reloc_signed_max = 0xffffe;
11219 bfd_signed_vma reloc_signed_min = -0x100000;
11220 bfd_signed_vma signed_check;
11221 enum elf32_arm_stub_type stub_type = arm_stub_none;
11222 struct elf32_arm_stub_hash_entry *stub_entry;
11223 struct elf32_arm_link_hash_entry *hash;
11224
11225 /* Need to refetch the addend, reconstruct the top three bits,
11226 and squish the two 11 bit pieces together. */
11227 if (globals->use_rel)
11228 {
11229 bfd_vma S = (upper_insn & 0x0400) >> 10;
11230 bfd_vma upper = (upper_insn & 0x003f);
11231 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11232 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11233 bfd_vma lower = (lower_insn & 0x07ff);
11234
11235 upper |= J1 << 6;
11236 upper |= J2 << 7;
11237 upper |= (!S) << 8;
11238 upper -= 0x0100; /* Sign extend. */
11239
11240 addend = (upper << 12) | (lower << 1);
11241 signed_addend = addend;
11242 }
11243
11244 /* Handle calls via the PLT. */
11245 if (plt_offset != (bfd_vma) -1)
11246 {
11247 value = (splt->output_section->vma
11248 + splt->output_offset
11249 + plt_offset);
11250 /* Target the Thumb stub before the ARM PLT entry. */
11251 value -= PLT_THUMB_STUB_SIZE;
11252 *unresolved_reloc_p = false;
11253 }
11254
11255 hash = (struct elf32_arm_link_hash_entry *)h;
11256
11257 stub_type = arm_type_of_stub (info, input_section, rel,
11258 st_type, &branch_type,
11259 hash, value, sym_sec,
11260 input_bfd, sym_name);
11261 if (stub_type != arm_stub_none)
11262 {
11263 stub_entry = elf32_arm_get_stub_entry (input_section,
11264 sym_sec, h,
11265 rel, globals,
11266 stub_type);
11267 if (stub_entry != NULL)
11268 {
11269 value = (stub_entry->stub_offset
11270 + stub_entry->stub_sec->output_offset
11271 + stub_entry->stub_sec->output_section->vma);
11272 }
11273 }
11274
11275 relocation = value + signed_addend;
11276 relocation -= (input_section->output_section->vma
11277 + input_section->output_offset
11278 + rel->r_offset);
11279 signed_check = (bfd_signed_vma) relocation;
11280
11281 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11282 overflow = true;
11283
11284 /* Put RELOCATION back into the insn. */
11285 {
11286 bfd_vma S = (relocation & 0x00100000) >> 20;
11287 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11288 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11289 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11290 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11291
11292 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11293 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11294 }
11295
11296 /* Put the relocated value back in the object file: */
11297 bfd_put_16 (input_bfd, upper_insn, hit_data);
11298 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11299
11300 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11301 }
11302
11303 case R_ARM_THM_JUMP11:
11304 case R_ARM_THM_JUMP8:
11305 case R_ARM_THM_JUMP6:
 11309 11306       /* Thumb B (branch) instruction.  */
11307 {
11308 bfd_signed_vma relocation;
11309 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11310 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11311 bfd_signed_vma signed_check;
11312
11313 /* CZB cannot jump backward. */
11314 if (r_type == R_ARM_THM_JUMP6)
11315 {
11316 reloc_signed_min = 0;
11317 if (globals->use_rel)
11318 signed_addend = ((addend & 0x200) >> 3) | ((addend & 0xf8) >> 2);
11319 }
11320
11321 relocation = value + signed_addend;
11322
11323 relocation -= (input_section->output_section->vma
11324 + input_section->output_offset
11325 + rel->r_offset);
11326
11327 relocation >>= howto->rightshift;
11328 signed_check = relocation;
11329
11330 if (r_type == R_ARM_THM_JUMP6)
11331 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11332 else
11333 relocation &= howto->dst_mask;
11334 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11335
11336 bfd_put_16 (input_bfd, relocation, hit_data);
11337
11338 /* Assumes two's complement. */
11339 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11340 return bfd_reloc_overflow;
11341
11342 return bfd_reloc_ok;
11343 }
11344
11345 case R_ARM_ALU_PCREL7_0:
11346 case R_ARM_ALU_PCREL15_8:
11347 case R_ARM_ALU_PCREL23_15:
11348 {
11349 bfd_vma insn;
11350 bfd_vma relocation;
11351
11352 insn = bfd_get_32 (input_bfd, hit_data);
11353 if (globals->use_rel)
11354 {
11355 /* Extract the addend. */
11356 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11357 signed_addend = addend;
11358 }
11359 relocation = value + signed_addend;
11360
11361 relocation -= (input_section->output_section->vma
11362 + input_section->output_offset
11363 + rel->r_offset);
11364 insn = (insn & ~0xfff)
11365 | ((howto->bitpos << 7) & 0xf00)
11366 | ((relocation >> howto->bitpos) & 0xff);
11367 bfd_put_32 (input_bfd, value, hit_data);
11368 }
11369 return bfd_reloc_ok;
11370
11371 case R_ARM_GNU_VTINHERIT:
11372 case R_ARM_GNU_VTENTRY:
11373 return bfd_reloc_ok;
11374
11375 case R_ARM_GOTOFF32:
11376 /* Relocation is relative to the start of the
11377 global offset table. */
11378
11379 BFD_ASSERT (sgot != NULL);
11380 if (sgot == NULL)
11381 return bfd_reloc_notsupported;
11382
11383 /* If we are addressing a Thumb function, we need to adjust the
11384 address by one, so that attempts to call the function pointer will
11385 correctly interpret it as Thumb code. */
11386 if (branch_type == ST_BRANCH_TO_THUMB)
11387 value += 1;
11388
11389 /* Note that sgot->output_offset is not involved in this
11390 calculation. We always want the start of .got. If we
11391 define _GLOBAL_OFFSET_TABLE in a different way, as is
11392 permitted by the ABI, we might have to change this
11393 calculation. */
11394 value -= sgot->output_section->vma;
11395 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11396 contents, rel->r_offset, value,
11397 rel->r_addend);
11398
11399 case R_ARM_GOTPC:
11400 /* Use global offset table as symbol value. */
11401 BFD_ASSERT (sgot != NULL);
11402
11403 if (sgot == NULL)
11404 return bfd_reloc_notsupported;
11405
11406 *unresolved_reloc_p = false;
11407 value = sgot->output_section->vma;
11408 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11409 contents, rel->r_offset, value,
11410 rel->r_addend);
11411
11412 case R_ARM_GOT32:
11413 case R_ARM_GOT_PREL:
11414 /* Relocation is to the entry for this symbol in the
11415 global offset table. */
11416 if (sgot == NULL)
11417 return bfd_reloc_notsupported;
11418
11419 if (dynreloc_st_type == STT_GNU_IFUNC
11420 && plt_offset != (bfd_vma) -1
11421 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11422 {
11423 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11424 symbol, and the relocation resolves directly to the runtime
11425 target rather than to the .iplt entry. This means that any
11426 .got entry would be the same value as the .igot.plt entry,
11427 so there's no point creating both. */
11428 sgot = globals->root.igotplt;
11429 value = sgot->output_offset + gotplt_offset;
11430 }
11431 else if (h != NULL)
11432 {
11433 bfd_vma off;
11434
11435 off = h->got.offset;
11436 BFD_ASSERT (off != (bfd_vma) -1);
11437 if ((off & 1) != 0)
11438 {
 11442 11439 	      /* We have already processed one GOT relocation against
11440 this symbol. */
11441 off &= ~1;
11442 if (globals->root.dynamic_sections_created
11443 && !SYMBOL_REFERENCES_LOCAL (info, h))
11444 *unresolved_reloc_p = false;
11445 }
11446 else
11447 {
11448 Elf_Internal_Rela outrel;
11449 int isrofixup = 0;
11450
11451 if (((h->dynindx != -1) || globals->fdpic_p)
11452 && !SYMBOL_REFERENCES_LOCAL (info, h))
11453 {
11454 /* If the symbol doesn't resolve locally in a static
11455 object, we have an undefined reference. If the
11456 symbol doesn't resolve locally in a dynamic object,
11457 it should be resolved by the dynamic linker. */
11458 if (globals->root.dynamic_sections_created)
11459 {
11460 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11461 *unresolved_reloc_p = false;
11462 }
11463 else
11464 outrel.r_info = 0;
11465 outrel.r_addend = 0;
11466 }
11467 else
11468 {
11469 if (dynreloc_st_type == STT_GNU_IFUNC)
11470 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11471 else if (bfd_link_pic (info)
11472 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11473 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11474 else
11475 {
11476 outrel.r_info = 0;
11477 if (globals->fdpic_p)
11478 isrofixup = 1;
11479 }
11480 outrel.r_addend = dynreloc_value;
11481 }
11482
11483 /* The GOT entry is initialized to zero by default.
11484 See if we should install a different value. */
11485 if (outrel.r_addend != 0
11486 && (globals->use_rel || outrel.r_info == 0))
11487 {
11488 bfd_put_32 (output_bfd, outrel.r_addend,
11489 sgot->contents + off);
11490 outrel.r_addend = 0;
11491 }
11492
11493 if (isrofixup)
11494 arm_elf_add_rofixup (output_bfd,
11495 elf32_arm_hash_table (info)->srofixup,
11496 sgot->output_section->vma
11497 + sgot->output_offset + off);
11498
11499 else if (outrel.r_info != 0)
11500 {
11501 outrel.r_offset = (sgot->output_section->vma
11502 + sgot->output_offset
11503 + off);
11504 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11505 }
11506
11507 h->got.offset |= 1;
11508 }
11509 value = sgot->output_offset + off;
11510 }
11511 else
11512 {
11513 bfd_vma off;
11514
11515 BFD_ASSERT (local_got_offsets != NULL
11516 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11517
11518 off = local_got_offsets[r_symndx];
11519
11520 /* The offset must always be a multiple of 4. We use the
11521 least significant bit to record whether we have already
11522 generated the necessary reloc. */
11523 if ((off & 1) != 0)
11524 off &= ~1;
11525 else
11526 {
11527 Elf_Internal_Rela outrel;
11528 int isrofixup = 0;
11529
11530 if (dynreloc_st_type == STT_GNU_IFUNC)
11531 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11532 else if (bfd_link_pic (info))
11533 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11534 else
11535 {
11536 outrel.r_info = 0;
11537 if (globals->fdpic_p)
11538 isrofixup = 1;
11539 }
11540
11541 /* The GOT entry is initialized to zero by default.
11542 See if we should install a different value. */
11543 if (globals->use_rel || outrel.r_info == 0)
11544 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11545
11546 if (isrofixup)
11547 arm_elf_add_rofixup (output_bfd,
11548 globals->srofixup,
11549 sgot->output_section->vma
11550 + sgot->output_offset + off);
11551
11552 else if (outrel.r_info != 0)
11553 {
11554 outrel.r_addend = addend + dynreloc_value;
11555 outrel.r_offset = (sgot->output_section->vma
11556 + sgot->output_offset
11557 + off);
11558 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11559 }
11560
11561 local_got_offsets[r_symndx] |= 1;
11562 }
11563
11564 value = sgot->output_offset + off;
11565 }
11566 if (r_type != R_ARM_GOT32)
11567 value += sgot->output_section->vma;
11568
11569 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11570 contents, rel->r_offset, value,
11571 rel->r_addend);
11572
11573 case R_ARM_TLS_LDO32:
11574 value = value - dtpoff_base (info);
11575
11576 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11577 contents, rel->r_offset, value,
11578 rel->r_addend);
11579
11580 case R_ARM_TLS_LDM32:
11581 case R_ARM_TLS_LDM32_FDPIC:
11582 {
11583 bfd_vma off;
11584
11585 if (sgot == NULL)
11586 abort ();
11587
11588 off = globals->tls_ldm_got.offset;
11589
11590 if ((off & 1) != 0)
11591 off &= ~1;
11592 else
11593 {
11594 /* If we don't know the module number, create a relocation
11595 for it. */
11596 if (bfd_link_dll (info))
11597 {
11598 Elf_Internal_Rela outrel;
11599
11600 if (srelgot == NULL)
11601 abort ();
11602
11603 outrel.r_addend = 0;
11604 outrel.r_offset = (sgot->output_section->vma
11605 + sgot->output_offset + off);
11606 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11607
11608 if (globals->use_rel)
11609 bfd_put_32 (output_bfd, outrel.r_addend,
11610 sgot->contents + off);
11611
11612 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11613 }
11614 else
11615 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11616
11617 globals->tls_ldm_got.offset |= 1;
11618 }
11619
11620 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11621 {
11622 bfd_put_32 (output_bfd,
11623 globals->root.sgot->output_offset + off,
11624 contents + rel->r_offset);
11625
11626 return bfd_reloc_ok;
11627 }
11628 else
11629 {
11630 value = sgot->output_section->vma + sgot->output_offset + off
11631 - (input_section->output_section->vma
11632 + input_section->output_offset + rel->r_offset);
11633
11634 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11635 contents, rel->r_offset, value,
11636 rel->r_addend);
11637 }
11638 }
11639
11640 case R_ARM_TLS_CALL:
11641 case R_ARM_THM_TLS_CALL:
11642 case R_ARM_TLS_GD32:
11643 case R_ARM_TLS_GD32_FDPIC:
11644 case R_ARM_TLS_IE32:
11645 case R_ARM_TLS_IE32_FDPIC:
11646 case R_ARM_TLS_GOTDESC:
11647 case R_ARM_TLS_DESCSEQ:
11648 case R_ARM_THM_TLS_DESCSEQ:
11649 {
11650 bfd_vma off, offplt;
11651 int indx = 0;
11652 char tls_type;
11653
11654 BFD_ASSERT (sgot != NULL);
11655
11656 if (h != NULL)
11657 {
11658 bool dyn;
11659 dyn = globals->root.dynamic_sections_created;
11660 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11661 bfd_link_pic (info),
11662 h)
11663 && (!bfd_link_pic (info)
11664 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11665 {
11666 *unresolved_reloc_p = false;
11667 indx = h->dynindx;
11668 }
11669 off = h->got.offset;
11670 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11671 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11672 }
11673 else
11674 {
11675 BFD_ASSERT (local_got_offsets != NULL);
11676
11677 if (r_symndx >= elf32_arm_num_entries (input_bfd))
11678 {
11679 _bfd_error_handler (_("\
11680 %pB: expected symbol index in range 0..%lu but found local symbol with index %lu"),
11681 input_bfd,
11682 (unsigned long) elf32_arm_num_entries (input_bfd),
11683 r_symndx);
11684 return false;
11685 }
11686 off = local_got_offsets[r_symndx];
11687 offplt = local_tlsdesc_gotents[r_symndx];
11688 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11689 }
11690
 11694 11691 	/* Linker relaxation happens from one of the
11692 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11693 if (ELF32_R_TYPE (rel->r_info) != r_type)
11694 tls_type = GOT_TLS_IE;
11695
11696 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11697
11698 if ((off & 1) != 0)
11699 off &= ~1;
11700 else
11701 {
11702 bool need_relocs = false;
11703 Elf_Internal_Rela outrel;
11704 int cur_off = off;
11705
11706 /* The GOT entries have not been initialized yet. Do it
11707 now, and emit any relocations. If both an IE GOT and a
11708 GD GOT are necessary, we emit the GD first. */
11709
11710 if ((bfd_link_dll (info) || indx != 0)
11711 && (h == NULL
11712 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11713 && !resolved_to_zero)
11714 || h->root.type != bfd_link_hash_undefweak))
11715 {
11716 need_relocs = true;
11717 BFD_ASSERT (srelgot != NULL);
11718 }
11719
11720 if (tls_type & GOT_TLS_GDESC)
11721 {
11722 bfd_byte *loc;
11723
11724 /* We should have relaxed, unless this is an undefined
11725 weak symbol. */
11726 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11727 || bfd_link_dll (info));
11728 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11729 <= globals->root.sgotplt->size);
11730
11731 outrel.r_addend = 0;
11732 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11733 + globals->root.sgotplt->output_offset
11734 + offplt
11735 + globals->sgotplt_jump_table_size);
11736
11737 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11738 sreloc = globals->root.srelplt;
11739 loc = sreloc->contents;
11740 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11741 BFD_ASSERT (loc + RELOC_SIZE (globals)
11742 <= sreloc->contents + sreloc->size);
11743
11744 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11745
11746 /* For globals, the first word in the relocation gets
11747 the relocation index and the top bit set, or zero,
11748 if we're binding now. For locals, it gets the
11749 symbol's offset in the tls section. */
11750 bfd_put_32 (output_bfd,
11751 !h ? value - elf_hash_table (info)->tls_sec->vma
11752 : info->flags & DF_BIND_NOW ? 0
11753 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11754 globals->root.sgotplt->contents + offplt
11755 + globals->sgotplt_jump_table_size);
11756
11757 /* Second word in the relocation is always zero. */
11758 bfd_put_32 (output_bfd, 0,
11759 globals->root.sgotplt->contents + offplt
11760 + globals->sgotplt_jump_table_size + 4);
11761 }
11762 if (tls_type & GOT_TLS_GD)
11763 {
11764 if (need_relocs)
11765 {
11766 outrel.r_addend = 0;
11767 outrel.r_offset = (sgot->output_section->vma
11768 + sgot->output_offset
11769 + cur_off);
11770 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11771
11772 if (globals->use_rel)
11773 bfd_put_32 (output_bfd, outrel.r_addend,
11774 sgot->contents + cur_off);
11775
11776 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11777
11778 if (indx == 0)
11779 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11780 sgot->contents + cur_off + 4);
11781 else
11782 {
11783 outrel.r_addend = 0;
11784 outrel.r_info = ELF32_R_INFO (indx,
11785 R_ARM_TLS_DTPOFF32);
11786 outrel.r_offset += 4;
11787
11788 if (globals->use_rel)
11789 bfd_put_32 (output_bfd, outrel.r_addend,
11790 sgot->contents + cur_off + 4);
11791
11792 elf32_arm_add_dynreloc (output_bfd, info,
11793 srelgot, &outrel);
11794 }
11795 }
11796 else
11797 {
11798 /* If we are not emitting relocations for a
11799 general dynamic reference, then we must be in a
11800 static link or an executable link with the
11801 symbol binding locally. Mark it as belonging
11802 to module 1, the executable. */
11803 bfd_put_32 (output_bfd, 1,
11804 sgot->contents + cur_off);
11805 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11806 sgot->contents + cur_off + 4);
11807 }
11808
11809 cur_off += 8;
11810 }
11811
11812 if (tls_type & GOT_TLS_IE)
11813 {
11814 if (need_relocs)
11815 {
11816 if (indx == 0)
11817 outrel.r_addend = value - dtpoff_base (info);
11818 else
11819 outrel.r_addend = 0;
11820 outrel.r_offset = (sgot->output_section->vma
11821 + sgot->output_offset
11822 + cur_off);
11823 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11824
11825 if (globals->use_rel)
11826 bfd_put_32 (output_bfd, outrel.r_addend,
11827 sgot->contents + cur_off);
11828
11829 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11830 }
11831 else
11832 bfd_put_32 (output_bfd, tpoff (info, value),
11833 sgot->contents + cur_off);
11834 cur_off += 4;
11835 }
11836
11837 if (h != NULL)
11838 h->got.offset |= 1;
11839 else
11840 local_got_offsets[r_symndx] |= 1;
11841 }
11842
11843 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11844 off += 8;
11845 else if (tls_type & GOT_TLS_GDESC)
11846 off = offplt;
11847
11848 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
11849 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL)
11850 {
11851 bfd_signed_vma offset;
11852 /* TLS stubs are arm mode. The original symbol is a
11853 data object, so branch_type is bogus. */
11854 branch_type = ST_BRANCH_TO_ARM;
11855 enum elf32_arm_stub_type stub_type
11856 = arm_type_of_stub (info, input_section, rel,
11857 st_type, &branch_type,
11858 (struct elf32_arm_link_hash_entry *)h,
11859 globals->tls_trampoline, globals->root.splt,
11860 input_bfd, sym_name);
11861
11862 if (stub_type != arm_stub_none)
11863 {
11864 struct elf32_arm_stub_hash_entry *stub_entry
11865 = elf32_arm_get_stub_entry
11866 (input_section, globals->root.splt, 0, rel,
11867 globals, stub_type);
11868 offset = (stub_entry->stub_offset
11869 + stub_entry->stub_sec->output_offset
11870 + stub_entry->stub_sec->output_section->vma);
11871 }
11872 else
11873 offset = (globals->root.splt->output_section->vma
11874 + globals->root.splt->output_offset
11875 + globals->tls_trampoline);
11876
11877 if (ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL)
11878 {
11879 unsigned long inst;
11880
11881 offset -= (input_section->output_section->vma
11882 + input_section->output_offset
11883 + rel->r_offset + 8);
11884
11885 inst = offset >> 2;
11886 inst &= 0x00ffffff;
11887 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11888 }
11889 else
11890 {
11891 /* Thumb blx encodes the offset in a complicated
11892 fashion. */
11893 unsigned upper_insn, lower_insn;
11894 unsigned neg;
11895
11896 offset -= (input_section->output_section->vma
11897 + input_section->output_offset
11898 + rel->r_offset + 4);
11899
11900 if (stub_type != arm_stub_none
11901 && arm_stub_is_thumb (stub_type))
11902 {
11903 lower_insn = 0xd000;
11904 }
11905 else
11906 {
11907 lower_insn = 0xc000;
11908 /* Round up the offset to a word boundary. */
11909 offset = (offset + 2) & ~2;
11910 }
11911
11912 neg = offset < 0;
11913 upper_insn = (0xf000
11914 | ((offset >> 12) & 0x3ff)
11915 | (neg << 10));
11916 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11917 | (((!((offset >> 22) & 1)) ^ neg) << 11)
11918 | ((offset >> 1) & 0x7ff);
11919 bfd_put_16 (input_bfd, upper_insn, hit_data);
11920 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11921 return bfd_reloc_ok;
11922 }
11923 }
 11927 11924 	/* These relocations need special care, as besides the fact
11925 they point somewhere in .gotplt, the addend must be
11926 adjusted accordingly depending on the type of instruction
11927 we refer to. */
11928 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11929 {
11930 unsigned long data, insn;
11931 unsigned thumb;
11932
11933 data = bfd_get_signed_32 (input_bfd, hit_data);
11934 thumb = data & 1;
11935 data &= ~1ul;
11936
11937 if (thumb)
11938 {
11939 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11940 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11941 insn = (insn << 16)
11942 | bfd_get_16 (input_bfd,
11943 contents + rel->r_offset - data + 2);
11944 if ((insn & 0xf800c000) == 0xf000c000)
11945 /* bl/blx */
11946 value = -6;
11947 else if ((insn & 0xffffff00) == 0x4400)
11948 /* add */
11949 value = -5;
11950 else
11951 {
11952 _bfd_error_handler
11953 /* xgettext:c-format */
11954 (_("%pB(%pA+%#" PRIx64 "): "
11955 "unexpected %s instruction '%#lx' "
11956 "referenced by TLS_GOTDESC"),
11957 input_bfd, input_section, (uint64_t) rel->r_offset,
11958 "Thumb", insn);
11959 return bfd_reloc_notsupported;
11960 }
11961 }
11962 else
11963 {
11964 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11965
11966 switch (insn >> 24)
11967 {
11968 case 0xeb: /* bl */
11969 case 0xfa: /* blx */
11970 value = -4;
11971 break;
11972
11973 case 0xe0: /* add */
11974 value = -8;
11975 break;
11976
11977 default:
11978 _bfd_error_handler
11979 /* xgettext:c-format */
11980 (_("%pB(%pA+%#" PRIx64 "): "
11981 "unexpected %s instruction '%#lx' "
11982 "referenced by TLS_GOTDESC"),
11983 input_bfd, input_section, (uint64_t) rel->r_offset,
11984 "ARM", insn);
11985 return bfd_reloc_notsupported;
11986 }
11987 }
11988
11989 value += ((globals->root.sgotplt->output_section->vma
11990 + globals->root.sgotplt->output_offset + off)
11991 - (input_section->output_section->vma
11992 + input_section->output_offset
11993 + rel->r_offset)
11994 + globals->sgotplt_jump_table_size);
11995 }
11996 else
11997 value = ((globals->root.sgot->output_section->vma
11998 + globals->root.sgot->output_offset + off)
11999 - (input_section->output_section->vma
12000 + input_section->output_offset + rel->r_offset));
12001
12002 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12003 r_type == R_ARM_TLS_IE32_FDPIC))
12004 {
12005 /* For FDPIC relocations, resolve to the offset of the GOT
12006 entry from the start of GOT. */
12007 bfd_put_32 (output_bfd,
12008 globals->root.sgot->output_offset + off,
12009 contents + rel->r_offset);
12010
12011 return bfd_reloc_ok;
12012 }
12013 else
12014 {
12015 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12016 contents, rel->r_offset, value,
12017 rel->r_addend);
12018 }
12019 }
12020
12021 case R_ARM_TLS_LE32:
12022 if (bfd_link_dll (info))
12023 {
12024 _bfd_error_handler
12025 /* xgettext:c-format */
12026 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12027 "in shared object"),
12028 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12029 return bfd_reloc_notsupported;
12030 }
12031 else
12032 value = tpoff (info, value);
12033
12034 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12035 contents, rel->r_offset, value,
12036 rel->r_addend);
12037
12038 case R_ARM_V4BX:
12039 if (globals->fix_v4bx)
12040 {
12041 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12042
12043 /* Ensure that we have a BX instruction. */
12044 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12045
12046 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12047 {
12048 /* Branch to veneer. */
12049 bfd_vma glue_addr;
12050 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12051 glue_addr -= input_section->output_section->vma
12052 + input_section->output_offset
12053 + rel->r_offset + 8;
12054 insn = (insn & 0xf0000000) | 0x0a000000
12055 | ((glue_addr >> 2) & 0x00ffffff);
12056 }
12057 else
12058 {
12059 /* Preserve Rm (lowest four bits) and the condition code
12060 (highest four bits). Other bits encode MOV PC,Rm. */
12061 insn = (insn & 0xf000000f) | 0x01a0f000;
12062 }
12063
12064 bfd_put_32 (input_bfd, insn, hit_data);
12065 }
12066 return bfd_reloc_ok;
12067
12068 case R_ARM_MOVW_ABS_NC:
12069 case R_ARM_MOVT_ABS:
12070 case R_ARM_MOVW_PREL_NC:
12071 case R_ARM_MOVT_PREL:
12072 /* Until we properly support segment-base-relative addressing then
12073 we assume the segment base to be zero, as for the group relocations.
12074 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12075 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12076 case R_ARM_MOVW_BREL_NC:
12077 case R_ARM_MOVW_BREL:
12078 case R_ARM_MOVT_BREL:
12079 {
12080 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12081
12082 if (globals->use_rel)
12083 {
12084 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12085 signed_addend = (addend ^ 0x8000) - 0x8000;
12086 }
12087
12088 value += signed_addend;
12089
12090 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12091 value -= (input_section->output_section->vma
12092 + input_section->output_offset + rel->r_offset);
12093
12094 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12095 return bfd_reloc_overflow;
12096
12097 if (branch_type == ST_BRANCH_TO_THUMB)
12098 value |= 1;
12099
12100 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12101 || r_type == R_ARM_MOVT_BREL)
12102 value >>= 16;
12103
12104 insn &= 0xfff0f000;
12105 insn |= value & 0xfff;
12106 insn |= (value & 0xf000) << 4;
12107 bfd_put_32 (input_bfd, insn, hit_data);
12108 }
12109 return bfd_reloc_ok;
12110
12111 case R_ARM_THM_MOVW_ABS_NC:
12112 case R_ARM_THM_MOVT_ABS:
12113 case R_ARM_THM_MOVW_PREL_NC:
12114 case R_ARM_THM_MOVT_PREL:
12115 /* Until we properly support segment-base-relative addressing then
12116 we assume the segment base to be zero, as for the above relocations.
12117 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12118 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12119 as R_ARM_THM_MOVT_ABS. */
12120 case R_ARM_THM_MOVW_BREL_NC:
12121 case R_ARM_THM_MOVW_BREL:
12122 case R_ARM_THM_MOVT_BREL:
12123 {
12124 bfd_vma insn;
12125
12126 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12127 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12128
12129 if (globals->use_rel)
12130 {
12131 addend = ((insn >> 4) & 0xf000)
12132 | ((insn >> 15) & 0x0800)
12133 | ((insn >> 4) & 0x0700)
12134 | (insn & 0x00ff);
12135 signed_addend = (addend ^ 0x8000) - 0x8000;
12136 }
12137
12138 value += signed_addend;
12139
12140 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12141 value -= (input_section->output_section->vma
12142 + input_section->output_offset + rel->r_offset);
12143
12144 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12145 return bfd_reloc_overflow;
12146
12147 if (branch_type == ST_BRANCH_TO_THUMB)
12148 value |= 1;
12149
12150 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12151 || r_type == R_ARM_THM_MOVT_BREL)
12152 value >>= 16;
12153
12154 insn &= 0xfbf08f00;
12155 insn |= (value & 0xf000) << 4;
12156 insn |= (value & 0x0800) << 15;
12157 insn |= (value & 0x0700) << 4;
12158 insn |= (value & 0x00ff);
12159
12160 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12161 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12162 }
12163 return bfd_reloc_ok;
12164
12165 case R_ARM_ALU_PC_G0_NC:
12166 case R_ARM_ALU_PC_G1_NC:
12167 case R_ARM_ALU_PC_G0:
12168 case R_ARM_ALU_PC_G1:
12169 case R_ARM_ALU_PC_G2:
12170 case R_ARM_ALU_SB_G0_NC:
12171 case R_ARM_ALU_SB_G1_NC:
12172 case R_ARM_ALU_SB_G0:
12173 case R_ARM_ALU_SB_G1:
12174 case R_ARM_ALU_SB_G2:
12175 {
12176 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12177 bfd_vma pc = input_section->output_section->vma
12178 + input_section->output_offset + rel->r_offset;
12179 /* sb is the origin of the *segment* containing the symbol. */
12180 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12181 bfd_vma residual;
12182 bfd_vma g_n;
12183 bfd_signed_vma signed_value;
12184 int group = 0;
12185
12186 /* Determine which group of bits to select. */
12187 switch (r_type)
12188 {
12189 case R_ARM_ALU_PC_G0_NC:
12190 case R_ARM_ALU_PC_G0:
12191 case R_ARM_ALU_SB_G0_NC:
12192 case R_ARM_ALU_SB_G0:
12193 group = 0;
12194 break;
12195
12196 case R_ARM_ALU_PC_G1_NC:
12197 case R_ARM_ALU_PC_G1:
12198 case R_ARM_ALU_SB_G1_NC:
12199 case R_ARM_ALU_SB_G1:
12200 group = 1;
12201 break;
12202
12203 case R_ARM_ALU_PC_G2:
12204 case R_ARM_ALU_SB_G2:
12205 group = 2;
12206 break;
12207
12208 default:
12209 abort ();
12210 }
12211
12212 /* If REL, extract the addend from the insn. If RELA, it will
12213 have already been fetched for us. */
12214 if (globals->use_rel)
12215 {
12216 int negative;
12217 bfd_vma constant = insn & 0xff;
12218 bfd_vma rotation = (insn & 0xf00) >> 8;
12219
12220 if (rotation == 0)
12221 signed_addend = constant;
12222 else
12223 {
12224 /* Compensate for the fact that in the instruction, the
12225 rotation is stored in multiples of 2 bits. */
12226 rotation *= 2;
12227
12228 /* Rotate "constant" right by "rotation" bits. */
12229 signed_addend = (constant >> rotation) |
12230 (constant << (8 * sizeof (bfd_vma) - rotation));
12231 }
12232
12233 /* Determine if the instruction is an ADD or a SUB.
12234 (For REL, this determines the sign of the addend.) */
12235 negative = identify_add_or_sub (insn);
12236 if (negative == 0)
12237 {
12238 _bfd_error_handler
12239 /* xgettext:c-format */
12240 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12241 "are allowed for ALU group relocations"),
12242 input_bfd, input_section, (uint64_t) rel->r_offset);
12243 return bfd_reloc_overflow;
12244 }
12245
12246 signed_addend *= negative;
12247 }
12248
12249 /* Compute the value (X) to go in the place. */
12250 if (r_type == R_ARM_ALU_PC_G0_NC
12251 || r_type == R_ARM_ALU_PC_G1_NC
12252 || r_type == R_ARM_ALU_PC_G0
12253 || r_type == R_ARM_ALU_PC_G1
12254 || r_type == R_ARM_ALU_PC_G2)
12255 /* PC relative. */
12256 signed_value = value - pc + signed_addend;
12257 else
12258 /* Section base relative. */
12259 signed_value = value - sb + signed_addend;
12260
12261 /* If the target symbol is a Thumb function, then set the
12262 Thumb bit in the address. */
12263 if (branch_type == ST_BRANCH_TO_THUMB)
12264 signed_value |= 1;
12265
12266 /* Calculate the value of the relevant G_n, in encoded
12267 constant-with-rotation format. */
12268 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12269 group, &residual);
12270
12271 /* Check for overflow if required. */
12272 if ((r_type == R_ARM_ALU_PC_G0
12273 || r_type == R_ARM_ALU_PC_G1
12274 || r_type == R_ARM_ALU_PC_G2
12275 || r_type == R_ARM_ALU_SB_G0
12276 || r_type == R_ARM_ALU_SB_G1
12277 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12278 {
12279 _bfd_error_handler
12280 /* xgettext:c-format */
12281 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12282 "splitting %#" PRIx64 " for group relocation %s"),
12283 input_bfd, input_section, (uint64_t) rel->r_offset,
12284 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12285 howto->name);
12286 return bfd_reloc_overflow;
12287 }
12288
12289 /* Mask out the value and the ADD/SUB part of the opcode; take care
12290 not to destroy the S bit. */
12291 insn &= 0xff1ff000;
12292
12293 /* Set the opcode according to whether the value to go in the
12294 place is negative. */
12295 if (signed_value < 0)
12296 insn |= 1 << 22;
12297 else
12298 insn |= 1 << 23;
12299
12300 /* Encode the offset. */
12301 insn |= g_n;
12302
12303 bfd_put_32 (input_bfd, insn, hit_data);
12304 }
12305 return bfd_reloc_ok;
12306
12307 case R_ARM_LDR_PC_G0:
12308 case R_ARM_LDR_PC_G1:
12309 case R_ARM_LDR_PC_G2:
12310 case R_ARM_LDR_SB_G0:
12311 case R_ARM_LDR_SB_G1:
12312 case R_ARM_LDR_SB_G2:
12313 {
12314 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12315 bfd_vma pc = input_section->output_section->vma
12316 + input_section->output_offset + rel->r_offset;
12317 /* sb is the origin of the *segment* containing the symbol. */
12318 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12319 bfd_vma residual;
12320 bfd_signed_vma signed_value;
12321 int group = 0;
12322
12323 /* Determine which groups of bits to calculate. */
12324 switch (r_type)
12325 {
12326 case R_ARM_LDR_PC_G0:
12327 case R_ARM_LDR_SB_G0:
12328 group = 0;
12329 break;
12330
12331 case R_ARM_LDR_PC_G1:
12332 case R_ARM_LDR_SB_G1:
12333 group = 1;
12334 break;
12335
12336 case R_ARM_LDR_PC_G2:
12337 case R_ARM_LDR_SB_G2:
12338 group = 2;
12339 break;
12340
12341 default:
12342 abort ();
12343 }
12344
12345 /* If REL, extract the addend from the insn. If RELA, it will
12346 have already been fetched for us. */
12347 if (globals->use_rel)
12348 {
12349 int negative = (insn & (1 << 23)) ? 1 : -1;
12350 signed_addend = negative * (insn & 0xfff);
12351 }
12352
12353 /* Compute the value (X) to go in the place. */
12354 if (r_type == R_ARM_LDR_PC_G0
12355 || r_type == R_ARM_LDR_PC_G1
12356 || r_type == R_ARM_LDR_PC_G2)
12357 /* PC relative. */
12358 signed_value = value - pc + signed_addend;
12359 else
12360 /* Section base relative. */
12361 signed_value = value - sb + signed_addend;
12362
12363 /* Calculate the value of the relevant G_{n-1} to obtain
12364 the residual at that stage. */
12365 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12366 group - 1, &residual);
12367
12368 /* Check for overflow. */
12369 if (residual >= 0x1000)
12370 {
12371 _bfd_error_handler
12372 /* xgettext:c-format */
12373 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12374 "splitting %#" PRIx64 " for group relocation %s"),
12375 input_bfd, input_section, (uint64_t) rel->r_offset,
12376 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12377 howto->name);
12378 return bfd_reloc_overflow;
12379 }
12380
12381 /* Mask out the value and U bit. */
12382 insn &= 0xff7ff000;
12383
12384 /* Set the U bit if the value to go in the place is non-negative. */
12385 if (signed_value >= 0)
12386 insn |= 1 << 23;
12387
12388 /* Encode the offset. */
12389 insn |= residual;
12390
12391 bfd_put_32 (input_bfd, insn, hit_data);
12392 }
12393 return bfd_reloc_ok;
12394
12395 case R_ARM_LDRS_PC_G0:
12396 case R_ARM_LDRS_PC_G1:
12397 case R_ARM_LDRS_PC_G2:
12398 case R_ARM_LDRS_SB_G0:
12399 case R_ARM_LDRS_SB_G1:
12400 case R_ARM_LDRS_SB_G2:
12401 {
12402 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12403 bfd_vma pc = input_section->output_section->vma
12404 + input_section->output_offset + rel->r_offset;
12405 /* sb is the origin of the *segment* containing the symbol. */
12406 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12407 bfd_vma residual;
12408 bfd_signed_vma signed_value;
12409 int group = 0;
12410
12411 /* Determine which groups of bits to calculate. */
12412 switch (r_type)
12413 {
12414 case R_ARM_LDRS_PC_G0:
12415 case R_ARM_LDRS_SB_G0:
12416 group = 0;
12417 break;
12418
12419 case R_ARM_LDRS_PC_G1:
12420 case R_ARM_LDRS_SB_G1:
12421 group = 1;
12422 break;
12423
12424 case R_ARM_LDRS_PC_G2:
12425 case R_ARM_LDRS_SB_G2:
12426 group = 2;
12427 break;
12428
12429 default:
12430 abort ();
12431 }
12432
12433 /* If REL, extract the addend from the insn. If RELA, it will
12434 have already been fetched for us. */
12435 if (globals->use_rel)
12436 {
12437 int negative = (insn & (1 << 23)) ? 1 : -1;
12438 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12439 }
12440
12441 /* Compute the value (X) to go in the place. */
12442 if (r_type == R_ARM_LDRS_PC_G0
12443 || r_type == R_ARM_LDRS_PC_G1
12444 || r_type == R_ARM_LDRS_PC_G2)
12445 /* PC relative. */
12446 signed_value = value - pc + signed_addend;
12447 else
12448 /* Section base relative. */
12449 signed_value = value - sb + signed_addend;
12450
12451 /* Calculate the value of the relevant G_{n-1} to obtain
12452 the residual at that stage. */
12453 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12454 group - 1, &residual);
12455
12456 /* Check for overflow. */
12457 if (residual >= 0x100)
12458 {
12459 _bfd_error_handler
12460 /* xgettext:c-format */
12461 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12462 "splitting %#" PRIx64 " for group relocation %s"),
12463 input_bfd, input_section, (uint64_t) rel->r_offset,
12464 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12465 howto->name);
12466 return bfd_reloc_overflow;
12467 }
12468
12469 /* Mask out the value and U bit. */
12470 insn &= 0xff7ff0f0;
12471
12472 /* Set the U bit if the value to go in the place is non-negative. */
12473 if (signed_value >= 0)
12474 insn |= 1 << 23;
12475
12476 /* Encode the offset. */
12477 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12478
12479 bfd_put_32 (input_bfd, insn, hit_data);
12480 }
12481 return bfd_reloc_ok;
12482
12483 case R_ARM_LDC_PC_G0:
12484 case R_ARM_LDC_PC_G1:
12485 case R_ARM_LDC_PC_G2:
12486 case R_ARM_LDC_SB_G0:
12487 case R_ARM_LDC_SB_G1:
12488 case R_ARM_LDC_SB_G2:
12489 {
12490 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12491 bfd_vma pc = input_section->output_section->vma
12492 + input_section->output_offset + rel->r_offset;
12493 /* sb is the origin of the *segment* containing the symbol. */
12494 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12495 bfd_vma residual;
12496 bfd_signed_vma signed_value;
12497 int group = 0;
12498
12499 /* Determine which groups of bits to calculate. */
12500 switch (r_type)
12501 {
12502 case R_ARM_LDC_PC_G0:
12503 case R_ARM_LDC_SB_G0:
12504 group = 0;
12505 break;
12506
12507 case R_ARM_LDC_PC_G1:
12508 case R_ARM_LDC_SB_G1:
12509 group = 1;
12510 break;
12511
12512 case R_ARM_LDC_PC_G2:
12513 case R_ARM_LDC_SB_G2:
12514 group = 2;
12515 break;
12516
12517 default:
12518 abort ();
12519 }
12520
12521 /* If REL, extract the addend from the insn. If RELA, it will
12522 have already been fetched for us. */
12523 if (globals->use_rel)
12524 {
12525 int negative = (insn & (1 << 23)) ? 1 : -1;
12526 signed_addend = negative * ((insn & 0xff) << 2);
12527 }
12528
12529 /* Compute the value (X) to go in the place. */
12530 if (r_type == R_ARM_LDC_PC_G0
12531 || r_type == R_ARM_LDC_PC_G1
12532 || r_type == R_ARM_LDC_PC_G2)
12533 /* PC relative. */
12534 signed_value = value - pc + signed_addend;
12535 else
12536 /* Section base relative. */
12537 signed_value = value - sb + signed_addend;
12538
12539 /* Calculate the value of the relevant G_{n-1} to obtain
12540 the residual at that stage. */
12541 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12542 group - 1, &residual);
12543
12544 /* Check for overflow. (The absolute value to go in the place must be
12545 divisible by four and, after having been divided by four, must
12546 fit in eight bits.) */
12547 if ((residual & 0x3) != 0 || residual >= 0x400)
12548 {
12549 _bfd_error_handler
12550 /* xgettext:c-format */
12551 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12552 "splitting %#" PRIx64 " for group relocation %s"),
12553 input_bfd, input_section, (uint64_t) rel->r_offset,
12554 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12555 howto->name);
12556 return bfd_reloc_overflow;
12557 }
12558
12559 /* Mask out the value and U bit. */
12560 insn &= 0xff7fff00;
12561
12562 /* Set the U bit if the value to go in the place is non-negative. */
12563 if (signed_value >= 0)
12564 insn |= 1 << 23;
12565
12566 /* Encode the offset. */
12567 insn |= residual >> 2;
12568
12569 bfd_put_32 (input_bfd, insn, hit_data);
12570 }
12571 return bfd_reloc_ok;
12572
12573 case R_ARM_THM_ALU_ABS_G0_NC:
12574 case R_ARM_THM_ALU_ABS_G1_NC:
12575 case R_ARM_THM_ALU_ABS_G2_NC:
12576 case R_ARM_THM_ALU_ABS_G3_NC:
12577 {
12578 const int shift_array[4] = {0, 8, 16, 24};
12579 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12580 bfd_vma addr = value;
12581 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12582
12583 /* Compute address. */
12584 if (globals->use_rel)
12585 signed_addend = insn & 0xff;
12586 addr += signed_addend;
12587 if (branch_type == ST_BRANCH_TO_THUMB)
12588 addr |= 1;
12589 /* Clean imm8 insn. */
12590 insn &= 0xff00;
12591 /* And update with correct part of address. */
12592 insn |= (addr >> shift) & 0xff;
12593 /* Update insn. */
12594 bfd_put_16 (input_bfd, insn, hit_data);
12595 }
12596
12597 *unresolved_reloc_p = false;
12598 return bfd_reloc_ok;
12599
12600 case R_ARM_GOTOFFFUNCDESC:
12601 {
12602 if (h == NULL)
12603 {
12604 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12605 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12606
12607 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12608 {
12609 * error_message = _("local symbol index too big");
12610 return bfd_reloc_dangerous;
12611 }
12612
12613 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12614 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12615 bfd_vma seg = -1;
12616
12617 if (bfd_link_pic (info) && dynindx == 0)
12618 {
12619 * error_message = _("no dynamic index information available");
12620 return bfd_reloc_dangerous;
12621 }
12622
12623 /* Resolve relocation. */
12624 bfd_put_32 (output_bfd, (offset + sgot->output_offset)
12625 , contents + rel->r_offset);
12626 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12627 not done yet. */
12628 arm_elf_fill_funcdesc (output_bfd, info,
12629 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12630 dynindx, offset, addr, dynreloc_value, seg);
12631 }
12632 else
12633 {
12634 int dynindx;
12635 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12636 bfd_vma addr;
12637 bfd_vma seg = -1;
12638
12639 /* For static binaries, sym_sec can be null. */
12640 if (sym_sec)
12641 {
12642 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12643 addr = dynreloc_value - sym_sec->output_section->vma;
12644 }
12645 else
12646 {
12647 dynindx = 0;
12648 addr = 0;
12649 }
12650
12651 if (bfd_link_pic (info) && dynindx == 0)
12652 {
12653 * error_message = _("no dynamic index information available");
12654 return bfd_reloc_dangerous;
12655 }
12656
12657 /* This case cannot occur since funcdesc is allocated by
12658 the dynamic loader so we cannot resolve the relocation. */
12659 if (h->dynindx != -1)
12660 {
12661 * error_message = _("invalid dynamic index");
12662 return bfd_reloc_dangerous;
12663 }
12664
12665 /* Resolve relocation. */
12666 bfd_put_32 (output_bfd, (offset + sgot->output_offset),
12667 contents + rel->r_offset);
12668 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12669 arm_elf_fill_funcdesc (output_bfd, info,
12670 &eh->fdpic_cnts.funcdesc_offset,
12671 dynindx, offset, addr, dynreloc_value, seg);
12672 }
12673 }
12674 *unresolved_reloc_p = false;
12675 return bfd_reloc_ok;
12676
12677 case R_ARM_GOTFUNCDESC:
12678 {
12679 if (h != NULL)
12680 {
12681 Elf_Internal_Rela outrel;
12682
12683 /* Resolve relocation. */
12684 bfd_put_32 (output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12685 + sgot->output_offset),
12686 contents + rel->r_offset);
12687 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12688 if (h->dynindx == -1)
12689 {
12690 int dynindx;
12691 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12692 bfd_vma addr;
12693 bfd_vma seg = -1;
12694
12695 /* For static binaries sym_sec can be null. */
12696 if (sym_sec)
12697 {
12698 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12699 addr = dynreloc_value - sym_sec->output_section->vma;
12700 }
12701 else
12702 {
12703 dynindx = 0;
12704 addr = 0;
12705 }
12706
12707 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12708 arm_elf_fill_funcdesc (output_bfd, info,
12709 &eh->fdpic_cnts.funcdesc_offset,
12710 dynindx, offset, addr, dynreloc_value, seg);
12711 }
12712
12713 /* Add a dynamic relocation on GOT entry if not already done. */
12714 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12715 {
12716 if (h->dynindx == -1)
12717 {
12718 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12719 if (h->root.type == bfd_link_hash_undefweak)
12720 bfd_put_32 (output_bfd, 0, sgot->contents
12721 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12722 else
12723 bfd_put_32 (output_bfd, sgot->output_section->vma
12724 + sgot->output_offset
12725 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12726 sgot->contents
12727 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12728 }
12729 else
12730 {
12731 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12732 }
12733 outrel.r_offset = sgot->output_section->vma
12734 + sgot->output_offset
12735 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12736 outrel.r_addend = 0;
12737 if (h->dynindx == -1 && !bfd_link_pic (info))
12738 if (h->root.type == bfd_link_hash_undefweak)
12739 arm_elf_add_rofixup (output_bfd, globals->srofixup, -1);
12740 else
12741 arm_elf_add_rofixup (output_bfd, globals->srofixup,
12742 outrel.r_offset);
12743 else
12744 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12745 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12746 }
12747 }
12748 else
12749 {
12750 /* Such relocation on static function should not have been
12751 emitted by the compiler. */
12752 return bfd_reloc_notsupported;
12753 }
12754 }
12755 *unresolved_reloc_p = false;
12756 return bfd_reloc_ok;
12757
12758 case R_ARM_FUNCDESC:
12759 {
12760 if (h == NULL)
12761 {
12762 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts (input_bfd);
12763 Elf_Internal_Rela outrel;
12764 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12765
12766 if (r_symndx >= elf32_arm_num_entries (input_bfd))
12767 {
12768 * error_message = _("local symbol index too big");
12769 return bfd_reloc_dangerous;
12770 }
12771
12772 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12773 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12774 bfd_vma seg = -1;
12775
12776 if (bfd_link_pic (info) && dynindx == 0)
12777 {
12778 * error_message = _("dynamic index information not available");
12779 return bfd_reloc_dangerous;
12780 }
12781
12782 /* Replace static FUNCDESC relocation with a
12783 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12784 executable. */
12785 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12786 outrel.r_offset = input_section->output_section->vma
12787 + input_section->output_offset + rel->r_offset;
12788 outrel.r_addend = 0;
12789 if (bfd_link_pic (info))
12790 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12791 else
12792 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12793
12794 bfd_put_32 (input_bfd, sgot->output_section->vma
12795 + sgot->output_offset + offset, hit_data);
12796
12797 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12798 arm_elf_fill_funcdesc (output_bfd, info,
12799 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12800 dynindx, offset, addr, dynreloc_value, seg);
12801 }
12802 else
12803 {
12804 if (h->dynindx == -1)
12805 {
12806 int dynindx;
12807 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12808 bfd_vma addr;
12809 bfd_vma seg = -1;
12810 Elf_Internal_Rela outrel;
12811
12812 /* For static binaries sym_sec can be null. */
12813 if (sym_sec)
12814 {
12815 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12816 addr = dynreloc_value - sym_sec->output_section->vma;
12817 }
12818 else
12819 {
12820 dynindx = 0;
12821 addr = 0;
12822 }
12823
12824 if (bfd_link_pic (info) && dynindx == 0)
12825 abort ();
12826
12827 /* Replace static FUNCDESC relocation with a
12828 R_ARM_RELATIVE dynamic relocation. */
12829 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12830 outrel.r_offset = input_section->output_section->vma
12831 + input_section->output_offset + rel->r_offset;
12832 outrel.r_addend = 0;
12833 if (bfd_link_pic (info))
12834 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12835 else
12836 arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
12837
12838 bfd_put_32 (input_bfd, sgot->output_section->vma
12839 + sgot->output_offset + offset, hit_data);
12840
12841 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12842 arm_elf_fill_funcdesc (output_bfd, info,
12843 &eh->fdpic_cnts.funcdesc_offset,
12844 dynindx, offset, addr, dynreloc_value, seg);
12845 }
12846 else
12847 {
12848 Elf_Internal_Rela outrel;
12849
12850 /* Add a dynamic relocation. */
12851 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12852 outrel.r_offset = input_section->output_section->vma
12853 + input_section->output_offset + rel->r_offset;
12854 outrel.r_addend = 0;
12855 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12856 }
12857 }
12858 }
12859 *unresolved_reloc_p = false;
12860 return bfd_reloc_ok;
12861
12862 case R_ARM_THM_BF16:
12863 {
12864 bfd_vma relocation;
12865 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12866 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12867
12868 if (globals->use_rel)
12869 {
12870 bfd_vma immA = (upper_insn & 0x001f);
12871 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12872 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12873 addend = (immA << 12);
12874 addend |= (immB << 2);
12875 addend |= (immC << 1);
12876 addend |= 1;
12877 /* Sign extend. */
12878 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12879 }
12880
12881 relocation = value + signed_addend;
12882 relocation -= (input_section->output_section->vma
12883 + input_section->output_offset
12884 + rel->r_offset);
12885
12886 /* Put RELOCATION back into the insn. */
12887 {
12888 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12889 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12890 bfd_vma immC = (relocation & 0x00000002) >> 1;
12891
12892 upper_insn = (upper_insn & 0xffe0) | immA;
12893 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12894 }
12895
12896 /* Put the relocated value back in the object file: */
12897 bfd_put_16 (input_bfd, upper_insn, hit_data);
12898 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12899
12900 return bfd_reloc_ok;
12901 }
12902
12903 case R_ARM_THM_BF12:
12904 {
12905 bfd_vma relocation;
12906 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12907 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12908
12909 if (globals->use_rel)
12910 {
12911 bfd_vma immA = (upper_insn & 0x0001);
12912 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12913 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12914 addend = (immA << 12);
12915 addend |= (immB << 2);
12916 addend |= (immC << 1);
12917 addend |= 1;
12918 /* Sign extend. */
12919 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12920 signed_addend = addend;
12921 }
12922
12923 relocation = value + signed_addend;
12924 relocation -= (input_section->output_section->vma
12925 + input_section->output_offset
12926 + rel->r_offset);
12927
12928 /* Put RELOCATION back into the insn. */
12929 {
12930 bfd_vma immA = (relocation & 0x00001000) >> 12;
12931 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12932 bfd_vma immC = (relocation & 0x00000002) >> 1;
12933
12934 upper_insn = (upper_insn & 0xfffe) | immA;
12935 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12936 }
12937
12938 /* Put the relocated value back in the object file: */
12939 bfd_put_16 (input_bfd, upper_insn, hit_data);
12940 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12941
12942 return bfd_reloc_ok;
12943 }
12944
12945 case R_ARM_THM_BF18:
12946 {
12947 bfd_vma relocation;
12948 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12949 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12950
12951 if (globals->use_rel)
12952 {
12953 bfd_vma immA = (upper_insn & 0x007f);
12954 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12955 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12956 addend = (immA << 12);
12957 addend |= (immB << 2);
12958 addend |= (immC << 1);
12959 addend |= 1;
12960 /* Sign extend. */
12961 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
12962 signed_addend = addend;
12963 }
12964
12965 relocation = value + signed_addend;
12966 relocation -= (input_section->output_section->vma
12967 + input_section->output_offset
12968 + rel->r_offset);
12969
12970 /* Put RELOCATION back into the insn. */
12971 {
12972 bfd_vma immA = (relocation & 0x0007f000) >> 12;
12973 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12974 bfd_vma immC = (relocation & 0x00000002) >> 1;
12975
12976 upper_insn = (upper_insn & 0xff80) | immA;
12977 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12978 }
12979
12980 /* Put the relocated value back in the object file: */
12981 bfd_put_16 (input_bfd, upper_insn, hit_data);
12982 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12983
12984 return bfd_reloc_ok;
12985 }
12986
12987 default:
12988 return bfd_reloc_notsupported;
12989 }
12990 }
12991
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  The addend
   is stored encoded inside the instruction at ADDRESS, so adjusting
   it means decoding the instruction, updating the value, and
   re-encoding it in place.  Only a 16/32-bit word at ADDRESS is
   read and written; nothing else is modified.  */
static void
arm_add_to_rel (bfd *              abfd,
		bfd_byte *         address,
		reloc_howto_type * howto,
		bfd_signed_vma     increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      /* Thumb BL/B.W: the branch offset is split across two 16-bit
	 halfwords.  The low 11 bits of the first halfword hold offset
	 bits [22:12]; the low 11 bits of the second hold bits [11:1]
	 (bit 0 is always zero for halfword-aligned targets).  */
      int upper_insn, lower_insn;
      int upper, lower;

      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the byte offset, adjust it, then drop bit 0 again
	 before re-splitting into the two 11-bit fields.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      /* If the top bit of the masked field is set, sign-extend by
	 filling all bits above the field with ones.  */
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* These relocations store a word (4-byte) offset, so scale
	     it up to bytes before adding the byte increment, then
	     scale back down via the howto's rightshift.
	     NOTE(review): this relies on howto->size being the log2
	     of the access size (2 for these 32-bit relocs); the
	     meaning of the size field has varied across bfd versions,
	     so confirm against the howto table in this file.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      /* Re-insert the adjusted value into the destination field,
	 preserving all bits outside dst_mask.  */
      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
13065
/* Nonzero if R_TYPE is any of the ARM TLS relocation types, including
   the GNU TLS-descriptor dialect checked by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect (the
   TLS-descriptor sequence relocations).  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13087
/* Relocate an ARM ELF section.

   OUTPUT_BFD is the final output; INPUT_SECTION of INPUT_BFD is the
   section being relocated and CONTENTS its modifiable contents.
   RELOCS is the relocation array for the section; LOCAL_SYMS and
   LOCAL_SECTIONS describe INPUT_BFD's local symbols and the sections
   they are defined in.  Returns nonzero (true) on success, zero on a
   hard error after diagnosing it.  */

static int
elf32_arm_relocate_section (bfd *                  output_bfd,
			    struct bfd_link_info * info,
			    bfd *                  input_bfd,
			    asection *             input_section,
			    bfd_byte *             contents,
			    Elf_Internal_Rela *    relocs,
			    Elf_Internal_Sym *     local_syms,
			    asection **            local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bool unresolved_reloc = false;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map vendor/legacy reloc numbers to the canonical type.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection markers only; they
	 produce no bits in the output.  */
      if (r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, true);

	  if (globals->use_rel)
	    {
	      /* REL format: the addend lives in the section contents.
		 For SEC_MERGE sections the symbol may have moved, so
		 the stored addend must be extracted, retargeted via
		 _bfd_elf_rel_local_sym, and written back.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* ARM MOVW/MOVT: 16-bit immediate split into
			 imm4:imm12; sign-extend from 16 bits.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb-2 MOVW/MOVT: immediate scattered over
			 imm4:i:imm3:imm8 across the two halfwords.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  /* Non-contiguous or shifted fields can't be
			     adjusted generically.  */
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return false;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol: resolve via the hash
	     table (handles versioning, dynamic symbols etc.).  */
	  bool warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a human-readable symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Diagnose a TLS reloc used on a non-TLS symbol or vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return false;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, true);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return true;
}
13433
13434 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13435 adds the edit to the start of the list. (The list must be built in order of
13436 ascending TINDEX: the function's callers are primarily responsible for
13437 maintaining that condition). */
13438
13439 static void
13440 add_unwind_table_edit (arm_unwind_table_edit **head,
13441 arm_unwind_table_edit **tail,
13442 arm_unwind_edit_type type,
13443 asection *linked_section,
13444 unsigned int tindex)
13445 {
13446 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13447 xmalloc (sizeof (arm_unwind_table_edit));
13448
13449 new_edit->type = type;
13450 new_edit->linked_section = linked_section;
13451 new_edit->index = tindex;
13452
13453 if (tindex > 0)
13454 {
13455 new_edit->next = NULL;
13456
13457 if (*tail)
13458 (*tail)->next = new_edit;
13459
13460 (*tail) = new_edit;
13461
13462 if (!*head)
13463 (*head) = new_edit;
13464 }
13465 else
13466 {
13467 new_edit->next = *head;
13468
13469 if (!*tail)
13470 *tail = new_edit;
13471
13472 *head = new_edit;
13473 }
13474 }
13475
13476 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13477
13478 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13479
13480 static void
13481 adjust_exidx_size (asection *exidx_sec, int adjust)
13482 {
13483 asection *out_sec;
13484
13485 if (!exidx_sec->rawsize)
13486 exidx_sec->rawsize = exidx_sec->size;
13487
13488 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13489 out_sec = exidx_sec->output_section;
13490 /* Adjust size of output section. */
13491 bfd_set_section_size (out_sec, out_sec->size + adjust);
13492 }
13493
13494 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13495
13496 static void
13497 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13498 {
13499 struct _arm_elf_section_data *exidx_arm_data;
13500
13501 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13502 add_unwind_table_edit
13503 (&exidx_arm_data->u.exidx.unwind_edit_list,
13504 &exidx_arm_data->u.exidx.unwind_edit_tail,
13505 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13506
13507 exidx_arm_data->additional_reloc_count++;
13508
13509 adjust_exidx_size (exidx_sec, 8);
13510 }
13511
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bool
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bool merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* -1: no entry seen yet; 0: last entry was EXIDX_CANTUNWIND;
     1: inlined unwind opcodes; 2: pointer to an unwind table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table entries (8 bytes each: address word followed by
	 data word) deciding which ones can be elided.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, - deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return true;
}
13696
13697 static bool
13698 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13699 bfd *ibfd, const char *name)
13700 {
13701 asection *sec, *osec;
13702
13703 sec = bfd_get_linker_section (ibfd, name);
13704 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13705 return true;
13706
13707 osec = sec->output_section;
13708 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13709 return true;
13710
13711 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13712 sec->output_offset, sec->size))
13713 return false;
13714
13715 return true;
13716 }
13717
13718 static bool
13719 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13720 {
13721 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13722 asection *sec, *osec;
13723
13724 if (globals == NULL)
13725 return false;
13726
13727 /* Invoke the regular ELF backend linker to do all the work. */
13728 if (!bfd_elf_final_link (abfd, info))
13729 return false;
13730
13731 /* Process stub sections (eg BE8 encoding, ...). */
13732 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13733 unsigned int i;
13734 for (i=0; i<htab->top_id; i++)
13735 {
13736 sec = htab->stub_group[i].stub_sec;
13737 /* Only process it once, in its link_sec slot. */
13738 if (sec && i == htab->stub_group[i].link_sec->id)
13739 {
13740 osec = sec->output_section;
13741 elf32_arm_write_section (abfd, info, sec, sec->contents);
13742 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13743 sec->output_offset, sec->size))
13744 return false;
13745 }
13746 }
13747
13748 /* Write out any glue sections now that we have created all the
13749 stubs. */
13750 if (globals->bfd_of_glue_owner != NULL)
13751 {
13752 if (! elf32_arm_output_glue_section (info, abfd,
13753 globals->bfd_of_glue_owner,
13754 ARM2THUMB_GLUE_SECTION_NAME))
13755 return false;
13756
13757 if (! elf32_arm_output_glue_section (info, abfd,
13758 globals->bfd_of_glue_owner,
13759 THUMB2ARM_GLUE_SECTION_NAME))
13760 return false;
13761
13762 if (! elf32_arm_output_glue_section (info, abfd,
13763 globals->bfd_of_glue_owner,
13764 VFP11_ERRATUM_VENEER_SECTION_NAME))
13765 return false;
13766
13767 if (! elf32_arm_output_glue_section (info, abfd,
13768 globals->bfd_of_glue_owner,
13769 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13770 return false;
13771
13772 if (! elf32_arm_output_glue_section (info, abfd,
13773 globals->bfd_of_glue_owner,
13774 ARM_BX_GLUE_SECTION_NAME))
13775 return false;
13776 }
13777
13778 return true;
13779 }
13780
13781 /* Return a best guess for the machine number based on the attributes. */
13782
13783 static unsigned int
13784 bfd_arm_get_mach_from_attributes (bfd * abfd)
13785 {
13786 int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13787
13788 switch (arch)
13789 {
13790 case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13791 case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13792 case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13793 case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13794
13795 case TAG_CPU_ARCH_V5TE:
13796 {
13797 char * name;
13798
13799 BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13800 name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13801
13802 if (name)
13803 {
13804 if (strcmp (name, "IWMMXT2") == 0)
13805 return bfd_mach_arm_iWMMXt2;
13806
13807 if (strcmp (name, "IWMMXT") == 0)
13808 return bfd_mach_arm_iWMMXt;
13809
13810 if (strcmp (name, "XSCALE") == 0)
13811 {
13812 int wmmx;
13813
13814 BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13815 wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13816 switch (wmmx)
13817 {
13818 case 1: return bfd_mach_arm_iWMMXt;
13819 case 2: return bfd_mach_arm_iWMMXt2;
13820 default: return bfd_mach_arm_XScale;
13821 }
13822 }
13823 }
13824
13825 return bfd_mach_arm_5TE;
13826 }
13827
13828 case TAG_CPU_ARCH_V5TEJ:
13829 return bfd_mach_arm_5TEJ;
13830 case TAG_CPU_ARCH_V6:
13831 return bfd_mach_arm_6;
13832 case TAG_CPU_ARCH_V6KZ:
13833 return bfd_mach_arm_6KZ;
13834 case TAG_CPU_ARCH_V6T2:
13835 return bfd_mach_arm_6T2;
13836 case TAG_CPU_ARCH_V6K:
13837 return bfd_mach_arm_6K;
13838 case TAG_CPU_ARCH_V7:
13839 return bfd_mach_arm_7;
13840 case TAG_CPU_ARCH_V6_M:
13841 return bfd_mach_arm_6M;
13842 case TAG_CPU_ARCH_V6S_M:
13843 return bfd_mach_arm_6SM;
13844 case TAG_CPU_ARCH_V7E_M:
13845 return bfd_mach_arm_7EM;
13846 case TAG_CPU_ARCH_V8:
13847 return bfd_mach_arm_8;
13848 case TAG_CPU_ARCH_V8R:
13849 return bfd_mach_arm_8R;
13850 case TAG_CPU_ARCH_V8M_BASE:
13851 return bfd_mach_arm_8M_BASE;
13852 case TAG_CPU_ARCH_V8M_MAIN:
13853 return bfd_mach_arm_8M_MAIN;
13854 case TAG_CPU_ARCH_V8_1M_MAIN:
13855 return bfd_mach_arm_8_1M_MAIN;
13856 case TAG_CPU_ARCH_V9:
13857 return bfd_mach_arm_9;
13858
13859 default:
13860 /* Force entry to be added for any new known Tag_CPU_arch value. */
13861 BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13862
13863 /* Unknown Tag_CPU_arch value. */
13864 return bfd_mach_arm_unknown;
13865 }
13866 }
13867
13868 /* Set the right machine number. */
13869
13870 static bool
13871 elf32_arm_object_p (bfd *abfd)
13872 {
13873 unsigned int mach;
13874
13875 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13876
13877 if (mach == bfd_mach_arm_unknown)
13878 {
13879 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13880 mach = bfd_mach_arm_ep9312;
13881 else
13882 mach = bfd_arm_get_mach_from_attributes (abfd);
13883 }
13884
13885 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13886 return true;
13887 }
13888
13889 /* Function to keep ARM specific flags in the ELF header. */
13890
13891 static bool
13892 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13893 {
13894 if (elf_flags_init (abfd)
13895 && elf_elfheader (abfd)->e_flags != flags)
13896 {
13897 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13898 {
13899 if (flags & EF_ARM_INTERWORK)
13900 _bfd_error_handler
13901 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13902 abfd);
13903 else
13904 _bfd_error_handler
13905 (_("warning: clearing the interworking flag of %pB due to outside request"),
13906 abfd);
13907 }
13908 }
13909 else
13910 {
13911 elf_elfheader (abfd)->e_flags = flags;
13912 elf_flags_init (abfd) = true;
13913 }
13914
13915 return true;
13916 }
13917
13918 /* Copy backend specific data from one object module to another. */
13919
13920 static bool
13921 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13922 {
13923 flagword in_flags;
13924 flagword out_flags;
13925
13926 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13927 return true;
13928
13929 in_flags = elf_elfheader (ibfd)->e_flags;
13930 out_flags = elf_elfheader (obfd)->e_flags;
13931
13932 if (elf_flags_init (obfd)
13933 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13934 && in_flags != out_flags)
13935 {
13936 /* Cannot mix APCS26 and APCS32 code. */
13937 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13938 return false;
13939
13940 /* Cannot mix float APCS and non-float APCS code. */
13941 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13942 return false;
13943
13944 /* If the src and dest have different interworking flags
13945 then turn off the interworking bit. */
13946 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13947 {
13948 if (out_flags & EF_ARM_INTERWORK)
13949 _bfd_error_handler
13950 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13951 obfd, ibfd);
13952
13953 in_flags &= ~EF_ARM_INTERWORK;
13954 }
13955
13956 /* Likewise for PIC, though don't warn for this case. */
13957 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13958 in_flags &= ~EF_ARM_PIC;
13959 }
13960
13961 elf_elfheader (obfd)->e_flags = in_flags;
13962 elf_flags_init (obfd) = true;
13963
13964 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13965 }
13966
/* Values for Tag_ABI_PCS_R9_use.  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary callee-saved register.  */
  AEABI_R9_SB,		/* R9 used as the static base (SB).  */
  AEABI_R9_TLS,		/* R9 used for thread-local storage.  */
  AEABI_R9_unused	/* R9 not used at all.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums are as small as possible.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums forced to 32 bits.  */
};
13993
13994 /* Determine whether an object attribute tag takes an integer, a
13995 string or both. */
13996
13997 static int
13998 elf32_arm_obj_attrs_arg_type (int tag)
13999 {
14000 if (tag == Tag_compatibility)
14001 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14002 else if (tag == Tag_nodefaults)
14003 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14004 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14005 return ATTR_TYPE_FLAG_STR_VAL;
14006 else if (tag < 32)
14007 return ATTR_TYPE_FLAG_INT_VAL;
14008 else
14009 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14010 }
14011
14012 /* The ABI defines that Tag_conformance should be emitted first, and that
14013 Tag_nodefaults should be second (if either is defined). This sets those
14014 two positions, and bumps up the position of all the remaining tags to
14015 compensate. */
14016 static int
14017 elf32_arm_obj_attrs_order (int num)
14018 {
14019 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14020 return Tag_conformance;
14021 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14022 return Tag_nodefaults;
14023 if ((num - 2) < Tag_nodefaults)
14024 return num - 2;
14025 if ((num - 1) < Tag_conformance)
14026 return num - 1;
14027 return num;
14028 }
14029
14030 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14031 static bool
14032 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14033 {
14034 if ((tag & 127) < 64)
14035 {
14036 _bfd_error_handler
14037 (_("%pB: unknown mandatory EABI object attribute %d"),
14038 abfd, tag);
14039 bfd_set_error (bfd_error_bad_value);
14040 return false;
14041 }
14042 else
14043 {
14044 _bfd_error_handler
14045 (_("warning: %pB: unknown EABI object attribute %d"),
14046 abfd, tag);
14047 return true;
14048 }
14049 }
14050
14051 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14052 Returns -1 if no architecture could be read. */
14053
14054 static int
14055 get_secondary_compatible_arch (bfd *abfd)
14056 {
14057 obj_attribute *attr =
14058 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14059
14060 /* Note: the tag and its argument below are uleb128 values, though
14061 currently-defined values fit in one byte for each. */
14062 if (attr->s
14063 && attr->s[0] == Tag_CPU_arch
14064 && (attr->s[1] & 128) != 128
14065 && attr->s[2] == 0)
14066 return attr->s[1];
14067
14068 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14069 return -1;
14070 }
14071
14072 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14073 The tag is removed if ARCH is -1. */
14074
14075 static void
14076 set_secondary_compatible_arch (bfd *abfd, int arch)
14077 {
14078 obj_attribute *attr =
14079 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14080
14081 if (arch == -1)
14082 {
14083 attr->s = NULL;
14084 return;
14085 }
14086
14087 /* Note: the tag and its argument below are uleb128 values, though
14088 currently-defined values fit in one byte for each. */
14089 if (!attr->s)
14090 attr->s = (char *) bfd_alloc (abfd, 3);
14091 attr->s[0] = Tag_CPU_arch;
14092 attr->s[1] = arch;
14093 attr->s[2] = '\0';
14094 }
14095
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG is the value accumulated so far in the output;
   NEWTAG is the value from the input bfd IBFD.  SECONDARY_COMPAT and
   *SECONDARY_COMPAT_OUT are the respective Tag_also_compatible_with
   architectures (-1 if absent); *SECONDARY_COMPAT_OUT is updated to the
   merged secondary architecture.  Returns the merged Tag_CPU_arch value,
   or -1 (after reporting an error) if the two architectures conflict.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each of the following tables gives, for one "high" architecture tag,
     the result of merging it with each possible lower tag; the table is
     indexed by the lower tag.  -1 marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),		/* PRE_V4.  */
      T(V6T2),		/* V4.  */
      T(V6T2),		/* V4T.  */
      T(V6T2),		/* V5T.  */
      T(V6T2),		/* V5TE.  */
      T(V6T2),		/* V5TEJ.  */
      T(V6T2),		/* V6.  */
      T(V7),		/* V6KZ.  */
      T(V6T2)		/* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),		/* PRE_V4.  */
      T(V6K),		/* V4.  */
      T(V6K),		/* V4T.  */
      T(V6K),		/* V5T.  */
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6K),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V7),		/* V6T2.  */
      T(V6K)		/* V6K.  */
    };
  const int v7[] =
    {
      T(V7),		/* PRE_V4.  */
      T(V7),		/* V4.  */
      T(V7),		/* V4T.  */
      T(V7),		/* V5T.  */
      T(V7),		/* V5TE.  */
      T(V7),		/* V5TEJ.  */
      T(V7),		/* V6.  */
      T(V7),		/* V6KZ.  */
      T(V7),		/* V6T2.  */
      T(V7),		/* V6K.  */
      T(V7)		/* V7.  */
    };
  const int v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V6K),		/* V4T.  */
      T(V6K),		/* V5T.  */
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6K),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V7),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M)		/* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V6K),		/* V4T.  */
      T(V6K),		/* V5T.  */
      T(V6K),		/* V5TE.  */
      T(V6K),		/* V5TEJ.  */
      T(V6K),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V7),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6S_M),		/* V6_M.  */
      T(V6S_M)		/* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V7E_M),		/* V4T.  */
      T(V7E_M),		/* V5T.  */
      T(V7E_M),		/* V5TE.  */
      T(V7E_M),		/* V5TEJ.  */
      T(V7E_M),		/* V6.  */
      T(V7E_M),		/* V6KZ.  */
      T(V7E_M),		/* V6T2.  */
      T(V7E_M),		/* V6K.  */
      T(V7E_M),		/* V7.  */
      T(V7E_M),		/* V6_M.  */
      T(V7E_M),		/* V6S_M.  */
      T(V7E_M)		/* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),		/* PRE_V4.  */
      T(V8),		/* V4.  */
      T(V8),		/* V4T.  */
      T(V8),		/* V5T.  */
      T(V8),		/* V5TE.  */
      T(V8),		/* V5TEJ.  */
      T(V8),		/* V6.  */
      T(V8),		/* V6KZ.  */
      T(V8),		/* V6T2.  */
      T(V8),		/* V6K.  */
      T(V8),		/* V7.  */
      T(V8),		/* V6_M.  */
      T(V8),		/* V6S_M.  */
      T(V8),		/* V7E_M.  */
      T(V8),		/* V8.  */
      T(V8),		/* V8-R.  */
      T(V8),		/* V8-M.BASE.  */
      T(V8),		/* V8-M.MAIN.  */
      T(V8),		/* V8.1.  */
      T(V8),		/* V8.2.  */
      T(V8),		/* V8.3.  */
      T(V8),		/* V8.1-M.MAIN.  */
    };
  const int v8r[] =
    {
      T(V8R),		/* PRE_V4.  */
      T(V8R),		/* V4.  */
      T(V8R),		/* V4T.  */
      T(V8R),		/* V5T.  */
      T(V8R),		/* V5TE.  */
      T(V8R),		/* V5TEJ.  */
      T(V8R),		/* V6.  */
      T(V8R),		/* V6KZ.  */
      T(V8R),		/* V6T2.  */
      T(V8R),		/* V6K.  */
      T(V8R),		/* V7.  */
      T(V8R),		/* V6_M.  */
      T(V8R),		/* V6S_M.  */
      T(V8R),		/* V7E_M.  */
      T(V8),		/* V8.  */
      T(V8R),		/* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8_1M_MAIN),	/* V7.  */
      T(V8_1M_MAIN),	/* V6_M.  */
      T(V8_1M_MAIN),	/* V6S_M.  */
      T(V8_1M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8_1M_MAIN),	/* V8-M BASELINE.  */
      T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
    };
  const int v9[] =
    {
      T(V9),		/* PRE_V4.  */
      T(V9),		/* V4.  */
      T(V9),		/* V4T.  */
      T(V9),		/* V5T.  */
      T(V9),		/* V5TE.  */
      T(V9),		/* V5TEJ.  */
      T(V9),		/* V6.  */
      T(V9),		/* V6KZ.  */
      T(V9),		/* V6T2.  */
      T(V9),		/* V6K.  */
      T(V9),		/* V7.  */
      T(V9),		/* V6_M.  */
      T(V9),		/* V6S_M.  */
      T(V9),		/* V7E_M.  */
      T(V9),		/* V8.  */
      T(V9),		/* V8-R.  */
      T(V9),		/* V8-M.BASE.  */
      T(V9),		/* V8-M.MAIN.  */
      T(V9),		/* V8.1.  */
      T(V9),		/* V8.2.  */
      T(V9),		/* V8.3.  */
      T(V9),		/* V8.1-M.MAIN.  */
      T(V9),		/* V9.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
      T(V9),		/* V9.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Table of tables, indexed by the higher tag minus T(V6T2).  NULL
     entries correspond to V8.1/V8.2/V8.3, which never appear as the
     higher tag here because they merge monotonically via the v8 table.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      v9,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14426
14427 /* Query attributes object to see if integer divide instructions may be
14428 present in an object. */
14429 static bool
14430 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14431 {
14432 int arch = attr[Tag_CPU_arch].i;
14433 int profile = attr[Tag_CPU_arch_profile].i;
14434
14435 switch (attr[Tag_DIV_use].i)
14436 {
14437 case 0:
14438 /* Integer divide allowed if instruction contained in archetecture. */
14439 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14440 return true;
14441 else if (arch >= TAG_CPU_ARCH_V7E_M)
14442 return true;
14443 else
14444 return false;
14445
14446 case 1:
14447 /* Integer divide explicitly prohibited. */
14448 return false;
14449
14450 default:
14451 /* Unrecognised case - treat as allowing divide everywhere. */
14452 case 2:
14453 /* Integer divide allowed in ARM state. */
14454 return true;
14455 }
14456 }
14457
14458 /* Query attributes object to see if integer divide instructions are
14459 forbidden to be in the object. This is not the inverse of
14460 elf32_arm_attributes_accept_div. */
14461 static bool
14462 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14463 {
14464 return attr[Tag_DIV_use].i == 1;
14465 }
14466
/* Merge EABI object attributes from IBFD into OBFD (INFO->output_bfd).
   Raise an error if there are conflicting attributes.  Returns false
   (after reporting) on an unresolvable conflict, true otherwise; some
   mismatches only produce warnings and still return true.  */

static bool
elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  obj_attribute *in_attr;
  obj_attribute *out_attr;
  /* Some tags have 0 = don't care, 1 = strong requirement,
     2 = weak requirement.  */
  static const int order_021[3] = {0, 2, 1};
  int i;
  bool result = true;
  const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;

  /* Skip the linker stubs file.  This preserves previous behavior
     of accepting unknown attributes in the first input file - but
     is that a bug?  */
  if (ibfd->flags & BFD_LINKER_CREATED)
    return true;

  /* Skip any input that doesn't have an attribute section.
     This enables linking object files without an attribute section with
     any others.  */
  if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
    return true;

  if (!elf_known_obj_attributes_proc (obfd)[0].i)
    {
      /* This is the first object.  Copy the attributes.  */
      _bfd_elf_copy_obj_attributes (ibfd, obfd);

      out_attr = elf_known_obj_attributes_proc (obfd);

      /* Use the Tag_null value to indicate the attributes have been
	 initialized.  */
      out_attr[0].i = 1;

      /* We do not output objects with Tag_MPextension_use_legacy - we move
	 the attribute's value to Tag_MPextension_use.  */
      if (out_attr[Tag_MPextension_use_legacy].i != 0)
	{
	  if (out_attr[Tag_MPextension_use].i != 0
	      && out_attr[Tag_MPextension_use_legacy].i
		!= out_attr[Tag_MPextension_use].i)
	    {
	      _bfd_error_handler
		(_("Error: %pB has both the current and legacy "
		   "Tag_MPextension_use attributes"), ibfd);
	      result = false;
	    }

	  out_attr[Tag_MPextension_use] =
	    out_attr[Tag_MPextension_use_legacy];
	  out_attr[Tag_MPextension_use_legacy].type = 0;
	  out_attr[Tag_MPextension_use_legacy].i = 0;
	}

      return result;
    }

  in_attr = elf_known_obj_attributes_proc (ibfd);
  out_attr = elf_known_obj_attributes_proc (obfd);
  /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
  if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
    {
      /* Ignore mismatches if the object doesn't use floating point or is
	 floating point ABI independent.  */
      if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
      else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
	{
	  _bfd_error_handler
	    (_("error: %pB uses VFP register arguments, %pB does not"),
	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
	  result = false;
	}
    }

  for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
    {
      /* Merge this attribute with existing attributes.  */
      switch (i)
	{
	case Tag_CPU_raw_name:
	case Tag_CPU_name:
	  /* These are merged after Tag_CPU_arch.  */
	  break;

	case Tag_ABI_optimization_goals:
	case Tag_ABI_FP_optimization_goals:
	  /* Use the first value seen.  */
	  break;

	case Tag_CPU_arch:
	  {
	    int secondary_compat = -1, secondary_compat_out = -1;
	    unsigned int saved_out_attr = out_attr[i].i;
	    int arch_attr;
	    static const char *name_table[] =
	      {
		/* These aren't real CPU names, but we can't guess
		   that from the architecture version alone.  */
		"Pre v4",
		"ARM v4",
		"ARM v4T",
		"ARM v5T",
		"ARM v5TE",
		"ARM v5TEJ",
		"ARM v6",
		"ARM v6KZ",
		"ARM v6T2",
		"ARM v6K",
		"ARM v7",
		"ARM v6-M",
		"ARM v6S-M",
		"ARM v7E-M",
		"ARM v8",
		"ARM v8-R",
		"ARM v8-M.baseline",
		"ARM v8-M.mainline",
		"ARM v8.1-A",
		"ARM v8.2-A",
		"ARM v8.3-A",
		"ARM v8.1-M.mainline",
		"ARM v9",
	      };

	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
	    secondary_compat = get_secondary_compatible_arch (ibfd);
	    secondary_compat_out = get_secondary_compatible_arch (obfd);
	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
					      &secondary_compat_out,
					      in_attr[i].i,
					      secondary_compat);

	    /* Return with error if failed to merge.  */
	    if (arch_attr == -1)
	      return false;

	    out_attr[i].i = arch_attr;

	    set_secondary_compatible_arch (obfd, secondary_compat_out);

	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
	    if (out_attr[i].i == saved_out_attr)
	      ; /* Leave the names alone.  */
	    else if (out_attr[i].i == in_attr[i].i)
	      {
		/* The output architecture has been changed to match the
		   input architecture.  Use the input names.  */
		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
		  : NULL;
		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
		  : NULL;
	      }
	    else
	      {
		out_attr[Tag_CPU_name].s = NULL;
		out_attr[Tag_CPU_raw_name].s = NULL;
	      }

	    /* If we still don't have a value for Tag_CPU_name,
	       make one up now.  Tag_CPU_raw_name remains blank.  */
	    if (out_attr[Tag_CPU_name].s == NULL
		&& out_attr[i].i < ARRAY_SIZE (name_table))
	      out_attr[Tag_CPU_name].s =
		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
	  }
	  break;

	case Tag_ARM_ISA_use:
	case Tag_THUMB_ISA_use:
	case Tag_WMMX_arch:
	case Tag_Advanced_SIMD_arch:
	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
	case Tag_ABI_FP_rounding:
	case Tag_ABI_FP_exceptions:
	case Tag_ABI_FP_user_exceptions:
	case Tag_ABI_FP_number_model:
	case Tag_FP_HP_extension:
	case Tag_CPU_unaligned_access:
	case Tag_T2EE_use:
	case Tag_MPextension_use:
	case Tag_MVE_arch:
	case Tag_PAC_extension:
	case Tag_BTI_extension:
	case Tag_BTI_use:
	case Tag_PACRET_use:
	  /* Use the largest value specified.  */
	  if (in_attr[i].i > out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_preserved:
	case Tag_ABI_PCS_RO_data:
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_ABI_align_needed:
	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
	      && (in_attr[Tag_ABI_align_preserved].i == 0
		  || out_attr[Tag_ABI_align_preserved].i == 0))
	    {
	      /* This error message should be enabled once all non-conformant
		 binaries in the toolchain have had the attributes set
		 properly.
	      _bfd_error_handler
		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
		 obfd, ibfd);
	      result = false; */
	    }
	  /* Fall through.  */
	case Tag_ABI_FP_denormal:
	case Tag_ABI_PCS_GOT_use:
	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
	     value if greater than 2 (for future-proofing).  */
	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_Virtualization_use:
	  /* The virtualization tag effectively stores two bits of
	     information: the intended use of TrustZone (in bit 0), and the
	     intended use of Virtualization (in bit 1).  */
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0
		   && in_attr[i].i != out_attr[i].i)
	    {
	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
		out_attr[i].i = 3;
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: unable to merge virtualization attributes "
		       "with %pB"),
		     obfd, ibfd);
		  result = false;
		}
	    }
	  break;

	case Tag_CPU_arch_profile:
	  if (out_attr[i].i != in_attr[i].i)
	    {
	      /* 0 will merge with anything.
		 'A' and 'S' merge to 'A'.
		 'R' and 'S' merge to 'R'.
		 'M' and 'A|R|S' is an error.  */
	      if (out_attr[i].i == 0
		  || (out_attr[i].i == 'S'
		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
		out_attr[i].i = in_attr[i].i;
	      else if (in_attr[i].i == 0
		       || (in_attr[i].i == 'S'
			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
		; /* Do nothing.  */
	      else
		{
		  _bfd_error_handler
		    (_("error: %pB: conflicting architecture profiles %c/%c"),
		     ibfd,
		     in_attr[i].i ? in_attr[i].i : '0',
		     out_attr[i].i ? out_attr[i].i : '0');
		  result = false;
		}
	    }
	  break;

	case Tag_DSP_extension:
	  /* No need to change output value if any of:
	     - pre (<=) ARMv5T input architecture (do not have DSP)
	     - M input profile not ARMv7E-M and do not have DSP.  */
	  if (in_attr[Tag_CPU_arch].i <= 3
	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
		  && in_attr[Tag_CPU_arch].i != 13
		  && in_attr[i].i == 0))
	    ; /* Do nothing.  */
	  /* Output value should be 0 if DSP part of architecture, ie.
	     - post (>=) ARMv5te architecture output
	     - A, R or S profile output or ARMv7E-M output architecture.  */
	  else if (out_attr[Tag_CPU_arch].i >= 4
		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
		       || out_attr[Tag_CPU_arch_profile].i == 'R'
		       || out_attr[Tag_CPU_arch_profile].i == 'S'
		       || out_attr[Tag_CPU_arch].i == 13))
	    out_attr[i].i = 0;
	  /* Otherwise, DSP instructions are added and not part of output
	     architecture.  */
	  else
	    out_attr[i].i = 1;
	  break;

	case Tag_FP_arch:
	    {
	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
		 when it's 0.  It might mean absence of FP hardware if
		 Tag_FP_arch is zero.  */

#define VFP_VERSION_COUNT 9
	      static const struct
	      {
		  int ver;
		  int regs;
	      } vfp_versions[VFP_VERSION_COUNT] =
		{
		  {0, 0},
		  {1, 16},
		  {2, 16},
		  {3, 32},
		  {3, 16},
		  {4, 32},
		  {4, 16},
		  {8, 32},
		  {8, 16}
		};
	      int ver;
	      int regs;
	      int newval;

	      /* If the output has no requirement about FP hardware,
		 follow the requirement of the input.  */
	      if (out_attr[i].i == 0)
		{
		  /* This assert is still reasonable, we shouldn't
		     produce the suspicious build attribute
		     combination (See below for in_attr).  */
		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
		  out_attr[i].i = in_attr[i].i;
		  out_attr[Tag_ABI_HardFP_use].i
		    = in_attr[Tag_ABI_HardFP_use].i;
		  break;
		}
	      /* If the input has no requirement about FP hardware, do
		 nothing.  */
	      else if (in_attr[i].i == 0)
		{
		  /* We used to assert that Tag_ABI_HardFP_use was
		     zero here, but we should never assert when
		     consuming an object file that has suspicious
		     build attributes.  The single precision variant
		     of 'no FP architecture' is still 'no FP
		     architecture', so we just ignore the tag in this
		     case.  */
		  break;
		}

	      /* Both the input and the output have nonzero Tag_FP_arch.
		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */

	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
		 do nothing.  */
	      if (in_attr[Tag_ABI_HardFP_use].i == 0
		  && out_attr[Tag_ABI_HardFP_use].i == 0)
		;
	      /* If the input and the output have different Tag_ABI_HardFP_use,
		 the combination of them is 0 (implied by Tag_FP_arch).  */
	      else if (in_attr[Tag_ABI_HardFP_use].i
		       != out_attr[Tag_ABI_HardFP_use].i)
		out_attr[Tag_ABI_HardFP_use].i = 0;

	      /* Now we can handle Tag_FP_arch.  */

	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
		 pick the biggest.  */
	      if (in_attr[i].i >= VFP_VERSION_COUNT
		  && in_attr[i].i > out_attr[i].i)
		{
		  out_attr[i] = in_attr[i];
		  break;
		}
	      /* The output uses the superset of input features
		 (ISA version) and registers.  */
	      ver = vfp_versions[in_attr[i].i].ver;
	      if (ver < vfp_versions[out_attr[i].i].ver)
		ver = vfp_versions[out_attr[i].i].ver;
	      regs = vfp_versions[in_attr[i].i].regs;
	      if (regs < vfp_versions[out_attr[i].i].regs)
		regs = vfp_versions[out_attr[i].i].regs;
	      /* This assumes all possible supersets are also a valid
		 options.  */
	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
		{
		  if (regs == vfp_versions[newval].regs
		      && ver == vfp_versions[newval].ver)
		    break;
		}
	      out_attr[i].i = newval;
	    }
	  break;
	case Tag_PCS_config:
	  if (out_attr[i].i == 0)
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
	    {
	      /* It's sometimes ok to mix different configs, so this is only
		 a warning.  */
	      _bfd_error_handler
		(_("warning: %pB: conflicting platform configuration"), ibfd);
	    }
	  break;
	case Tag_ABI_PCS_R9_use:
	  if (in_attr[i].i != out_attr[i].i
	      && out_attr[i].i != AEABI_R9_unused
	      && in_attr[i].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: conflicting use of R9"), ibfd);
	      result = false;
	    }
	  if (out_attr[i].i == AEABI_R9_unused)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_RW_data:
	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
	    {
	      _bfd_error_handler
		(_("error: %pB: SB relative addressing conflicts with use of R9"),
		 ibfd);
	      result = false;
	    }
	  /* Use the smallest value specified.  */
	  if (in_attr[i].i < out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_PCS_wchar_t:
	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
	    {
	      _bfd_error_handler
		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
		 ibfd, in_attr[i].i, out_attr[i].i);
	    }
	  else if (in_attr[i].i && !out_attr[i].i)
	    out_attr[i].i = in_attr[i].i;
	  break;
	case Tag_ABI_enum_size:
	  if (in_attr[i].i != AEABI_enum_unused)
	    {
	      if (out_attr[i].i == AEABI_enum_unused
		  || out_attr[i].i == AEABI_enum_forced_wide)
		{
		  /* The existing object is compatible with anything.
		     Use whatever requirements the new object has.  */
		  out_attr[i].i = in_attr[i].i;
		}
	      else if (in_attr[i].i != AEABI_enum_forced_wide
		       && out_attr[i].i != in_attr[i].i
		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
		{
		  static const char *aeabi_enum_names[] =
		    { "", "variable-size", "32-bit", "" };
		  const char *in_name =
		    in_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[in_attr[i].i]
		    : "<unknown>";
		  const char *out_name =
		    out_attr[i].i < ARRAY_SIZE (aeabi_enum_names)
		    ? aeabi_enum_names[out_attr[i].i]
		    : "<unknown>";
		  _bfd_error_handler
		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
		     ibfd, in_name, out_name);
		}
	    }
	  break;
	case Tag_ABI_VFP_args:
	  /* Already done before the loop (see above).  */
	  break;
	case Tag_ABI_WMMX_args:
	  if (in_attr[i].i != out_attr[i].i)
	    {
	      _bfd_error_handler
		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
		 ibfd, obfd);
	      result = false;
	    }
	  break;
	case Tag_compatibility:
	  /* Merged in target-independent code.  */
	  break;
	case Tag_ABI_HardFP_use:
	  /* This is handled along with Tag_FP_arch.  */
	  break;
	case Tag_ABI_FP_16bit_format:
	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
	    {
	      if (in_attr[i].i != out_attr[i].i)
		{
		  _bfd_error_handler
		    (_("error: fp16 format mismatch between %pB and %pB"),
		     ibfd, obfd);
		  result = false;
		}
	    }
	  if (in_attr[i].i != 0)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_DIV_use:
	  /* A value of zero on input means that the divide instruction may
	     be used if available in the base architecture as specified via
	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
	     the user did not want divide instructions.  A value of 2
	     explicitly means that divide instructions were allowed in ARM
	     and Thumb state.  */
	  if (in_attr[i].i == out_attr[i].i)
	    /* Do nothing.  */ ;
	  else if (elf32_arm_attributes_forbid_div (in_attr)
		   && !elf32_arm_attributes_accept_div (out_attr))
	    out_attr[i].i = 1;
	  else if (elf32_arm_attributes_forbid_div (out_attr)
		   && elf32_arm_attributes_accept_div (in_attr))
	    out_attr[i].i = in_attr[i].i;
	  else if (in_attr[i].i == 2)
	    out_attr[i].i = in_attr[i].i;
	  break;

	case Tag_MPextension_use_legacy:
	  /* We don't output objects with Tag_MPextension_use_legacy - we
	     move the value to Tag_MPextension_use.  */
	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
	    {
	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
		{
		  _bfd_error_handler
		    (_("%pB has both the current and legacy "
		       "Tag_MPextension_use attributes"),
		     ibfd);
		  result = false;
		}
	    }

	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
	    out_attr[Tag_MPextension_use] = in_attr[i];

	  break;

	case Tag_nodefaults:
	  /* This tag is set if it exists, but the value is unused (and is
	     typically zero).  We don't actually need to do anything here -
	     the merge happens automatically when the type flags are merged
	     below.  */
	  break;
	case Tag_also_compatible_with:
	  /* Already done in Tag_CPU_arch.  */
	  break;
	case Tag_conformance:
	  /* Keep the attribute if it matches.  Throw it away otherwise.
	     No attribute means no claim to conform.  */
	  if (!in_attr[i].s || !out_attr[i].s
	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
	    out_attr[i].s = NULL;
	  break;

	default:
	  result
	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
	}

      /* If out_attr was copied from in_attr then it won't have a type yet.  */
      if (in_attr[i].type && !out_attr[i].type)
	out_attr[i].type = in_attr[i].type;
    }

  /* Merge Tag_compatibility attributes and any common GNU ones.  */
  if (!_bfd_elf_merge_object_attributes (ibfd, info))
    return false;

  /* Check for any attributes not known on ARM.  */
  result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);

  return result;
}
15056
15057
15058 /* Return TRUE if the two EABI versions are incompatible. */
15059
15060 static bool
15061 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15062 {
15063 /* v4 and v5 are the same spec before and after it was released,
15064 so allow mixing them. */
15065 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15066 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15067 return true;
15068
15069 return (iver == over);
15070 }
15071
15072 /* Merge backend specific data from an object file to the output
15073 object file when linking. */
15074
15075 static bool
15076 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15077
/* Display the flags field.  ABFD is the bfd being dumped; PTR is the
   FILE * to print to.  Always returns true.  */

static bool
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = 0x%lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the flag bits it has decoded so that any
     leftover (unrecognised) bits can be reported at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* VER4 shares the BE8/LE8 decoding below with VER5.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _(" <Unrecognised flag bits set>"));

  fputc ('\n', file);

  return true;
}
15222
15223 static int
15224 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15225 {
15226 switch (ELF_ST_TYPE (elf_sym->st_info))
15227 {
15228 case STT_ARM_TFUNC:
15229 return ELF_ST_TYPE (elf_sym->st_info);
15230
15231 case STT_ARM_16BIT:
15232 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15233 This allows us to distinguish between data used by Thumb instructions
15234 and non-data (which is probably code) inside Thumb regions of an
15235 executable. */
15236 if (type != STT_OBJECT && type != STT_TLS)
15237 return ELF_ST_TYPE (elf_sym->st_info);
15238 break;
15239
15240 default:
15241 break;
15242 }
15243
15244 return type;
15245 }
15246
15247 static asection *
15248 elf32_arm_gc_mark_hook (asection *sec,
15249 struct bfd_link_info *info,
15250 Elf_Internal_Rela *rel,
15251 struct elf_link_hash_entry *h,
15252 Elf_Internal_Sym *sym)
15253 {
15254 if (h != NULL)
15255 switch (ELF32_R_TYPE (rel->r_info))
15256 {
15257 case R_ARM_GNU_VTINHERIT:
15258 case R_ARM_GNU_VTENTRY:
15259 return NULL;
15260 }
15261
15262 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15263 }
15264
/* Look through the relocs for a section during the first phase.

   ABFD is the input bfd, SEC the input section and RELOCS its
   (already swapped-in) relocations.  This pass only records what
   later passes will need: GOT/PLT reference counts, TLS access
   styles, FDPIC function-descriptor counts and lists of relocations
   that may have to be copied into the output's dynamic reloc
   sections.  No section contents are modified here.  Returns FALSE
   on error (bad symbol index, allocation failure, or an absolute
   reloc that cannot be used in a shared object).  */

static bool
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bool call_reloc_p;
  bool may_become_dynamic_p;
  bool may_need_local_target_p;
  unsigned long nsyms;

  /* Nothing to record for a relocatable (-r) link.  */
  if (bfd_link_relocatable (info))
    return true;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return false;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return false;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned int r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
			      r_symndx);
	  return false;
	}

      /* Resolve the relocation's symbol: ISYM for local symbols, H
	 (following indirect/warning links) for globals.  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return false;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      call_reloc_p = false;
      may_become_dynamic_p = false;
      may_need_local_target_p = false;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOTOFFFUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;
		if (r_symndx >= elf32_arm_num_entries (abfd))
		  return false;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].gotofffuncdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.gotofffuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOTFUNCDESC:
	  {
	    if (h == NULL)
	      {
		/* Such a relocation is not supposed to be generated
		   by gcc on a static function.  */
		/* Anyway if needed it could be handled.  */
		return false;
	      }
	    else
	      {
		eh->fdpic_cnts.gotfuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_FUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;
		if (r_symndx >= elf32_arm_num_entries (abfd))
		  return false;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts (abfd) [r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.funcdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_GD32_FDPIC:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_IE32_FDPIC:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
	      case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    /* Initial-exec access from a shared object forces the
	       module's TLS to be allocated statically.  */
	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return false;
		if (r_symndx >= elf32_arm_num_entries (abfd))
		  {
		    _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
					r_symndx);
		    return false;
		  }

		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	case R_ARM_TLS_LDM32_FDPIC:
	  if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All the cases above funnel here: each of them needs a .got
	     section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return false;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = true;
	  may_need_local_target_p = true;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (htab->root.target_os != is_vxworks)
	    {
	      may_need_local_target_p = true;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* MOVW/MOVT immediates cannot be relocated at load time, so
	     they are unusable in position-independent output.  */
	  if (bfd_link_pic (info))
	    {
	      _bfd_error_handler
		(_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return false;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
	       || htab->fdpic_p)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = true;
		  may_need_local_target_p = true;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = true;
	    }
	  else
	    may_need_local_target_p = true;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return false;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return false;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      /* Count PLT references for globals and for local STT_GNU_IFUNC
	 symbols (which get .iplt entries).  */
      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return false;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return false;
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &h->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return false;
	    }

	  /* The list is kept most-recent-section first, so the current
	     section's record, if any, is at the head.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      size_t amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return false;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	  if (h == NULL && htab->fdpic_p && !bfd_link_pic (info)
	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI)
	    {
	      /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
		 that will become rofixup.  */
	      /* This is due to the fact that we suppose all will become rofixup.  */
	      _bfd_error_handler
		(_("FDPIC does not yet support %s relocation"
		   " to become dynamic for executable"),
		 elf32_arm_howto_table_1[r_type].name);
	      abort ();
	    }
	}
    }

  return true;
}
15733
/* Rewrite the relocations of output section O to account for the
   unwind-table edits (deleted duplicate EXIDX entries and a possible
   trailing CANTUNWIND entry) recorded on its input sections.  RELDATA
   describes O's output relocations; their contents are swapped in,
   adjusted in place and swapped back out.  Only SHT_ARM_EXIDX
   sections are affected.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only ARM exception-index sections carry unwind edits.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Select REL or RELA swap routines based on the entry size of the
     output relocation section.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Allocate one extra internal slot for a relocation added by an
     INSERT_EXIDX_CANTUNWIND_AT_END edit.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Script-generated relocation: copy it through unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = i->output_offset;

	  /* Find the matching input reloc data (REL or RELA) for this
	     input section.  */
	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  /* Each EXIDX table row is 8 bytes.  */
		  (*swap_in) (abfd, erela, irela);
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts the edits at or before this row; each
		     one removed an 8-byte row above it.  EDIT_NODE ends
		     up as the last such edit.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the relocation unless its own row was deleted;
		     shift its offset up past the deleted rows.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: pass all its
		 relocations through untouched.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Swap the surviving relocations back out over the original
     contents.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15894
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  Returns FALSE on error.  */

static bool
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bool again, is_v8m, first_bfd_browse = true;
  bool debug_sec_need_to_be_marked = false;
  asection *isec;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Secure-entry (CMSE) handling below only applies to ARMv8-M
     M-profile output.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = true;
  while (again)
    {
      again = false;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark any unmarked EXIDX section whose linked (sh_link)
	     text section has been marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = true;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return false;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (startswith (cmse_hash->root.root.root.string,
				  CMSE_PREFIX))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return false;
		      /* The debug sections related to these secure entry
			 functions are marked on enabling below flag.  */
		      debug_sec_need_to_be_marked = true;
		    }
		}

	      if (debug_sec_need_to_be_marked)
		{
		  /* Looping over all the sections of the object file containing
		     Armv8-M secure entry functions and marking all the debug
		     sections.  */
		  for (isec = sub->sections; isec != NULL; isec = isec->next)
		    {
		      /* Mark every debug section that is not already
			 marked.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			isec->gc_mark = 1 ;
		    }
		  debug_sec_need_to_be_marked = false;
		}
	    }
	}
      first_bfd_browse = false;
    }

  return true;
}
16006
16007 /* Treat mapping symbols as special target symbols. */
16008
16009 static bool
16010 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16011 {
16012 return bfd_is_arm_special_symbol_name (sym->name,
16013 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16014 }
16015
16016 /* If the ELF symbol SYM might be a function in SEC, return the
16017 function size and set *CODE_OFF to the function's entry point,
16018 otherwise return zero. */
16019
16020 static bfd_size_type
16021 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16022 bfd_vma *code_off)
16023 {
16024 bfd_size_type size;
16025 elf_symbol_type * elf_sym = (elf_symbol_type *) sym;
16026
16027 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16028 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16029 || sym->section != sec)
16030 return 0;
16031
16032 size = (sym->flags & BSF_SYNTHETIC) ? 0 : elf_sym->internal_elf_sym.st_size;
16033
16034 if (!(sym->flags & BSF_SYNTHETIC))
16035 switch (ELF_ST_TYPE (elf_sym->internal_elf_sym.st_info))
16036 {
16037 case STT_NOTYPE:
16038 /* Ignore symbols created by the annobin plugin for gcc and clang.
16039 These symbols are hidden, local, notype and have a size of 0. */
16040 if (size == 0
16041 && sym->flags & BSF_LOCAL
16042 && ELF_ST_VISIBILITY (elf_sym->internal_elf_sym.st_other) == STV_HIDDEN)
16043 return 0;
16044 /* Fall through. */
16045 case STT_FUNC:
16046 case STT_ARM_TFUNC:
16047 /* FIXME: Allow STT_GNU_IFUNC as well ? */
16048 break;
16049 default:
16050 return 0;
16051 }
16052
16053 if ((sym->flags & BSF_LOCAL)
16054 && bfd_is_arm_special_symbol_name (sym->name,
16055 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16056 return 0;
16057
16058 *code_off = sym->value;
16059
16060 /* Do not return 0 for the function's size. */
16061 return size ? size : 1;
16062
16063 }
16064
16065 static bool
16066 elf32_arm_find_inliner_info (bfd * abfd,
16067 const char ** filename_ptr,
16068 const char ** functionname_ptr,
16069 unsigned int * line_ptr)
16070 {
16071 bool found;
16072 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16073 functionname_ptr, line_ptr,
16074 & elf_tdata (abfd)->dwarf2_find_line_info);
16075 return found;
16076 }
16077
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  Returns FALSE on error.  */

static bool
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      return true;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return true;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return true;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return true;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      /* Symbols defined in read-only sections are copied into the
	 relro data area so they can be write-protected after
	 relocation.  */
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16212
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs needed by the symbol H.  Called for every global
   symbol via elf_link_hash_traverse; INF is really the struct
   bfd_link_info for the link.  Returns false on error (which stops
   the traversal), true otherwise.  */

static bool
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Nothing to do for indirect symbols; the traversal visits the
     symbol they resolve to separately.  */
  if (h->root.type == bfd_link_hash_indirect)
    return true;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  /* First decide whether the symbol needs a PLT entry (.plt or, for
     locally-bound ifuncs, .iplt) and reserve the PLT-related
     relocations.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turn means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->root.target_os == is_vxworks && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* PLT entry not needed after all: mark it unused.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  /* Next, reserve GOT slots (and their dynamic relocations) according
     to the symbol's TLS classification.  */
  if (h->got.refcount > 0)
    {
      asection *s;
      bool dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created
	  && h->dynindx == -1
	  && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

      s = htab->root.sgot;
      h->got.offset = s->size;

      if (tls_type == GOT_UNKNOWN)
	abort ();

      if (tls_type == GOT_NORMAL)
	/* Non-TLS symbols need one GOT slot.  */
	s->size += 4;
      else
	{
	  if (tls_type & GOT_TLS_GDESC)
	    {
	      /* R_ARM_TLS_DESC needs 2 GOT slots.  Note these live in
		 .got.plt, not .got, offset relative to the jump table.  */
	      eh->tlsdesc_got
		= (htab->root.sgotplt->size
		   - elf32_arm_compute_jump_table_size (htab));
	      htab->root.sgotplt->size += 8;
	      h->got.offset = (bfd_vma) -2;
	      /* plt.got_offset needs to know there's a TLS_DESC
		 reloc in the middle of .got.plt.  */
	      htab->num_tls_desc++;
	    }

	  if (tls_type & GOT_TLS_GD)
	    {
	      /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		 consecutive GOT slots.  If the symbol is both GD
		 and GDESC, got.offset may have been
		 overwritten.  */
	      h->got.offset = s->size;
	      s->size += 8;
	    }

	  if (tls_type & GOT_TLS_IE)
	    /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
	       slot.  */
	    s->size += 4;
	}

      dyn = htab->root.dynamic_sections_created;

      /* INDX is the dynamic symbol index to relocate against, or 0
	 when the reference resolves locally.  */
      indx = 0;
      if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, bfd_link_pic (info), h)
	  && (!bfd_link_pic (info)
	      || !SYMBOL_REFERENCES_LOCAL (info, h)))
	indx = h->dynindx;

      if (tls_type != GOT_NORMAL
	  && (bfd_link_dll (info) || indx != 0)
	  && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
	      || h->root.type != bfd_link_hash_undefweak))
	{
	  if (tls_type & GOT_TLS_IE)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	  if (tls_type & GOT_TLS_GD)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	  if (tls_type & GOT_TLS_GDESC)
	    {
	      elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	      /* GDESC needs a trampoline to jump to.  */
	      htab->tls_trampoline = -1;
	    }

	  /* Only GD needs it.  GDESC just emits one relocation per
	     2 entries.  */
	  if ((tls_type & GOT_TLS_GD) && indx != 0)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
      else if (((indx != -1) || htab->fdpic_p)
	       && !SYMBOL_REFERENCES_LOCAL (info, h))
	{
	  if (htab->root.dynamic_sections_created)
	    /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	}
      else if (h->type == STT_GNU_IFUNC
	       && eh->plt.noncall_refcount == 0)
	/* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	   they all resolve dynamically instead.  Reserve room for the
	   GOT entry's R_ARM_IRELATIVE relocation.  */
	elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
      else if (bfd_link_pic (info)
	       && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	/* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
      else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	/* Reserve room for rofixup for FDPIC executable.  */
	/* TLS relocs do not need space since they are completely
	   resolved.  */
	htab->srofixup->size += 4;
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* FDPIC support.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)
	abort ();

      /* We only allocate one function descriptor with its associated
	 relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;
	  s->size += 8;
	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic (info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return false;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its
	     associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
		 rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
      s->size += 4;
      if (h->dynindx == -1 && !bfd_link_pic (info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return false;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its
	     associated relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two
		 rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic (info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* Will need one dynamic reloc per reference.  Will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, true, false, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (h->dyn_relocs == NULL)
    return true;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info)
      || htab->root.is_relocatable_executable
      || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink list entries whose count drops to zero.  */
	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->root.target_os == is_vxworks)
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return false;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return false;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return false;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1
	       && (!bfd_link_pic (info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic (info))
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return true;
}
16695
16696 void
16697 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16698 int byteswap_code)
16699 {
16700 struct elf32_arm_link_hash_table *globals;
16701
16702 globals = elf32_arm_hash_table (info);
16703 if (globals == NULL)
16704 return;
16705
16706 globals->byteswap_code = byteswap_code;
16707 }
16708
/* Set the sizes of the dynamic sections.  This is the backend's
   size_dynamic_sections hook: it lays out .got/.plt/.iplt and the
   relocation sections for local symbols, traverses the global hash
   table to do the same for global symbols, sizes the ARM glue
   sections, and finally allocates memory for all linker-created
   dynamic sections.  Returns false on error.  */

static bool
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  /* NOTE(review): output_bfd is in fact referenced below (DYNAMIC flag
     test and the final add-dynamic-tags call); ATTRIBUTE_UNUSED is
     harmless but misleading.  */
  bfd * dynobj;
  asection * s;
  bool relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      /* Reserve space for dynamic relocs recorded against local
	 symbols in each input section.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (htab->root.target_os == is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  if (htab->fdpic_p && !bfd_link_pic (info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (local_got == NULL)
	continue;

      /* Walk the per-local-symbol arrays in lockstep; SYMNDX tracks
	 the local symbol index.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  if (symndx >= elf32_arm_num_entries (ibfd))
	    return false;

	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic (info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic (info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic (info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, true,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, ibfd,
					    symndx);
	      if (isym == NULL)
		return false;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* Scan failures are reported but deliberately do not stop the
	 link.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size (htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if ((info->flags & DF_BIND_NOW))
	htab->root.tlsdesc_plt = 0;
      else
	{
	  htab->root.tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->root.tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  relocs = false;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_section_name (s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  ;
	}
      else if (startswith (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = true;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return false;
    }

  return _bfd_elf_maybe_vxworks_add_dynamic_tags (output_bfd, info,
						  relocs);
}
17093
17094 /* Size sections even though they're not dynamic. We use it to setup
17095 _TLS_MODULE_BASE_, if needed. */
17096
17097 static bool
17098 elf32_arm_always_size_sections (bfd *output_bfd,
17099 struct bfd_link_info *info)
17100 {
17101 asection *tls_sec;
17102 struct elf32_arm_link_hash_table *htab;
17103
17104 htab = elf32_arm_hash_table (info);
17105
17106 if (bfd_link_relocatable (info))
17107 return true;
17108
17109 tls_sec = elf_hash_table (info)->tls_sec;
17110
17111 if (tls_sec)
17112 {
17113 struct elf_link_hash_entry *tlsbase;
17114
17115 tlsbase = elf_link_hash_lookup
17116 (elf_hash_table (info), "_TLS_MODULE_BASE_", true, true, false);
17117
17118 if (tlsbase)
17119 {
17120 struct bfd_link_hash_entry *bh = NULL;
17121 const struct elf_backend_data *bed
17122 = get_elf_backend_data (output_bfd);
17123
17124 if (!(_bfd_generic_link_add_one_symbol
17125 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17126 tls_sec, 0, NULL, false,
17127 bed->collect, &bh)))
17128 return false;
17129
17130 tlsbase->type = STT_TLS;
17131 tlsbase = (struct elf_link_hash_entry *)bh;
17132 tlsbase->def_regular = 1;
17133 tlsbase->other = STV_HIDDEN;
17134 (*bed->elf_backend_hide_symbol) (info, tlsbase, true);
17135 }
17136 }
17137
17138 if (htab->fdpic_p && !bfd_link_relocatable (info)
17139 && !bfd_elf_stack_segment_size (output_bfd, info,
17140 "__stacksize", DEFAULT_STACK_SIZE))
17141 return false;
17142
17143 return true;
17144 }
17145
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  Called once per dynamic symbol H; SYM is the
   external symbol about to be written to the output symbol table and
   may be adjusted in place.  Returns false on unrecoverable error.  */

static bool
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* An offset of -1 means the symbol has no PLT entry.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* Regular .plt entries need a dynamic symbol index for the
	     JUMP_SLOT relocation; .iplt entries are filled in when the
	     local IFUNC symbol is processed instead.  */
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return false;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.
	     Redirect the output symbol to point at that entry.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  A copy reloc is
	 only meaningful for a symbol defined in the executable.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Read-only-after-relocation data goes in .data.rel.ro and its
	 relocations in the matching reloc section.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p
	  && htab->root.target_os != is_vxworks
	  && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return true;
}
17237
17238 static void
17239 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17240 void *contents,
17241 const unsigned long *template, unsigned count)
17242 {
17243 unsigned ix;
17244
17245 for (ix = 0; ix != count; ix++)
17246 {
17247 unsigned long insn = template[ix];
17248
17249 /* Emit mov pc,rx if bx is not permitted. */
17250 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17251 insn = (insn & 0xf000000f) | 0x01a0f000;
17252 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17253 }
17254 }
17255
17256 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17257 other variants, NaCl needs this entry in a static executable's
17258 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17259 zero. For .iplt really only the last bundle is useful, and .iplt
17260 could have a shorter first entry, with each individual PLT entry's
17261 relative branch calculated differently so it targets the last
17262 bundle instead of the instruction before it (labelled .Lplt_tail
17263 above). But it's simpler to keep the size and layout of PLT0
17264 consistent with the dynamic case, at the cost of some dead code at
17265 the start of .iplt and the one dead store to the stack at the start
17266 of .Lplt_tail. */
17267 static void
17268 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17269 asection *plt, bfd_vma got_displacement)
17270 {
17271 unsigned int i;
17272
17273 put_arm_insn (htab, output_bfd,
17274 elf32_arm_nacl_plt0_entry[0]
17275 | arm_movw_immediate (got_displacement),
17276 plt->contents + 0);
17277 put_arm_insn (htab, output_bfd,
17278 elf32_arm_nacl_plt0_entry[1]
17279 | arm_movt_immediate (got_displacement),
17280 plt->contents + 4);
17281
17282 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17283 put_arm_insn (htab, output_bfd,
17284 elf32_arm_nacl_plt0_entry[i],
17285 plt->contents + (i * 4));
17286 }
17287
/* Finish up the dynamic sections: patch the .dynamic entries that
   could not be finalised earlier, emit the PLT header and TLS
   trampolines, install the GOT header words, and terminate the FDPIC
   .rofixup section.  Returns false on error.  */

static bool
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return false;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return false;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (sgot != NULL);

      /* Walk every entry in .dynamic, rewriting the ones whose values
	 depend on final section layout.  */
      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	    default:
	      /* VxWorks has extra dynamic tags of its own.  */
	      if (htab->root.target_os == is_vxworks
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags were already filled in by generic code.  */
	    case DT_HASH:
	    case DT_STRTAB:
	    case DT_SYMTAB:
	    case DT_VERSYM:
	    case DT_VERDEF:
	    case DT_VERNEED:
	      break;

	    case DT_PLTGOT:
	      name = ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return false;
		}
	      dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Sizes/addresses of the ordinary reloc sections were
	       handled by the generic linker.  */
	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->root.tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     false, false, true);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->root.target_os == is_vxworks)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->root.target_os == is_nacl)
	    /* NaCl uses its own bundle-aligned PLT0 template.  */
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      /* Thumb-2-only PLT0: three instructions plus the GOT
		 displacement word at offset 12.  */
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      /* Standard Arm-state PLT0: four instructions, then the
		 GOT displacement word.  */
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->root.tlsdesc_plt)
	{
	  /* Emit the lazy TLS-descriptor trampoline and patch its two
	     trailing data words (GOT offsets) in place.  */
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->root.tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->root.tlsdesc_got
		      - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->root.tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->root.tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->root.tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  /* Pad the three-word trampoline out to the four-word PLT
	     entry size.  */
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->root.target_os == is_vxworks
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry owns a pair of unloaded relocations: one
	     against _GLOBAL_OFFSET_TABLE_, one against _PROCEDURE_
	     LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->root.target_os == is_nacl
      && htab->root.iplt != NULL
      && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (0 when static).  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  /* GOT[1] and GOT[2] are reserved for the dynamic linker.  */
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return true;
}
17605
17606 static bool
17607 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17608 {
17609 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17610 struct elf32_arm_link_hash_table *globals;
17611 struct elf_segment_map *m;
17612
17613 if (!_bfd_elf_init_file_header (abfd, link_info))
17614 return false;
17615
17616 i_ehdrp = elf_elfheader (abfd);
17617
17618 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17619 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17620 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17621
17622 if (link_info)
17623 {
17624 globals = elf32_arm_hash_table (link_info);
17625 if (globals != NULL && globals->byteswap_code)
17626 i_ehdrp->e_flags |= EF_ARM_BE8;
17627
17628 if (globals->fdpic_p)
17629 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17630 }
17631
17632 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17633 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17634 {
17635 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17636 if (abi == AEABI_VFP_args_vfp)
17637 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17638 else
17639 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17640 }
17641
17642 /* Scan segment to set p_flags attribute if it contains only sections with
17643 SHF_ARM_PURECODE flag. */
17644 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17645 {
17646 unsigned int j;
17647
17648 if (m->count == 0)
17649 continue;
17650 for (j = 0; j < m->count; j++)
17651 {
17652 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17653 break;
17654 }
17655 if (j == m->count)
17656 {
17657 m->p_flags = PF_X;
17658 m->p_flags_valid = 1;
17659 }
17660 }
17661 return true;
17662 }
17663
17664 static enum elf_reloc_type_class
17665 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17666 const asection *rel_sec ATTRIBUTE_UNUSED,
17667 const Elf_Internal_Rela *rela)
17668 {
17669 switch ((int) ELF32_R_TYPE (rela->r_info))
17670 {
17671 case R_ARM_RELATIVE:
17672 return reloc_class_relative;
17673 case R_ARM_JUMP_SLOT:
17674 return reloc_class_plt;
17675 case R_ARM_COPY:
17676 return reloc_class_copy;
17677 case R_ARM_IRELATIVE:
17678 return reloc_class_ifunc;
17679 default:
17680 return reloc_class_normal;
17681 }
17682 }
17683
/* ARM-specific work done just before the BFD is written out: refresh
   the contents of the ARM note section, if present.  */

static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17689
/* Backend final-write hook: do the ARM note update, then hand off to
   the generic ELF final write processing.  */

static bool
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}
17696
17697 /* Return TRUE if this is an unwinding table entry. */
17698
17699 static bool
17700 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17701 {
17702 return (startswith (name, ELF_STRING_ARM_unwind)
17703 || startswith (name, ELF_STRING_ARM_unwind_once));
17704 }
17705
17706
17707 /* Set the type and flags for an ARM section. We do this by
17708 the section name, which is a hack, but ought to work. */
17709
17710 static bool
17711 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17712 {
17713 const char * name;
17714
17715 name = bfd_section_name (sec);
17716
17717 if (is_arm_elf_unwind_section_name (abfd, name))
17718 {
17719 hdr->sh_type = SHT_ARM_EXIDX;
17720 hdr->sh_flags |= SHF_LINK_ORDER;
17721 }
17722
17723 if (sec->flags & SEC_ELF_PURECODE)
17724 hdr->sh_flags |= SHF_ARM_PURECODE;
17725
17726 return true;
17727 }
17728
17729 /* Handle an ARM specific section when reading an object file. This is
17730 called when bfd_section_from_shdr finds a section with an unknown
17731 type. */
17732
17733 static bool
17734 elf32_arm_section_from_shdr (bfd *abfd,
17735 Elf_Internal_Shdr * hdr,
17736 const char *name,
17737 int shindex)
17738 {
17739 /* There ought to be a place to keep ELF backend specific flags, but
17740 at the moment there isn't one. We just keep track of the
17741 sections by their name, instead. Fortunately, the ABI gives
17742 names for all the ARM specific sections, so we will probably get
17743 away with this. */
17744 switch (hdr->sh_type)
17745 {
17746 case SHT_ARM_EXIDX:
17747 case SHT_ARM_PREEMPTMAP:
17748 case SHT_ARM_ATTRIBUTES:
17749 break;
17750
17751 default:
17752 return false;
17753 }
17754
17755 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17756 return false;
17757
17758 return true;
17759 }
17760
17761 static _arm_elf_section_data *
17762 get_arm_elf_section_data (asection * sec)
17763 {
17764 if (sec && sec->owner && is_arm_elf (sec->owner))
17765 return elf32_arm_section_data (sec);
17766 else
17767 return NULL;
17768 }
17769
/* Context threaded through the routines that emit linker-generated
   local symbols (mapping symbols and stub symbols).  */
typedef struct
{
  void *flaginfo;		/* Opaque cookie handed back to FUNC.  */
  struct bfd_link_info *info;	/* The link being performed.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback that actually writes one symbol to the output.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* The three ARM mapping symbol kinds: $a (Arm code), $t (Thumb code)
   and $d (data).  Order matches the names[] tables below.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
17786
17787
17788 /* Output a single mapping symbol. */
17789
17790 static bool
17791 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17792 enum map_symbol_type type,
17793 bfd_vma offset)
17794 {
17795 static const char *names[3] = {"$a", "$t", "$d"};
17796 Elf_Internal_Sym sym;
17797
17798 sym.st_value = osi->sec->output_section->vma
17799 + osi->sec->output_offset
17800 + offset;
17801 sym.st_size = 0;
17802 sym.st_other = 0;
17803 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17804 sym.st_shndx = osi->sec_shndx;
17805 sym.st_target_internal = 0;
17806 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17807 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17808 }
17809
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The mapping symbol layout differs per target OS / PLT flavour.  */

static bool
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bool is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry for this symbol: nothing to map.  */
  if (root_plt->offset == (bfd_vma) -1)
    return true;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return false;

  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;	/* .iplt has no PLT0 header.  */
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit, which marks a Thumb-mode entry point.  */
  addr = root_plt->offset & -2;
  if (htab->root.target_os == is_vxworks)
    {
      /* VxWorks PLT entries interleave code and data words.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return false;
    }
  else if (htab->root.target_os == is_nacl)
    {
      /* NaCl entries are pure Arm code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
    }
  else if (htab->fdpic_p)
    {
      enum map_symbol_type type = using_thumb_only (htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* An optional Thumb entry thunk precedes the entry proper.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return false;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return false;
      /* The larger (lazy-binding) FDPIC entry resumes code after the
	 data words.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return false;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return false;
    }
  else
    {
      bool thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return false;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return false;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return false;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return false;
	}
#endif
    }

  return true;
}
17910
17911 /* Output mapping symbols for PLT entries associated with H. */
17912
17913 static bool
17914 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17915 {
17916 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17917 struct elf32_arm_link_hash_entry *eh;
17918
17919 if (h->root.type == bfd_link_hash_indirect)
17920 return true;
17921
17922 if (h->root.type == bfd_link_hash_warning)
17923 /* When warning symbols are created, they **replace** the "real"
17924 entry in the hash table, thus we never get to see the real
17925 symbol in a hash traversal. So look at it now. */
17926 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17927
17928 eh = (struct elf32_arm_link_hash_entry *) h;
17929 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17930 &h->plt, &eh->plt);
17931 }
17932
17933 /* Bind a veneered symbol to its veneer identified by its hash entry
17934 STUB_ENTRY. The veneered location thus loose its symbol. */
17935
17936 static void
17937 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17938 {
17939 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17940
17941 BFD_ASSERT (hash);
17942 hash->root.root.u.def.section = stub_entry->stub_sec;
17943 hash->root.root.u.def.value = stub_entry->stub_offset;
17944 hash->root.size = stub_entry->stub_size;
17945 }
17946
17947 /* Output a single local symbol for a generated stub. */
17948
17949 static bool
17950 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17951 bfd_vma offset, bfd_vma size)
17952 {
17953 Elf_Internal_Sym sym;
17954
17955 sym.st_value = osi->sec->output_section->vma
17956 + osi->sec->output_offset
17957 + offset;
17958 sym.st_size = size;
17959 sym.st_other = 0;
17960 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17961 sym.st_shndx = osi->sec_shndx;
17962 sym.st_target_internal = 0;
17963 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17964 }
17965
/* Hash-table traversal callback: emit the local symbol and the mapping
   symbols ($a/$t/$d) for one long-branch stub, provided the stub lives
   in the section currently being processed (IN_ARG).  */

static bool
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return true;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    /* The veneered symbol itself is rebound onto the stub; no new
       local symbol is emitted.  */
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol for the stub, with bit 0 of the value set
	 when the stub starts in Thumb state.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return false;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return false;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol at every transition
     between Arm code, Thumb code and data, and accumulating the byte
     size of each template element.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return false;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return false;
	}

      /* Advance by the size of this template element: 4 bytes for Arm
	 instructions, 32-bit Thumb instructions and data words; 2 for
	 16-bit Thumb instructions.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return false;
	}
    }

  return true;
}
18073
18074 /* Output mapping symbols for linker generated sections,
18075 and for those data-only sections that do not have a
18076 $d. */
18077
18078 static bool
18079 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18080 struct bfd_link_info *info,
18081 void *flaginfo,
18082 int (*func) (void *, const char *,
18083 Elf_Internal_Sym *,
18084 asection *,
18085 struct elf_link_hash_entry *))
18086 {
18087 output_arch_syminfo osi;
18088 struct elf32_arm_link_hash_table *htab;
18089 bfd_vma offset;
18090 bfd_size_type size;
18091 bfd *input_bfd;
18092
18093 htab = elf32_arm_hash_table (info);
18094 if (htab == NULL)
18095 return false;
18096
18097 check_use_blx (htab);
18098
18099 osi.flaginfo = flaginfo;
18100 osi.info = info;
18101 osi.func = func;
18102
18103 /* Add a $d mapping symbol to data-only sections that
18104 don't have any mapping symbol. This may result in (harmless) redundant
18105 mapping symbols. */
18106 for (input_bfd = info->input_bfds;
18107 input_bfd != NULL;
18108 input_bfd = input_bfd->link.next)
18109 {
18110 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18111 for (osi.sec = input_bfd->sections;
18112 osi.sec != NULL;
18113 osi.sec = osi.sec->next)
18114 {
18115 if (osi.sec->output_section != NULL
18116 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18117 != 0)
18118 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18119 == SEC_HAS_CONTENTS
18120 && get_arm_elf_section_data (osi.sec) != NULL
18121 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18122 && osi.sec->size > 0
18123 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18124 {
18125 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18126 (output_bfd, osi.sec->output_section);
18127 if (osi.sec_shndx != (int)SHN_BAD)
18128 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18129 }
18130 }
18131 }
18132
18133 /* ARM->Thumb glue. */
18134 if (htab->arm_glue_size > 0)
18135 {
18136 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18137 ARM2THUMB_GLUE_SECTION_NAME);
18138
18139 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18140 (output_bfd, osi.sec->output_section);
18141 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18142 || htab->pic_veneer)
18143 size = ARM2THUMB_PIC_GLUE_SIZE;
18144 else if (htab->use_blx)
18145 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18146 else
18147 size = ARM2THUMB_STATIC_GLUE_SIZE;
18148
18149 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18150 {
18151 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18152 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18153 }
18154 }
18155
18156 /* Thumb->ARM glue. */
18157 if (htab->thumb_glue_size > 0)
18158 {
18159 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18160 THUMB2ARM_GLUE_SECTION_NAME);
18161
18162 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18163 (output_bfd, osi.sec->output_section);
18164 size = THUMB2ARM_GLUE_SIZE;
18165
18166 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18167 {
18168 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18169 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18170 }
18171 }
18172
18173 /* ARMv4 BX veneers. */
18174 if (htab->bx_glue_size > 0)
18175 {
18176 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18177 ARM_BX_GLUE_SECTION_NAME);
18178
18179 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18180 (output_bfd, osi.sec->output_section);
18181
18182 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18183 }
18184
18185 /* Long calls stubs. */
18186 if (htab->stub_bfd && htab->stub_bfd->sections)
18187 {
18188 asection* stub_sec;
18189
18190 for (stub_sec = htab->stub_bfd->sections;
18191 stub_sec != NULL;
18192 stub_sec = stub_sec->next)
18193 {
18194 /* Ignore non-stub sections. */
18195 if (!strstr (stub_sec->name, STUB_SUFFIX))
18196 continue;
18197
18198 osi.sec = stub_sec;
18199
18200 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18201 (output_bfd, osi.sec->output_section);
18202
18203 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18204 }
18205 }
18206
18207 /* Finally, output mapping symbols for the PLT. */
18208 if (htab->root.splt && htab->root.splt->size > 0)
18209 {
18210 osi.sec = htab->root.splt;
18211 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18212 (output_bfd, osi.sec->output_section));
18213
18214 /* Output mapping symbols for the plt header. */
18215 if (htab->root.target_os == is_vxworks)
18216 {
18217 /* VxWorks shared libraries have no PLT header. */
18218 if (!bfd_link_pic (info))
18219 {
18220 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18221 return false;
18222 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18223 return false;
18224 }
18225 }
18226 else if (htab->root.target_os == is_nacl)
18227 {
18228 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18229 return false;
18230 }
18231 else if (using_thumb_only (htab) && !htab->fdpic_p)
18232 {
18233 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18234 return false;
18235 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18236 return false;
18237 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18238 return false;
18239 }
18240 else if (!htab->fdpic_p)
18241 {
18242 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18243 return false;
18244 #ifndef FOUR_WORD_PLT
18245 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18246 return false;
18247 #endif
18248 }
18249 }
18250 if (htab->root.target_os == is_nacl
18251 && htab->root.iplt
18252 && htab->root.iplt->size > 0)
18253 {
18254 /* NaCl uses a special first entry in .iplt too. */
18255 osi.sec = htab->root.iplt;
18256 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18257 (output_bfd, osi.sec->output_section));
18258 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18259 return false;
18260 }
18261 if ((htab->root.splt && htab->root.splt->size > 0)
18262 || (htab->root.iplt && htab->root.iplt->size > 0))
18263 {
18264 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18265 for (input_bfd = info->input_bfds;
18266 input_bfd != NULL;
18267 input_bfd = input_bfd->link.next)
18268 {
18269 struct arm_local_iplt_info **local_iplt;
18270 unsigned int i, num_syms;
18271
18272 local_iplt = elf32_arm_local_iplt (input_bfd);
18273 if (local_iplt != NULL)
18274 {
18275 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18276 if (num_syms > elf32_arm_num_entries (input_bfd))
18277 {
18278 _bfd_error_handler (_("\
18279 %pB: Number of symbols in input file has increased from %lu to %u\n"),
18280 input_bfd,
18281 (unsigned long) elf32_arm_num_entries (input_bfd),
18282 num_syms);
18283 return false;
18284 }
18285 for (i = 0; i < num_syms; i++)
18286 if (local_iplt[i] != NULL
18287 && !elf32_arm_output_plt_map_1 (&osi, true,
18288 &local_iplt[i]->root,
18289 &local_iplt[i]->arm))
18290 return false;
18291 }
18292 }
18293 }
18294 if (htab->root.tlsdesc_plt != 0)
18295 {
18296 /* Mapping symbols for the lazy tls trampoline. */
18297 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM,
18298 htab->root.tlsdesc_plt))
18299 return false;
18300
18301 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18302 htab->root.tlsdesc_plt + 24))
18303 return false;
18304 }
18305 if (htab->tls_trampoline != 0)
18306 {
18307 /* Mapping symbols for the tls trampoline. */
18308 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18309 return false;
18310 #ifdef FOUR_WORD_PLT
18311 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18312 htab->tls_trampoline + 12))
18313 return false;
18314 #endif
18315 }
18316
18317 return true;
18318 }
18319
18320 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18321 the import library. All SYMCOUNT symbols of ABFD can be examined
18322 from their pointers in SYMS. Pointers of symbols to keep should be
18323 stored continuously at the beginning of that array.
18324
18325 Returns the number of symbols to keep. */
18326
18327 static unsigned int
18328 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18329 struct bfd_link_info *info,
18330 asymbol **syms, long symcount)
18331 {
18332 size_t maxnamelen;
18333 char *cmse_name;
18334 long src_count, dst_count = 0;
18335 struct elf32_arm_link_hash_table *htab;
18336
18337 htab = elf32_arm_hash_table (info);
18338 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18339 symcount = 0;
18340
18341 maxnamelen = 128;
18342 cmse_name = (char *) bfd_malloc (maxnamelen);
18343 BFD_ASSERT (cmse_name);
18344
18345 for (src_count = 0; src_count < symcount; src_count++)
18346 {
18347 struct elf32_arm_link_hash_entry *cmse_hash;
18348 asymbol *sym;
18349 flagword flags;
18350 char *name;
18351 size_t namelen;
18352
18353 sym = syms[src_count];
18354 flags = sym->flags;
18355 name = (char *) bfd_asymbol_name (sym);
18356
18357 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18358 continue;
18359 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18360 continue;
18361
18362 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18363 if (namelen > maxnamelen)
18364 {
18365 cmse_name = (char *)
18366 bfd_realloc (cmse_name, namelen);
18367 maxnamelen = namelen;
18368 }
18369 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18370 cmse_hash = (struct elf32_arm_link_hash_entry *)
18371 elf_link_hash_lookup (&(htab)->root, cmse_name, false, false, true);
18372
18373 if (!cmse_hash
18374 || (cmse_hash->root.root.type != bfd_link_hash_defined
18375 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18376 || cmse_hash->root.type != STT_FUNC)
18377 continue;
18378
18379 syms[dst_count++] = sym;
18380 }
18381 free (cmse_name);
18382
18383 syms[dst_count] = NULL;
18384
18385 return dst_count;
18386 }
18387
18388 /* Filter symbols of ABFD to include in the import library. All
18389 SYMCOUNT symbols of ABFD can be examined from their pointers in
18390 SYMS. Pointers of symbols to keep should be stored continuously at
18391 the beginning of that array.
18392
18393 Returns the number of symbols to keep. */
18394
18395 static unsigned int
18396 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18397 struct bfd_link_info *info,
18398 asymbol **syms, long symcount)
18399 {
18400 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18401
18402 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18403 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18404 library to be a relocatable object file. */
18405 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18406 if (globals->cmse_implib)
18407 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18408 else
18409 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18410 }
18411
18412 /* Allocate target specific section data. */
18413
18414 static bool
18415 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18416 {
18417 if (!sec->used_by_bfd)
18418 {
18419 _arm_elf_section_data *sdata;
18420 size_t amt = sizeof (*sdata);
18421
18422 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18423 if (sdata == NULL)
18424 return false;
18425 sec->used_by_bfd = sdata;
18426 }
18427
18428 return _bfd_elf_new_section_hook (abfd, sec);
18429 }
18430
18431
18432 /* Used to order a list of mapping symbols by address. */
18433
18434 static int
18435 elf32_arm_compare_mapping (const void * a, const void * b)
18436 {
18437 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18438 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18439
18440 if (amap->vma > bmap->vma)
18441 return 1;
18442 else if (amap->vma < bmap->vma)
18443 return -1;
18444 else if (amap->type > bmap->type)
18445 /* Ensure results do not depend on the host qsort for objects with
18446 multiple mapping symbols at the same address by sorting on type
18447 after vma. */
18448 return 1;
18449 else if (amap->type < bmap->type)
18450 return -1;
18451 else
18452 return 0;
18453 }
18454
18455 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18456
18457 static unsigned long
18458 offset_prel31 (unsigned long addr, bfd_vma offset)
18459 {
18460 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18461 }
18462
18463 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18464 relocations. */
18465
18466 static void
18467 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18468 {
18469 unsigned long first_word = bfd_get_32 (output_bfd, from);
18470 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18471
18472 /* High bit of first word is supposed to be zero. */
18473 if ((first_word & 0x80000000ul) == 0)
18474 first_word = offset_prel31 (first_word, offset);
18475
18476 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18477 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18478 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18479 second_word = offset_prel31 (second_word, offset);
18480
18481 bfd_put_32 (output_bfd, first_word, to);
18482 bfd_put_32 (output_bfd, second_word, to + 4);
18483 }
18484
18485 /* Data for make_branch_to_a8_stub(). */
18486
struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* In-memory copy of that section's contents.  */
  bfd_byte *contents;
};
18492
18493
18494 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18495 places for a particular section. */
18496
static bool
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only Cortex-A8 erratum stubs targeting the section currently being
     written are of interest here.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return true;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch offset is computed from the
     word-aligned source address.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return false;
    }

  /* Select the Thumb-2 branch opcode skeleton; the 24-bit offset
     encoding below is shared by all three variants via jump24.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;	/* B.W.  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;	/* BLX.  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL.  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return false;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	/* Pack the offset into the T4 encoding fields:
	   imm11, imm10, then the I2/I1/S-derived J2/J1/S bits.  */
	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return false;
    }

  /* Thumb-2 instructions are stored as two little-endian halfwords,
     most-significant halfword first.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return true;
}
18601
18602 /* Beginning of stm32l4xx work-around. */
18603
18604 /* Functions encoding instructions necessary for the emission of the
18605 fix-stm32l4xx-629360.
18606 Encoding is extracted from the
18607 ARM (C) Architecture Reference Manual
18608 ARMv7-A and ARMv7-R edition
18609 ARM DDI 0406C.b (ID072512). */
18610
18611 static inline bfd_vma
18612 create_instruction_branch_absolute (int branch_offset)
18613 {
18614 /* A8.8.18 B (A8-334)
18615 B target_address (Encoding T4). */
18616 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18617 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18618 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18619
18620 int s = ((branch_offset & 0x1000000) >> 24);
18621 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18622 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18623
18624 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18625 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18626
18627 bfd_vma patched_inst = 0xf0009000
18628 | s << 26 /* S. */
18629 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18630 | j1 << 13 /* J1. */
18631 | j2 << 11 /* J2. */
18632 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18633
18634 return patched_inst;
18635 }
18636
18637 static inline bfd_vma
18638 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18639 {
18640 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18641 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18642 bfd_vma patched_inst = 0xe8900000
18643 | (/*W=*/wback << 21)
18644 | (base_reg << 16)
18645 | (reg_mask & 0x0000ffff);
18646
18647 return patched_inst;
18648 }
18649
18650 static inline bfd_vma
18651 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18652 {
18653 /* A8.8.60 LDMDB/LDMEA (A8-402)
18654 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18655 bfd_vma patched_inst = 0xe9100000
18656 | (/*W=*/wback << 21)
18657 | (base_reg << 16)
18658 | (reg_mask & 0x0000ffff);
18659
18660 return patched_inst;
18661 }
18662
18663 static inline bfd_vma
18664 create_instruction_mov (int target_reg, int source_reg)
18665 {
18666 /* A8.8.103 MOV (register) (A8-486)
18667 MOV Rd, Rm (Encoding T1). */
18668 bfd_vma patched_inst = 0x4600
18669 | (target_reg & 0x7)
18670 | ((target_reg & 0x8) >> 3) << 7
18671 | (source_reg << 3);
18672
18673 return patched_inst;
18674 }
18675
18676 static inline bfd_vma
18677 create_instruction_sub (int target_reg, int source_reg, int value)
18678 {
18679 /* A8.8.221 SUB (immediate) (A8-708)
18680 SUB Rd, Rn, #value (Encoding T3). */
18681 bfd_vma patched_inst = 0xf1a00000
18682 | (target_reg << 8)
18683 | (source_reg << 16)
18684 | (/*S=*/0 << 20)
18685 | ((value & 0x800) >> 11) << 26
18686 | ((value & 0x700) >> 8) << 12
18687 | (value & 0x0ff);
18688
18689 return patched_inst;
18690 }
18691
18692 static inline bfd_vma
18693 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18694 int first_reg)
18695 {
18696 /* A8.8.332 VLDM (A8-922)
18697 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18698 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18699 | (/*W=*/wback << 21)
18700 | (base_reg << 16)
18701 | (num_words & 0x000000ff)
18702 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18703 | (first_reg & 0x00000001) << 22;
18704
18705 return patched_inst;
18706 }
18707
18708 static inline bfd_vma
18709 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18710 int first_reg)
18711 {
18712 /* A8.8.332 VLDM (A8-922)
18713 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18714 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18715 | (base_reg << 16)
18716 | (num_words & 0x000000ff)
18717 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18718 | (first_reg & 0x00000001) << 22;
18719
18720 return patched_inst;
18721 }
18722
18723 static inline bfd_vma
18724 create_instruction_udf_w (int value)
18725 {
18726 /* A8.8.247 UDF (A8-758)
18727 Undefined (Encoding T2). */
18728 bfd_vma patched_inst = 0xf7f0a000
18729 | (value & 0x00000fff)
18730 | (value & 0x000f0000) << 16;
18731
18732 return patched_inst;
18733 }
18734
18735 static inline bfd_vma
18736 create_instruction_udf (int value)
18737 {
18738 /* A8.8.247 UDF (A8-758)
18739 Undefined (Encoding T1). */
18740 bfd_vma patched_inst = 0xde00
18741 | (value & 0xff);
18742
18743 return patched_inst;
18744 }
18745
18746 /* Functions writing an instruction in memory, returning the next
18747 memory position to write to. */
18748
18749 static inline bfd_byte *
18750 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18751 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18752 {
18753 put_thumb2_insn (htab, output_bfd, insn, pt);
18754 return pt + 4;
18755 }
18756
18757 static inline bfd_byte *
18758 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18759 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18760 {
18761 put_thumb_insn (htab, output_bfd, insn, pt);
18762 return pt + 2;
18763 }
18764
18765 /* Function filling up a region in memory with T1 and T2 UDFs taking
18766 care of alignment. */
18767
18768 static bfd_byte *
18769 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18770 bfd * output_bfd,
18771 const bfd_byte * const base_stub_contents,
18772 bfd_byte * const from_stub_contents,
18773 const bfd_byte * const end_stub_contents)
18774 {
18775 bfd_byte *current_stub_contents = from_stub_contents;
18776
18777 /* Fill the remaining of the stub with deterministic contents : UDF
18778 instructions.
18779 Check if realignment is needed on modulo 4 frontier using T1, to
18780 further use T2. */
18781 if ((current_stub_contents < end_stub_contents)
18782 && !((current_stub_contents - base_stub_contents) % 2)
18783 && ((current_stub_contents - base_stub_contents) % 4))
18784 current_stub_contents =
18785 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18786 create_instruction_udf (0));
18787
18788 for (; current_stub_contents < end_stub_contents;)
18789 current_stub_contents =
18790 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18791 create_instruction_udf_w (0));
18792
18793 return current_stub_contents;
18794 }
18795
18796 /* Functions writing the stream of instructions equivalent to the
18797 derived sequence for ldmia, ldmdb, vldm respectively. */
18798
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide Thumb-2
   LDMIA instruction INITIAL_INSN (located at INITIAL_INSN_ADDR), as a
   workaround for the STM32L4xx erratum.  Wide loads (more than 8
   registers) are split into two narrower LDMs; unused stub space is
   filled with UDF instructions.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Preconditions guaranteed by the instruction set for a valid wide
     LDMIA (checked here defensively):  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
18942
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide Thumb-2
   LDMDB instruction INITIAL_INSN (located at INITIAL_INSN_ADDR), as a
   workaround for the STM32L4xx erratum.  The wide load is split into
   two narrower loads; the case analysis below covers every legal
   combination of write-back, PC-in-list and Rn-in-list.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Preconditions guaranteed by the instruction set for a valid wide
     LDMDB (checked here defensively):  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  One branch per combination of
     write-back / PC restore / Rn-in-list.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
		  "undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19188
/* Emit at BASE_STUB_CONTENTS a veneer that replaces the Thumb-2 VLDM
   instruction INITIAL_INSN (located at INITIAL_INSN_ADDR in the output
   image) in order to work around the STM32L4XX erratum.  Loads of more
   than 8 words are split into several shorter VLDMs; the veneer always
   ends with a branch back to the instruction following the original
   one, and any unused space in the veneer is filled with UDF
   instructions by stm32l4xx_fill_stub_udf.  */
static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* Low 8 bits of the encoding hold the number of words transferred.  */
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode from the P/U/W bits of the encoding
	 (bits 24-21 after the shifts below).  */
      bool is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bool is_ia_nobang = /* (IA without !). */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bool is_ia_bang = /* (IA with !) - includes VPOP. */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bool is_db_bang = /* (DB with !). */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* First FP register: d = UInt (Vd:D);. */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split. */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true. */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks but the last transfer exactly 8 words; the
		 last gets the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19309
19310 static void
19311 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19312 bfd * output_bfd,
19313 const insn32 wrong_insn,
19314 const bfd_byte *const wrong_insn_addr,
19315 bfd_byte *const stub_contents)
19316 {
19317 if (is_thumb2_ldmia (wrong_insn))
19318 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19319 wrong_insn, wrong_insn_addr,
19320 stub_contents);
19321 else if (is_thumb2_ldmdb (wrong_insn))
19322 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19323 wrong_insn, wrong_insn_addr,
19324 stub_contents);
19325 else if (is_thumb2_vldm (wrong_insn))
19326 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19327 wrong_insn, wrong_insn_addr,
19328 stub_contents);
19329 }
19330
19331 /* End of stm32l4xx work-around. */
19332
19333
/* Do code byteswapping, apply VFP11/STM32L4XX erratum fixes, perform
   .ARM.exidx table edits and redirect branches to Cortex-A8 erratum
   stubs in SEC's CONTENTS.  Return FALSE afterwards so that the section
   is written out as normal; the EXIDX path writes its own (edited)
   contents and returns TRUE instead.  */

static bool
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  /* Address of the start of this section in the output image.  */
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return false;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return false;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Patch in the VFP11 erratum veneers recorded for this section.  */
  if (errcount != 0)
    {
      /* Instructions are stored byte-by-byte below; on big-endian
	 output XOR the byte index with 3 to store them word-swapped.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  /* Section-relative position of the instruction to patch.  */
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (24-bit encoded) range.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Patch in the STM32L4XX erratum branches and veneers.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a signed 25-bit (24-bit encoded)
		   range.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* Address, inside CONTENTS, corresponding to the
		   instruction following the one being replaced.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Apply any recorded unwind-table edits to a .ARM.exidx section and
     write the edited copy out directly.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      if (edited_contents == NULL)
	return false;
      /* Each EXIDX entry is 8 bytes; walk input entries and the edit
	 list in parallel.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  /* Entries before the next edit point are copied with
		     their prel31 offsets adjusted.  */
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: the section has already been written.  */
      return true;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return false;

  /* Use the mapping symbols to byteswap code regions when requested.  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping data is no longer needed; release it and mark it
     consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return false;
}
19694
/* Mangle thumb function symbols as we read them in: record the branch
   type in st_target_internal and, for EABI objects, strip the Thumb
   low bit from function addresses.  Returns FALSE only if the generic
   swap-in fails.  */

static bool
elf32_arm_swap_symbol_in (bfd * abfd,
			  const void *psrc,
			  const void *pshn,
			  Elf_Internal_Sym *dst)
{
  if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
    return false;
  dst->st_target_internal = 0;

  /* New EABI objects mark thumb function symbols by setting the low bit of
     the address.  */
  if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
      || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
    {
      if (dst->st_value & 1)
	{
	  /* Clear the Thumb bit so the symbol value is the real
	     address; remember the Thumbness separately.  */
	  dst->st_value &= ~(bfd_vma) 1;
	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
				   ST_BRANCH_TO_THUMB);
	}
      else
	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
    {
      /* Legacy (pre-EABI) Thumb function marker: convert to a plain
	 STT_FUNC with the branch type recorded.  */
      dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
    }
  else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
  else
    ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);

  return true;
}
19733
19734
19735 /* Mangle thumb function symbols as we write them out. */
19736
19737 static void
19738 elf32_arm_swap_symbol_out (bfd *abfd,
19739 const Elf_Internal_Sym *src,
19740 void *cdst,
19741 void *shndx)
19742 {
19743 Elf_Internal_Sym newsym;
19744
19745 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19746 of the address set, as per the new EABI. We do this unconditionally
19747 because objcopy does not set the elf header flags until after
19748 it writes out the symbol table. */
19749 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19750 {
19751 newsym = *src;
19752 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19753 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19754 if (newsym.st_shndx != SHN_UNDEF)
19755 {
19756 /* Do this only for defined symbols. At link type, the static
19757 linker will simulate the work of dynamic linker of resolving
19758 symbols and will carry over the thumbness of found symbols to
19759 the output symbol table. It's not clear how it happens, but
19760 the thumbness of undefined symbols can well be different at
19761 runtime, and writing '1' for them will be confusing for users
19762 and possibly for dynamic linker itself.
19763 */
19764 newsym.st_value |= 1;
19765 }
19766
19767 src = &newsym;
19768 }
19769 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19770 }
19771
19772 /* Add the PT_ARM_EXIDX program header. */
19773
19774 static bool
19775 elf32_arm_modify_segment_map (bfd *abfd,
19776 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19777 {
19778 struct elf_segment_map *m;
19779 asection *sec;
19780
19781 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19782 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19783 {
19784 /* If there is already a PT_ARM_EXIDX header, then we do not
19785 want to add another one. This situation arises when running
19786 "strip"; the input binary already has the header. */
19787 m = elf_seg_map (abfd);
19788 while (m && m->p_type != PT_ARM_EXIDX)
19789 m = m->next;
19790 if (!m)
19791 {
19792 m = (struct elf_segment_map *)
19793 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19794 if (m == NULL)
19795 return false;
19796 m->p_type = PT_ARM_EXIDX;
19797 m->count = 1;
19798 m->sections[0] = sec;
19799
19800 m->next = elf_seg_map (abfd);
19801 elf_seg_map (abfd) = m;
19802 }
19803 }
19804
19805 return true;
19806 }
19807
19808 /* We may add a PT_ARM_EXIDX program header. */
19809
19810 static int
19811 elf32_arm_additional_program_headers (bfd *abfd,
19812 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19813 {
19814 asection *sec;
19815
19816 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19817 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19818 return 1;
19819 else
19820 return 0;
19821 }
19822
19823 /* Hook called by the linker routine which adds symbols from an object
19824 file. */
19825
19826 static bool
19827 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19828 Elf_Internal_Sym *sym, const char **namep,
19829 flagword *flagsp, asection **secp, bfd_vma *valp)
19830 {
19831 if (elf32_arm_hash_table (info) == NULL)
19832 return false;
19833
19834 if (elf32_arm_hash_table (info)->root.target_os == is_vxworks
19835 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19836 flagsp, secp, valp))
19837 return false;
19838
19839 return true;
19840 }
19841
/* We use this to override swap_symbol_in and swap_symbol_out.
   NOTE(review): the field comments below follow the declaration order
   of struct elf_size_info in elf-bfd.h — confirm against that header
   if it changes.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash-table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch_size, log_file_align.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* ARM-specific override.  */
  elf32_arm_swap_symbol_out,	/* ARM-specific override.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19872
19873 static bfd_vma
19874 read_code32 (const bfd *abfd, const bfd_byte *addr)
19875 {
19876 /* V7 BE8 code is always little endian. */
19877 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19878 return bfd_getl32 (addr);
19879
19880 return bfd_get_32 (abfd, addr);
19881 }
19882
19883 static bfd_vma
19884 read_code16 (const bfd *abfd, const bfd_byte *addr)
19885 {
19886 /* V7 BE8 code is always little endian. */
19887 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19888 return bfd_getl16 (addr);
19889
19890 return bfd_get_16 (abfd, addr);
19891 }
19892
19893 /* Return size of plt0 entry starting at ADDR
19894 or (bfd_vma) -1 if size can not be determined. */
19895
19896 static bfd_vma
19897 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19898 {
19899 bfd_vma first_word;
19900 bfd_vma plt0_size;
19901
19902 first_word = read_code32 (abfd, addr);
19903
19904 if (first_word == elf32_arm_plt0_entry[0])
19905 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19906 else if (first_word == elf32_thumb2_plt0_entry[0])
19907 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19908 else
19909 /* We don't yet handle this PLT format. */
19910 return (bfd_vma) -1;
19911
19912 return plt0_size;
19913 }
19914
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* NOTE: this 'else' pairs with whichever 'if'/'else if' chain the
     preprocessor selected above.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
19954
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Create one synthetic "<name>@plt" symbol per .rel.plt entry, placed at
   the corresponding PLT stub.  Stores the symbol array in *RET (caller
   frees) and returns the number of symbols created, 0 when there is
   nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check the reloc section: it must use the dynamic symbol
     table and be of REL/RELA type.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, true))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents ((asection *) plt, data);
    }

  /* First pass: compute the total allocation — the asymbol array
     followed by all the "name@plt[+0xADDEND]" strings.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header to reach the first PLT entry.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols; names are packed after the
     asymbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes of the printed addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20071
20072 static bool
20073 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20074 {
20075 if (hdr->sh_flags & SHF_ARM_PURECODE)
20076 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20077 return true;
20078 }
20079
20080 static flagword
20081 elf32_arm_lookup_section_flags (char *flag_name)
20082 {
20083 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20084 return SHF_ARM_PURECODE;
20085
20086 return SEC_NO_FLAGS;
20087 }
20088
20089 static unsigned int
20090 elf32_arm_count_additional_relocs (asection *sec)
20091 {
20092 struct _arm_elf_section_data *arm_data;
20093 arm_data = get_arm_elf_section_data (sec);
20094
20095 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20096 }
20097
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bool
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* Index of the output text section to link to; 0 means "not
	   found yet".  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header matching the input
	       section's linked text section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return true;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return false;
}
20188
/* Returns TRUE if NAME is an ARM mapping symbol.
   Traditionally the symbols $a, $d and $t have been used.
   The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
   Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
   not support them here.  $t.x indicates the start of ThumbEE instructions.  */

static bool
is_arm_mapping_symbol (const char * name)
{
  /* Reject NULL (paranoia) and any name that does not start with '$'.
     Note: if objcopy --prefix-symbols has been used then the mapping
     symbols could have acquired a prefix.  We do not support this here,
     since such symbols no longer conform to the ARM ELF ABI.  */
  if (name == NULL || name[0] != '$')
    return false;

  switch (name[1])
    {
    case 'a':
    case 'd':
    case 't':
    case 'x':
      /* The marker letter may stand alone, or be followed by a period
	 initiated suffix.
	 FIXME: Strictly speaking the symbol is only a valid mapping
	 symbol if any characters that follow the period are legal
	 characters for the body of a symbol's name.  For now we just
	 assume that this is the case.  */
      return name[2] == 0 || name[2] == '.';

    default:
      return false;
    }
}
20210
20211 /* Make sure that mapping symbols in object files are not removed via the
20212 "strip --strip-unneeded" tool. These symbols are needed in order to
20213 correctly generate interworking veneers, and for byte swapping code
20214 regions. Once an object file has been linked, it is safe to remove the
20215 symbols as they will no longer be needed. */
20216
20217 static void
20218 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20219 {
20220 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20221 && sym->section != bfd_abs_section_ptr
20222 && is_arm_mapping_symbol (sym->name))
20223 sym->flags |= BSF_KEEP;
20224 }
20225
#undef	elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

/* Core parameters for the generic ARM ELF target.  */
#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		elf32_arm_mkobject

/* Hook the ARM-specific implementations into the generic BFD entry
   points.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab

/* ELF backend hooks provided by this file.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_maybe_function_sym		elf32_arm_maybe_function_sym
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_update_relocs		elf32_arm_update_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_init_file_header		elf32_arm_init_file_header
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Backend capability flags.  The generic ARM target uses REL
   relocations (may_use_rel_p / default_use_rela_p below).  */
#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_want_dynrelro	1
#define elf_backend_may_use_rel_p	1
#define elf_backend_may_use_rela_p	0
#define elf_backend_default_use_rela_p	0
#define elf_backend_dtrel_excludes_plt	1

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build attribute (.ARM.attributes) handling.  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#define elf_backend_linux_prpsinfo32_ugid16	true

/* Instantiate the generic ARM ELF target vectors.  */
#include "elf32-target.h"
20319
/* Native Client targets.  */

/* Redefine the target vector names before re-including elf32-target.h
   below.  */
#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20330
20331 /* Like elf32_arm_link_hash_table_create -- but overrides
20332 appropriately for NaCl. */
20333
20334 static struct bfd_link_hash_table *
20335 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20336 {
20337 struct bfd_link_hash_table *ret;
20338
20339 ret = elf32_arm_link_hash_table_create (abfd);
20340 if (ret)
20341 {
20342 struct elf32_arm_link_hash_table *htab
20343 = (struct elf32_arm_link_hash_table *) ret;
20344
20345 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20346 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20347 }
20348 return ret;
20349 }
20350
20351 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20352 really need to use elf32_arm_modify_segment_map. But we do it
20353 anyway just to reduce gratuitous differences with the stock ARM backend. */
20354
20355 static bool
20356 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20357 {
20358 return (elf32_arm_modify_segment_map (abfd, info)
20359 && nacl_modify_segment_map (abfd, info));
20360 }
20361
/* Final write hook for NaCl targets: perform the generic ARM final
   write processing, then the NaCl-specific one, whose result we
   return.  */

static bool
elf32_arm_nacl_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return nacl_final_write_processing (abfd);
}
20368
20369 static bfd_vma
20370 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20371 const arelent *rel ATTRIBUTE_UNUSED)
20372 {
20373 return plt->vma
20374 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20375 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20376 }
20377
/* Override the generic ARM backend with the NaCl-specific hooks
   defined above, then instantiate the NaCl target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment		4
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_headers
#define elf_backend_modify_headers		nacl_modify_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef	bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE

#undef	ELF_TARGET_OS
#define ELF_TARGET_OS				is_nacl

#include "elf32-target.h"

/* Reset to defaults.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE			0x1000
20414
20415
/* FDPIC Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
/* NOTE(review): 128 appears to give these vectors a non-default match
   priority so they are only chosen for genuine FDPIC objects -- confirm
   against BFD's target-matching logic.  */
#undef	elf_match_priority
#define elf_match_priority		128
#undef	ELF_OSABI
#define ELF_OSABI			ELFOSABI_ARM_FDPIC
20430
20431 /* Like elf32_arm_link_hash_table_create -- but overrides
20432 appropriately for FDPIC. */
20433
20434 static struct bfd_link_hash_table *
20435 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20436 {
20437 struct bfd_link_hash_table *ret;
20438
20439 ret = elf32_arm_link_hash_table_create (abfd);
20440 if (ret)
20441 {
20442 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20443
20444 htab->fdpic_p = 1;
20445 }
20446 return ret;
20447 }
20448
20449 /* We need dynamic symbols for every section, since segments can
20450 relocate independently. */
20451 static bool
20452 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20453 struct bfd_link_info *info
20454 ATTRIBUTE_UNUSED,
20455 asection *p ATTRIBUTE_UNUSED)
20456 {
20457 switch (elf_section_data (p)->this_hdr.sh_type)
20458 {
20459 case SHT_PROGBITS:
20460 case SHT_NOBITS:
20461 /* If sh_type is yet undecided, assume it could be
20462 SHT_PROGBITS/SHT_NOBITS. */
20463 case SHT_NULL:
20464 return false;
20465
20466 /* There shouldn't be section relative relocations
20467 against any other section. */
20468 default:
20469 return true;
20470 }
20471 }
20472
/* Hook in the FDPIC-specific overrides and instantiate the FDPIC
   target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_fdpic_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create

#undef	elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym

#undef	ELF_TARGET_OS

#include "elf32-target.h"

/* Restore the defaults changed for FDPIC before the next target.  */
#undef	elf_match_priority
#undef	ELF_OSABI
#undef	elf_backend_omit_section_dynsym
20489
/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20500
20501 /* Like elf32_arm_link_hash_table_create -- but overrides
20502 appropriately for VxWorks. */
20503
20504 static struct bfd_link_hash_table *
20505 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20506 {
20507 struct bfd_link_hash_table *ret;
20508
20509 ret = elf32_arm_link_hash_table_create (abfd);
20510 if (ret)
20511 {
20512 struct elf32_arm_link_hash_table *htab
20513 = (struct elf32_arm_link_hash_table *) ret;
20514 htab->use_rel = 0;
20515 }
20516 return ret;
20517 }
20518
/* Final write hook for VxWorks targets: perform the generic ARM final
   write processing, then the VxWorks-specific one, whose result we
   return.  */

static bool
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}
20525
/* Hook in the VxWorks-specific overrides and instantiate the VxWorks
   target vectors.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_vxworks_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

/* Unlike the generic ARM target, VxWorks uses RELA relocations.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000
#undef	ELF_TARGET_OS
#define ELF_TARGET_OS			is_vxworks

#include "elf32-target.h"
20550
20551
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE on a hard error (endianness
   mismatch, attribute merge failure, already-BE8 input, or EABI version
   mismatch); softer flag incompatibilities are all reported and
   accumulated in FLAGS_COMPATIBLE, which is returned at the end.  */

static bool
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bool flags_compatible = true;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return false;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return true;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return false;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return false;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return true;

      elf_flags_init (obfd) = true;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return true;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return false;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return true;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bool null_input_bfd = true;
      bool only_data_sections = true;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = false;

	      null_input_bfd = false;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return true;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return false;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = false;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = false;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = false;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}