Source: git.ipfire.org Git mirror — thirdparty/binutils-gdb.git — bfd/elf32-arm.c
Commit subject: Introduce new section flag: SEC_ELF_OCTETS
Path: thirdparty/binutils-gdb.git / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2019 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33
/* Name of the relocation section that accompanies section NAME:
   ".rel" NAME when the target uses REL relocations, ".rela" NAME
   otherwise.  HTAB is any object carrying the use_rel flag (the
   bfd's ARM link hash table).  NAME must be a string literal so the
   prefix can be pasted on at compile time.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
38
/* Size in bytes of one external relocation record, chosen by the
   use_rel flag in HTAB: a REL entry for REL targets, a RELA entry
   otherwise.  */
#define RELOC_SIZE(HTAB)			\
  (!(HTAB)->use_rel				\
   ? sizeof (Elf32_External_Rela)		\
   : sizeof (Elf32_External_Rel))
45
/* Function used to read one relocation record from a bfd, selected
   by the use_rel flag in HTAB (REL vs. RELA format).  */
#define SWAP_RELOC_IN(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_in			\
   : bfd_elf32_swap_reloc_in)
52
/* Function used to write one relocation record to a bfd, selected
   by the use_rel flag in HTAB (REL vs. RELA format).  */
#define SWAP_RELOC_OUT(HTAB)			\
  (!(HTAB)->use_rel				\
   ? bfd_elf32_swap_reloca_out			\
   : bfd_elf32_swap_reloc_out)
59
/* Generic ELF backend hooks for turning relocation records into HOWTO
   entries.  The RELA-style hook is disabled (NULL); the REL-style hook
   is routed to the target-specific elf32_arm_info_to_howto (defined
   elsewhere in this file — not visible in this chunk).  */
#define elf_info_to_howto NULL
#define elf_info_to_howto_rel elf32_arm_info_to_howto
62
/* ABI version values stamped into ELF headers for ARM objects:
   EI_ABIVERSION is 0, EI_OSABI is the ARM-specific ELFOSABI_ARM.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM
65
/* The Adjusted Place, as defined by AAELF: the place X rounded down
   to a 4-byte (word) boundary.  Note the 32-bit mask also discards
   any bits above bit 31 when X is wider than 32 bits.  */
#define Pa(X) ((X) & 0xfffffffc)
68
/* Forward declaration; the definition appears later in this file
   (outside this chunk), so no behavior is documented here.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 FALSE, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 FALSE), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 TRUE, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 FALSE, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 TRUE, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 TRUE, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 FALSE, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 FALSE, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 FALSE, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 FALSE, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 FALSE, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 TRUE, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 TRUE, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 FALSE, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 FALSE, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 FALSE, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 TRUE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 TRUE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 FALSE, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 FALSE, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 TRUE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 TRUE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 TRUE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 TRUE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 FALSE, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 TRUE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 TRUE, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 TRUE, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 FALSE, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 FALSE, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 FALSE, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 FALSE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 FALSE, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 TRUE, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 FALSE, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 FALSE, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 TRUE, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 TRUE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 FALSE, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 TRUE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 TRUE, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 TRUE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 FALSE, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 TRUE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 TRUE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 TRUE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 TRUE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 TRUE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 TRUE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 TRUE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 TRUE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 TRUE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 TRUE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 TRUE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 TRUE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 TRUE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 TRUE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 TRUE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 TRUE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 TRUE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 TRUE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 TRUE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 TRUE, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 TRUE, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 TRUE, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 TRUE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 TRUE, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 FALSE, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 FALSE, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 FALSE, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 FALSE, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 FALSE, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 FALSE, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_bitfield,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 FALSE, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 TRUE, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 FALSE, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 FALSE, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 TRUE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 TRUE, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 FALSE, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 FALSE, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 FALSE, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 FALSE, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 FALSE, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_bitfield,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 16, /* bitsize. */
1699 FALSE, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 FALSE, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 FALSE), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 16, /* bitsize. */
1712 FALSE, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 FALSE, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 FALSE), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 16, /* bitsize. */
1725 FALSE, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 FALSE, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 FALSE), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 16, /* bitsize. */
1738 FALSE, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 FALSE, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 FALSE), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1751 16, /* bitsize. */
1752 TRUE, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 FALSE, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 TRUE), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1764 12, /* bitsize. */
1765 TRUE, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 FALSE, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 TRUE), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1777 18, /* bitsize. */
1778 TRUE, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 FALSE, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 TRUE), /* pcrel_offset. */
1787 };
1788
/* Relocation types 160 onwards: R_ARM_IRELATIVE plus the FDPIC
   function-descriptor and FDPIC TLS relocations.  Indexed by
   (r_type - R_ARM_IRELATIVE); see elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* NOTE(review): bitsize is 64 here while size and dst_mask describe a
     32-bit field — confirm whether this entry should be fully 64-bit.  */
  HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1897
/* 249-255 extended, currently unused, relocations.  Only the first four
   (R_ARM_RREL32 .. R_ARM_RBASE, i.e. 249-252) are present here; indexed
   by (r_type - R_ARM_RREL32) in elf32_arm_howto_from_type.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1957
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1963
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971
1972 return NULL;
1973 }
1974
1975 static bfd_boolean
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1978 {
1979 unsigned int r_type;
1980
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983 {
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return FALSE;
1989 }
1990 return TRUE;
1991 }
1992
/* One entry in the table mapping generic BFD relocation codes to ARM
   ELF relocation numbers.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* Generic BFD code.  */
  unsigned char elf_reloc_val;			/* R_ARM_* number.  */
};
1998
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001 {
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102 };
2103
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2107 {
2108 unsigned int i;
2109
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113
2114 return NULL;
2115 }
2116
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2120 {
2121 unsigned int i;
2122
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2127
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2132
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2137
2138 return NULL;
2139 }
2140
2141 /* Support for core dump NOTE sections. */
2142
2143 static bfd_boolean
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2145 {
2146 int offset;
2147 size_t size;
2148
2149 switch (note->descsz)
2150 {
2151 default:
2152 return FALSE;
2153
2154 case 148: /* Linux/ARM 32-bit. */
2155 /* pr_cursig */
2156 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2157
2158 /* pr_pid */
2159 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2160
2161 /* pr_reg */
2162 offset = 72;
2163 size = 72;
2164
2165 break;
2166 }
2167
2168 /* Make a ".reg/999" section. */
2169 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 size, note->descpos + offset);
2171 }
2172
2173 static bfd_boolean
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176 switch (note->descsz)
2177 {
2178 default:
2179 return FALSE;
2180
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188 }
2189
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2193 {
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2196
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2199 }
2200
2201 return TRUE;
2202 }
2203
/* Write a Linux/ARM core note of NOTE_TYPE into BUF (of size *BUFSIZ).
   Varargs depend on the note type:
     NT_PRPSINFO: (const char *program, const char *command)
     NT_PRSTATUS: (long pid, int cursig, const void *gregs)
   Returns the (possibly reallocated) buffer, or NULL for unsupported
   note types.  The fixed offsets below mirror the Linux/ARM
   elf_prpsinfo/elf_prstatus layouts read back in the grok functions.  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	/* 124-byte Linux/ARM elf_prpsinfo image; the two strncpy'd
	   fields need not be NUL-terminated, hence ATTRIBUTE_NONSTRING.  */
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname at offset 28 (16 bytes).  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	/* pr_psargs at offset 44 (80 bytes).  */
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	/* 148-byte Linux/ARM elf_prstatus image.  */
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_pid at offset 24.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	/* pr_cursig at offset 12.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	/* pr_reg: 72 bytes of registers at offset 72.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2262
/* BFD target vector names for little- and big-endian ARM ELF.  */
#define TARGET_LITTLE_SYM		arm_elf32_le_vec
#define TARGET_LITTLE_NAME		"elf32-littlearm"
#define TARGET_BIG_SYM			arm_elf32_be_vec
#define TARGET_BIG_NAME			"elf32-bigarm"

/* Hook the core-note handlers defined above into the ELF backend.  */
#define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
#define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
#define elf_backend_write_core_note	elf32_arm_nabi_write_core_note

/* Convenience types for 32-bit ARM and 16-bit Thumb instruction words.  */
typedef unsigned long int	insn32;
typedef unsigned short int	insn16;

/* In lieu of proper flags, assume all EABIv4 or later objects are
   interworkable.  */
#define INTERWORK_FLAG(abfd)  \
  (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
  || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
  || ((abfd)->flags & BFD_LINKER_CREATED))

/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name, and its type, the stub can be found. The
   name can be changed. The only requirement is the %s be present.  */
#define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
#define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"

#define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
#define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"

#define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
#define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"

#define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
#define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"

#define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
#define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"

#define STUB_ENTRY_NAME   "__%s_veneer"

/* Prefix applied to a symbol's name for its ARMv8-M secure-entry twin.  */
#define CMSE_PREFIX "__acle_se_"

/* Section holding the ARMv8-M security gateway (SG) stubs.  */
#define CMSE_STUB_NAME ".gnu.sgstubs"

/* The name of the dynamic interpreter.  This is put in the .interp
   section.  */
#define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"

/* FDPIC default stack size.  */
#define DEFAULT_STACK_SIZE 0x8000
2313
/* Trampoline reached via a TLS descriptor call sequence.  */
static const unsigned long tls_trampoline [] =
  {
    0xe08e0000,		/* add r0, lr, r0 */
    0xe5901004,		/* ldr r1, [r0,#4] */
    0xe12fff11,		/* bx  r1 */
  };

/* Lazy-resolution trampoline for TLS descriptors; the two trailing
   words are GOT-relative offsets patched at link time.  */
static const unsigned long dl_tlsdesc_lazy_trampoline [] =
  {
    0xe52d2004, /* push {r2} */
    0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
    0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
    0xe79f2002, /* 1: ldr r2, [pc, r2] */
    0xe081100f, /* 2: add r1, pc */
    0xe12fff12, /* bx r2 */
    0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
		      + dl_tlsdesc_lazy_resolver(GOT)   */
    0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
  };
2333
/* NOTE: [Thumb nop sequence]
   When adding code that transitions from Thumb to Arm the instruction that
   should be used for the alignment padding should be 0xe7fd (b .-2) instead of
   a nop for performance reasons.  */

/* ARM FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_plt_entry [] =
  {
    0xe59fc008,    /* ldr     r12, .L1 */
    0xe08cc009,    /* add     r12, r12, r9 */
    0xe59c9004,    /* ldr     r9, [r12, #4] */
    0xe59cf000,    /* ldr     pc, [r12] */
    0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* L1.     .word   foo(funcdesc_value_reloc_offset) */
    0xe51fc00c,    /* ldr     r12, [pc, #-12] */
    0xe92d1000,    /* push    {r12} */
    0xe599c004,    /* ldr     r12, [r9, #4] */
    0xe599f000,    /* ldr     pc, [r9] */
  };

/* Thumb FDPIC PLT entry.  */
/* The last 5 words contain PLT lazy fragment code and data.  */
static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
  {
    0xc00cf8df,    /* ldr.w   r12, .L1 */
    0x0c09eb0c,    /* add.w   r12, r12, r9 */
    0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
    0xf000f8dc,    /* ldr.w   pc, [r12] */
    0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
    0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
    0xc008f85f,    /* ldr.w   r12, .L2 */
    0xcd04f84d,    /* push    {r12} */
    0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
    0xf000f8d9,    /* ldr.w   pc, [r9] */
  };
2370
#ifdef FOUR_WORD_PLT

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str   lr, [sp, #-4]! */
    0xe59fe010,		/* ldr   lr, [pc, #16]  */
    0xe08fe00e,		/* add   lr, pc, lr     */
    0xe5bef008,		/* ldr   pc, [lr, #8]!  */
  };

/* Subsequent entries in a procedure linkage table look like
   this.  The #NN immediates are filled in when the entry is built.  */
static const bfd_vma elf32_arm_plt_entry [] =
  {
    0xe28fc600,		/* add   ip, pc, #NN	*/
    0xe28cca00,		/* add	 ip, ip, #NN	*/
    0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
    0x00000000,		/* unused		*/
  };

#else /* not FOUR_WORD_PLT */

/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.  */
static const bfd_vma elf32_arm_plt0_entry [] =
  {
    0xe52de004,		/* str	 lr, [sp, #-4]! */
    0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
    0xe08fe00e,		/* add	 lr, pc, lr	*/
    0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
    0x00000000,		/* &GOT[0] - .		*/
  };

/* By default subsequent entries in a procedure linkage table look like
   this. Offsets that don't fit into 28 bits will cause link error.  */
static const bfd_vma elf32_arm_plt_entry_short [] =
  {
    0xe28fc600,		/* add   ip, pc, #0xNN00000 */
    0xe28cca00,		/* add	 ip, ip, #0xNN000   */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
  };

/* When explicitly asked, we'll use this "long" entry format
   which can cope with arbitrary displacements.  */
static const bfd_vma elf32_arm_plt_entry_long [] =
  {
    0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
    0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
    0xe28cca00,		/* add	 ip, ip, #0xNN000    */
    0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
  };

/* TRUE when the "long" PLT entry format above should be used.  */
static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;

#endif /* not FOUR_WORD_PLT */
2432
/* The first entry in a procedure linkage table looks like this.
   It is set up so that any shared library function that is called before the
   relocation has been set up calls the dynamic linker first.  */
static const bfd_vma elf32_thumb2_plt0_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0xf8dfb500,		/* push	   {lr}		 */
    0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
			/* add	   lr, pc	 */
    0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
    0x00000000,		/* &GOT[0] - .		 */
  };

/* Subsequent entries in a procedure linkage table for thumb only target
   look like this.  */
static const bfd_vma elf32_thumb2_plt_entry [] =
  {
    /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
       an instruction maybe encoded to one or two array elements.  */
    0x0c00f240,		/* movw	   ip, #0xNNNN	  */
    0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
    0xf8dc44fc,		/* add	   ip, pc	  */
    0xe7fdf000		/* ldr.w   pc, [ip]	  */
			/* b      .-2 (see [Thumb nop sequence] note)  */
  };
2459
/* The format of the first entry in the procedure linkage table
   for a VxWorks executable.  */
static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
  {
    0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
    0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
  };

/* The format of subsequent entries in a VxWorks executable.
   The first half is the direct branch via the GOT; the second half
   is the lazy-resolution fragment.  */
static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe59cf000,		/* ldr	  pc,[ip]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xea000000,		/* b	  _PLT				*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };

/* The format of entries in a VxWorks shared library.
   GOT accesses go via the PIC register r9.  */
static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
  {
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe79cf009,		/* ldr	  pc,[ip,r9]			*/
    0x00000000,		/* .long  @got				*/
    0xe59fc000,		/* ldr	  ip,[pc]			*/
    0xe599f008,		/* ldr	  pc,[r9,#8]			*/
    0x00000000,		/* .long  @pltindex*sizeof(Elf32_Rela)	*/
  };
2491
/* An initial stub used if the PLT entry is referenced from Thumb code.
   Switches to ARM state before falling through into the ARM PLT entry.  */
#define PLT_THUMB_STUB_SIZE 4
static const bfd_vma elf32_arm_plt_thumb_stub [] =
  {
    0x4778,		/* bx pc */
    0xe7fd		/* b .-2 */
  };

/* The entries in a PLT when using a DLL-based target with multiple
   address spaces.  */
static const bfd_vma elf32_arm_symbian_plt_entry [] =
  {
    0xe51ff004,	    /* ldr   pc, [pc, #-4] */
    0x00000000,	    /* dcd   R_ARM_GLOB_DAT(X) */
  };
2507
/* The first entry in a procedure linkage table looks like
   this.  It is set up so that any shared library function that is
   called before the relocation has been set up calls the dynamic
   linker first.
   NaCl requires 16-byte instruction bundles and masks indirect-branch
   targets with bic, hence the layout below.  */
static const bfd_vma elf32_arm_nacl_plt0_entry [] =
  {
    /* First bundle: */
    0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xe52dc008,		/* str	ip, [sp, #-8]!			*/
    /* Second bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
    /* Third bundle: */
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    0xe320f000,		/* nop					*/
    /* .Lplt_tail: */
    0xe50dc004,		/* str	ip, [sp, #-4]			*/
    /* Fourth bundle: */
    0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
    0xe59cc000,		/* ldr	ip, [ip]			*/
    0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
    0xe12fff1c,		/* bx	ip				*/
  };
/* Byte offset of the shared .Lplt_tail fragment within the entry above.  */
#define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)

/* Subsequent entries in a procedure linkage table look like this;
   they all branch back to the shared .Lplt_tail fragment.  */
static const bfd_vma elf32_arm_nacl_plt_entry [] =
  {
    0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
    0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
    0xe08cc00f,		/* add	ip, ip, pc			*/
    0xea000000,		/* b	.Lplt_tail			*/
  };
2546
/* Maximum forward/backward reach of the various branch encodings,
   measured from the branch instruction (hence the +8 / +4 pipeline
   offsets for ARM and Thumb respectively).  */
#define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
#define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
#define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
#define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
#define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
#define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
#define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
#define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)

/* Classification of each element in a stub's insn_sequence.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};

/* Helpers building insn_sequence initialisers: the instruction/data
   word, its type, the relocation (if any) to apply to it, and the
   relocation addend.  */
#define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
/* A bit of a hack.  A Thumb conditional branch, in which the proper condition
   is inserted in arm_build_one_stub().  */
#define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
#define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
#define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
#define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
#define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
#define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
#define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
#define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}

/* One element of a stub template: an instruction or data word plus the
   relocation to apply to it when the stub is built.  */
typedef struct
{
  bfd_vma	      data;	      /* Instruction/data word.  */
  enum stub_insn_type type;	      /* How to emit it.  */
  unsigned int	      r_type;	      /* Relocation applied to it.  */
  int		      reloc_addend;   /* Addend for that relocation.  */
}  insn_sequence;
2583
/* See note [Thumb nop sequence] when adding a veneer.  */

/* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
   to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
  {
    ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
  {
    ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	    /* bx    ip */
    DATA_WORD (0, R_ARM_ABS32, 0),  /* dcd   R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    THUMB16_INSN (0xbf00),	     /* nop */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
  {
    THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(x) */
  };

/* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
   M-profile architectures.  (PureCode forbids literal loads, hence the
   movw/movt pair instead of a data word.)  */
static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
  {
    THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
    THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
    THUMB16_INSN (0x4760),	     /* bx   ip */
  };

/* V4T Thumb -> Thumb long branch stub. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd  R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
   available.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
    DATA_WORD (0, R_ARM_ABS32, 0),   /* dcd   R_ARM_ABS32(X) */
  };

/* V4T Thumb -> ARM short branch stub. Shorter variant of the above
   one, when the destination is close enough.  */
static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_REL_INSN (0xea000000, -8),   /* b    (X-8) */
  };

/* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  */
static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
  {
    ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
    ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We can not add into pc;
   it is not guaranteed to mode switch (different in ARMv6 and
   ARMv7).  */
static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
  {
    ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx    ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd   R_ARM_REL32(X) */
  };

/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
    ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X) */
  };

/* Thumb -> Thumb long branch stub, PIC. Used on M-profile
   architectures.  */
static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
  {
    THUMB16_INSN (0xb401),	     /* push {r0} */
    THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
    THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
    THUMB16_INSN (0x4484),	     /* add  ip, r0 */
    THUMB16_INSN (0xbc01),	     /* pop  {r0} */
    THUMB16_INSN (0x4760),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 4),   /* dcd  R_ARM_REL32(X) */
  };

/* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
   allowed.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
    ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
    ARM_INSN (0xe12fff1c),	     /* bx   ip */
    DATA_WORD (0, R_ARM_REL32, 0),   /* dcd  R_ARM_REL32(X) */
  };

/* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
  {
    ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
    ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd   R_ARM_REL32(X-4) */
  };

/* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
  {
    THUMB16_INSN (0x4778),	     /* bx   pc */
    THUMB16_INSN (0xe7fd),	     /* b   .-2 */
    ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
    ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
    DATA_WORD (0, R_ARM_REL32, -4),  /* dcd  R_ARM_REL32(X) */
  };

/* NaCl ARM -> ARM long branch stub.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe320f000),		/* nop */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* NaCl ARM -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
  {
    ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
    ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
    ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
    ARM_INSN (0xe12fff1c),		/* bx	ip */
    ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
    DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
    DATA_WORD (0, R_ARM_NONE, 0),	/* .word 0 */
  };

/* Stub used for transition to secure state (aka SG veneer).  */
static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
  {
    THUMB32_INSN (0xe97fe97f),		/* sg.  */
    THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
  };


/* Cortex-A8 erratum-workaround stubs.  */

/* Stub used for conditional branches (which may be beyond +/-1MB away, so we
   can't use a conditional branch to reach this stub).  */

static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
  {
    THUMB16_BCOND_INSN (0xd001),	 /* b<cond>.n true.  */
    THUMB32_B_INSN (0xf000b800, -4),	 /* b.w insn_after_original_branch.  */
    THUMB32_B_INSN (0xf000b800, -4)	 /* true: b.w original_branch_dest.  */
  };

/* Stub used for b.w and bl.w instructions.  */

static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
  {
    THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
  };

/* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
   instruction (which switches to ARM mode) to point to this stub.  Jump to the
   real destination using an ARM-mode branch.  */

static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
  {
    ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
  };
2812
/* For each section group there can be a specially created linker section
   to hold the stubs for that group.  The name of the stub section is based
   upon the name of another section within that group with the suffix below
   applied.

   PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
   create what appeared to be a linker stub section when it actually
   contained user code/data.  For example, consider this fragment:

     const char * stubborn_problems[] = { "np" };

   If this is compiled with "-fPIC -fdata-sections" then gcc produces a
   section called:

     .data.rel.local.stubborn_problems

   This then causes problems in arm32_arm_build_stubs() as it triggers:

      // Ignore non-stub sections.
      if (!strstr (stub_sec->name, STUB_SUFFIX))
	continue;

   And so the section would be ignored instead of being processed.  Hence
   the change in definition of STUB_SUFFIX to a name that cannot be a valid
   C identifier.  */
#define STUB_SUFFIX ".__stub"
2839
/* One entry per long/short branch stub defined above.  This X-macro list
   drives both the enum of stub types and the stub_definitions table, so
   the two always stay in sync.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm) \
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* Expand the list into enumerators arm_stub_<name>.  arm_stub_none is the
   "no stub required" sentinel and max_stub_type counts the types.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB

/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* Template and template length for one stub type.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Table indexed by enum elf32_arm_stub_type; slot 0 corresponds to
   arm_stub_none, hence the leading {NULL, 0} entry.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2890
/* One linker-generated stub (long branch veneer, erratum fix, SG veneer...),
   keyed by a unique name in the stub hash table.  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2942
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  TYPE is the mapping symbol class ('a' = ARM code,
   't' = Thumb code, 'd' = data) in effect from VMA onwards.  */

typedef struct elf32_elf_section_map
{
  bfd_vma vma;
  char type;
}
elf32_arm_section_map;
2952
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Linked list of VFP11 erratum records for one section.  Branch records
   (u.b) point at their veneer and remember the replaced VFP instruction;
   veneer records (u.v) point back at the branch and carry a per-link
   unique id used to name the veneer symbols.  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_vfp11_erratum_list *veneer;
      unsigned int vfp_insn;
    } b;
    struct
    {
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2984
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Linked list of STM32L4XX erratum records; same branch/veneer pairing
   scheme as elf32_vfp11_erratum_list above.  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      struct elf32_stm32l4xx_erratum_list *veneer;
      unsigned int insn;
    } b;
    struct
    {
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
3014
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the exidx entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
3034
/* ARM-specific per-section data, extending the generic ELF section data.
   Retrieved via the elf32_arm_section_data accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Access SEC's ARM-specific section data.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
3069
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  bfd_vma target_offset;
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* TRUE if the branch destination already goes through a non-A8 stub.  */
  bfd_boolean non_a8_stub;
};
3101
/* The size of the thread control block.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
3129
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local {
  /* Number of R_ARM_FUNCDESC relocations seen for the symbol.  */
  unsigned int funcdesc_cnt;
  /* Number of R_ARM_GOTOFFFUNCDESC relocations seen for the symbol.  */
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor; bit 0 flags "already written".  */
  int funcdesc_offset;
};
3151
/* ARM-specific per-BFD data, extending the generic ELF tdata.  The
   local_* arrays below are indexed by local symbol number and are
   allocated together by elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

/* Accessors for the fields above.  */
#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* TRUE if BFD is an ELF BFD handled by this (ARM) backend.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3194
3195 static bfd_boolean
3196 elf32_arm_mkobject (bfd *abfd)
3197 {
3198 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 ARM_ELF_DATA);
3200 }
3201
/* Downcast a generic hash entry to the ARM-specific entry type.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  /* Number of R_ARM_GOTOFFFUNCDESC relocations seen for the symbol.  */
  unsigned int gotofffuncdesc_cnt;
  /* Number of R_ARM_GOTFUNCDESC relocations seen for the symbol.  */
  unsigned int gotfuncdesc_cnt;
  /* Number of R_ARM_FUNCDESC relocations seen for the symbol.  */
  unsigned int funcdesc_cnt;
  /* Offset of the function descriptor; bit 0 flags "already written".  */
  int funcdesc_offset;
  /* Offset of the GOT entry holding the descriptor address.  */
  int gotfuncdesc_offset;
};
3212
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Flags describing the kind(s) of GOT entry the symbol needs; a symbol
   may need several (e.g. both GD and IE for TLS transitions).  */
#define GOT_UNKNOWN	0
#define GOT_NORMAL	1
#define GOT_TLS_GD	2
#define GOT_TLS_IE	4
#define GOT_TLS_GDESC	8
#define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  /* Spare bits; keeps the bitfields at a full 32 bits.  */
  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3252
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Returns NULL if the hash table does not belong to this backend.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Look up (and optionally create/copy) a stub entry by name.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Total size of the .got.plt jump table: one word per TLS descriptor.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3282
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3461
3462 /* Add an FDPIC read-only fixup. */
3463 static void
3464 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3465 {
3466 bfd_vma fixup_offset;
3467
3468 fixup_offset = srofixup->reloc_count++ * 4;
3469 BFD_ASSERT (fixup_offset < srofixup->size);
3470 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3471 }
3472
/* Count trailing zero bits in MASK.  MASK must be nonzero when the
   compiler builtin is used; the portable fallback returns the bit width
   (32) for a zero mask.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int count = 0;

  while (count < 8 * sizeof (mask) && (mask & 1) == 0)
    {
      mask >>= 1;
      count++;
    }
  return count;
#endif
}
3490
/* Return the number of set bits in MASK.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int bits_set = 0;

  while (mask != 0)
    {
      bits_set += mask & 1;
      mask >>= 1;
    }
  return bits_set;
#endif
}
3509
3510 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3511 asection *sreloc, Elf_Internal_Rela *rel);
3512
/* Write an FDPIC function descriptor (address + GOT/segment value pair)
   into the GOT at OFFSET, at most once: bit 0 of *FUNCDESC_OFFSET is used
   as an "already written" flag.  For shared links an R_ARM_FUNCDESC_VALUE
   dynamic reloc against DYNINDX is emitted; for static links the contents
   are written directly and two rofixups are recorded so the loader can
   relocate them.  */
static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  /* Let the dynamic linker fill in the descriptor at load time.  */
	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  /* Both words of the descriptor need load-time relocation.  */
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as written.  */
      *funcdesc_offset |= 1;
    }
}
3560
3561 /* Create an entry in an ARM ELF linker hash table. */
3562
3563 static struct bfd_hash_entry *
3564 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3565 struct bfd_hash_table * table,
3566 const char * string)
3567 {
3568 struct elf32_arm_link_hash_entry * ret =
3569 (struct elf32_arm_link_hash_entry *) entry;
3570
3571 /* Allocate the structure if it has not already been allocated by a
3572 subclass. */
3573 if (ret == NULL)
3574 ret = (struct elf32_arm_link_hash_entry *)
3575 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3576 if (ret == NULL)
3577 return (struct bfd_hash_entry *) ret;
3578
3579 /* Call the allocation method of the superclass. */
3580 ret = ((struct elf32_arm_link_hash_entry *)
3581 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3582 table, string));
3583 if (ret != NULL)
3584 {
3585 ret->dyn_relocs = NULL;
3586 ret->tls_type = GOT_UNKNOWN;
3587 ret->tlsdesc_got = (bfd_vma) -1;
3588 ret->plt.thumb_refcount = 0;
3589 ret->plt.maybe_thumb_refcount = 0;
3590 ret->plt.noncall_refcount = 0;
3591 ret->plt.got_offset = -1;
3592 ret->is_iplt = FALSE;
3593 ret->export_glue = NULL;
3594
3595 ret->stub_cache = NULL;
3596
3597 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3598 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3599 ret->fdpic_cnts.funcdesc_cnt = 0;
3600 ret->fdpic_cnts.funcdesc_offset = -1;
3601 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3602 }
3603
3604 return (struct bfd_hash_entry *) ret;
3605 }
3606
3607 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3608 symbols. */
3609
3610 static bfd_boolean
3611 elf32_arm_allocate_local_sym_info (bfd *abfd)
3612 {
3613 if (elf_local_got_refcounts (abfd) == NULL)
3614 {
3615 bfd_size_type num_syms;
3616 bfd_size_type size;
3617 char *data;
3618
3619 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3620 size = num_syms * (sizeof (bfd_signed_vma)
3621 + sizeof (struct arm_local_iplt_info *)
3622 + sizeof (bfd_vma)
3623 + sizeof (char)
3624 + sizeof (struct fdpic_local));
3625 data = bfd_zalloc (abfd, size);
3626 if (data == NULL)
3627 return FALSE;
3628
3629 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3630 data += num_syms * sizeof (struct fdpic_local);
3631
3632 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3633 data += num_syms * sizeof (bfd_signed_vma);
3634
3635 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3636 data += num_syms * sizeof (struct arm_local_iplt_info *);
3637
3638 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3639 data += num_syms * sizeof (bfd_vma);
3640
3641 elf32_arm_local_got_tls_type (abfd) = data;
3642 }
3643 return TRUE;
3644 }
3645
3646 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3647 to input bfd ABFD. Create the information if it doesn't already exist.
3648 Return null if an allocation fails. */
3649
3650 static struct arm_local_iplt_info *
3651 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3652 {
3653 struct arm_local_iplt_info **ptr;
3654
3655 if (!elf32_arm_allocate_local_sym_info (abfd))
3656 return NULL;
3657
3658 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3659 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3660 if (*ptr == NULL)
3661 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3662 return *ptr;
3663 }
3664
3665 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3666 in ABFD's symbol table. If the symbol is global, H points to its
3667 hash table entry, otherwise H is null.
3668
3669 Return true if the symbol does have PLT information. When returning
3670 true, point *ROOT_PLT at the target-independent reference count/offset
3671 union and *ARM_PLT at the ARM-specific information. */
3672
3673 static bfd_boolean
3674 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3675 struct elf32_arm_link_hash_entry *h,
3676 unsigned long r_symndx, union gotplt_union **root_plt,
3677 struct arm_plt_info **arm_plt)
3678 {
3679 struct arm_local_iplt_info *local_iplt;
3680
3681 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3682 return FALSE;
3683
3684 if (h != NULL)
3685 {
3686 *root_plt = &h->root.plt;
3687 *arm_plt = &h->plt;
3688 return TRUE;
3689 }
3690
3691 if (elf32_arm_local_iplt (abfd) == NULL)
3692 return FALSE;
3693
3694 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3695 if (local_iplt == NULL)
3696 return FALSE;
3697
3698 *root_plt = &local_iplt->root;
3699 *arm_plt = &local_iplt->arm;
3700 return TRUE;
3701 }
3702
3703 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3704
3705 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3706 before it. */
3707
3708 static bfd_boolean
3709 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3710 struct arm_plt_info *arm_plt)
3711 {
3712 struct elf32_arm_link_hash_table *htab;
3713
3714 htab = elf32_arm_hash_table (info);
3715
3716 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3717 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3718 }
3719
3720 /* Return a pointer to the head of the dynamic reloc list that should
3721 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3722 ABFD's symbol table. Return null if an error occurs. */
3723
3724 static struct elf_dyn_relocs **
3725 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3726 Elf_Internal_Sym *isym)
3727 {
3728 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3729 {
3730 struct arm_local_iplt_info *local_iplt;
3731
3732 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3733 if (local_iplt == NULL)
3734 return NULL;
3735 return &local_iplt->dyn_relocs;
3736 }
3737 else
3738 {
3739 /* Track dynamic relocs needed for local syms too.
3740 We really need local syms available to do this
3741 easily. Oh well. */
3742 asection *s;
3743 void *vpp;
3744
3745 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3746 if (s == NULL)
3747 abort ();
3748
3749 vpp = &elf_section_data (s)->local_dynrel;
3750 return (struct elf_dyn_relocs **) vpp;
3751 }
3752 }
3753
3754 /* Initialize an entry in the stub hash table. */
3755
3756 static struct bfd_hash_entry *
3757 stub_hash_newfunc (struct bfd_hash_entry *entry,
3758 struct bfd_hash_table *table,
3759 const char *string)
3760 {
3761 /* Allocate the structure if it has not already been allocated by a
3762 subclass. */
3763 if (entry == NULL)
3764 {
3765 entry = (struct bfd_hash_entry *)
3766 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3767 if (entry == NULL)
3768 return entry;
3769 }
3770
3771 /* Call the allocation method of the superclass. */
3772 entry = bfd_hash_newfunc (entry, table, string);
3773 if (entry != NULL)
3774 {
3775 struct elf32_arm_stub_hash_entry *eh;
3776
3777 /* Initialize the local fields. */
3778 eh = (struct elf32_arm_stub_hash_entry *) entry;
3779 eh->stub_sec = NULL;
3780 eh->stub_offset = (bfd_vma) -1;
3781 eh->source_value = 0;
3782 eh->target_value = 0;
3783 eh->target_section = NULL;
3784 eh->orig_insn = 0;
3785 eh->stub_type = arm_stub_none;
3786 eh->stub_size = 0;
3787 eh->stub_template = NULL;
3788 eh->stub_template_size = -1;
3789 eh->h = NULL;
3790 eh->id_sec = NULL;
3791 eh->output_name = NULL;
3792 }
3793
3794 return entry;
3795 }
3796
3797 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3798 shortcuts to them in our hash table. */
3799
3800 static bfd_boolean
3801 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3802 {
3803 struct elf32_arm_link_hash_table *htab;
3804
3805 htab = elf32_arm_hash_table (info);
3806 if (htab == NULL)
3807 return FALSE;
3808
3809 /* BPABI objects never have a GOT, or associated sections. */
3810 if (htab->symbian_p)
3811 return TRUE;
3812
3813 if (! _bfd_elf_create_got_section (dynobj, info))
3814 return FALSE;
3815
3816 /* Also create .rofixup. */
3817 if (htab->fdpic_p)
3818 {
3819 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3820 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3821 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3822 if (htab->srofixup == NULL
3823 || !bfd_set_section_alignment (htab->srofixup, 2))
3824 return FALSE;
3825 }
3826
3827 return TRUE;
3828 }
3829
3830 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3831
3832 static bfd_boolean
3833 create_ifunc_sections (struct bfd_link_info *info)
3834 {
3835 struct elf32_arm_link_hash_table *htab;
3836 const struct elf_backend_data *bed;
3837 bfd *dynobj;
3838 asection *s;
3839 flagword flags;
3840
3841 htab = elf32_arm_hash_table (info);
3842 dynobj = htab->root.dynobj;
3843 bed = get_elf_backend_data (dynobj);
3844 flags = bed->dynamic_sec_flags;
3845
3846 if (htab->root.iplt == NULL)
3847 {
3848 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3849 flags | SEC_READONLY | SEC_CODE);
3850 if (s == NULL
3851 || !bfd_set_section_alignment (s, bed->plt_alignment))
3852 return FALSE;
3853 htab->root.iplt = s;
3854 }
3855
3856 if (htab->root.irelplt == NULL)
3857 {
3858 s = bfd_make_section_anyway_with_flags (dynobj,
3859 RELOC_SECTION (htab, ".iplt"),
3860 flags | SEC_READONLY);
3861 if (s == NULL
3862 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3863 return FALSE;
3864 htab->root.irelplt = s;
3865 }
3866
3867 if (htab->root.igotplt == NULL)
3868 {
3869 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3870 if (s == NULL
3871 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3872 return FALSE;
3873 htab->root.igotplt = s;
3874 }
3875 return TRUE;
3876 }
3877
3878 /* Determine if we're dealing with a Thumb only architecture. */
3879
3880 static bfd_boolean
3881 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3882 {
3883 int arch;
3884 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3885 Tag_CPU_arch_profile);
3886
3887 if (profile)
3888 return profile == 'M';
3889
3890 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3891
3892 /* Force return logic to be reviewed for each new architecture. */
3893 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3894
3895 if (arch == TAG_CPU_ARCH_V6_M
3896 || arch == TAG_CPU_ARCH_V6S_M
3897 || arch == TAG_CPU_ARCH_V7E_M
3898 || arch == TAG_CPU_ARCH_V8M_BASE
3899 || arch == TAG_CPU_ARCH_V8M_MAIN
3900 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3901 return TRUE;
3902
3903 return FALSE;
3904 }
3905
3906 /* Determine if we're dealing with a Thumb-2 object. */
3907
3908 static bfd_boolean
3909 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3910 {
3911 int arch;
3912 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3913 Tag_THUMB_ISA_use);
3914
3915 if (thumb_isa)
3916 return thumb_isa == 2;
3917
3918 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3919
3920 /* Force return logic to be reviewed for each new architecture. */
3921 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3922
3923 return (arch == TAG_CPU_ARCH_V6T2
3924 || arch == TAG_CPU_ARCH_V7
3925 || arch == TAG_CPU_ARCH_V7E_M
3926 || arch == TAG_CPU_ARCH_V8
3927 || arch == TAG_CPU_ARCH_V8R
3928 || arch == TAG_CPU_ARCH_V8M_MAIN
3929 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3930 }
3931
3932 /* Determine whether Thumb-2 BL instruction is available. */
3933
3934 static bfd_boolean
3935 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3936 {
3937 int arch =
3938 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3939
3940 /* Force return logic to be reviewed for each new architecture. */
3941 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3942
3943 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3944 return (arch == TAG_CPU_ARCH_V6T2
3945 || arch >= TAG_CPU_ARCH_V7);
3946 }
3947
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also picks the PLT header/entry sizes appropriate for
   the target variant (VxWorks, Thumb-only, FDPIC).  Returns FALSE on
   failure; aborts if the generic ELF code failed to create the
   expected sections.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Make sure the GOT (and, for FDPIC, .rofixup) exists first.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT templates, whose sizes differ between
	 shared and executable links.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the hash table at DYNOBJ so that
	 using_thumb_only() reads DYNOBJ's attributes; the original
	 obfd is restored immediately afterwards.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC overrides whatever PLT sizing was chosen above.  With
     DF_BIND_NOW the lazy-resolution tail of the FDPIC PLT entry
     (5 words) is dropped.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
4022
/* Copy the extra info we tack onto an elf_link_hash_entry from the
   indirect symbol IND to the direct symbol DIR, then let the generic
   ELF code copy the common fields.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    /* Unlink P from EIND's list; its counts have been
		       folded into Q.  */
		    *pp = p->next;
		    break;
		  }
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append EDIR's old list after the unmerged entries left on
	     EIND's list.  */
	  *pp = edir->dyn_relocs;
	}

      /* The combined list now hangs off EIND; move it to EDIR.  */
      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      if (dir->got.refcount <= 0)
	{
	  /* DIR has no GOT references of its own yet, so it inherits
	     IND's TLS access model.  */
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4094
4095 /* Destroy an ARM elf linker hash table. */
4096
4097 static void
4098 elf32_arm_link_hash_table_free (bfd *obfd)
4099 {
4100 struct elf32_arm_link_hash_table *ret
4101 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4102
4103 bfd_hash_table_free (&ret->stub_hash_table);
4104 _bfd_elf_link_hash_table_free (obfd);
4105 }
4106
4107 /* Create an ARM elf linker hash table. */
4108
4109 static struct bfd_link_hash_table *
4110 elf32_arm_link_hash_table_create (bfd *abfd)
4111 {
4112 struct elf32_arm_link_hash_table *ret;
4113 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4114
4115 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4116 if (ret == NULL)
4117 return NULL;
4118
4119 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4120 elf32_arm_link_hash_newfunc,
4121 sizeof (struct elf32_arm_link_hash_entry),
4122 ARM_ELF_DATA))
4123 {
4124 free (ret);
4125 return NULL;
4126 }
4127
4128 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4129 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4130 #ifdef FOUR_WORD_PLT
4131 ret->plt_header_size = 16;
4132 ret->plt_entry_size = 16;
4133 #else
4134 ret->plt_header_size = 20;
4135 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4136 #endif
4137 ret->use_rel = TRUE;
4138 ret->obfd = abfd;
4139 ret->fdpic_p = 0;
4140
4141 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4142 sizeof (struct elf32_arm_stub_hash_entry)))
4143 {
4144 _bfd_elf_link_hash_table_free (abfd);
4145 return NULL;
4146 }
4147 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4148
4149 return &ret->root.root;
4150 }
4151
4152 /* Determine what kind of NOPs are available. */
4153
4154 static bfd_boolean
4155 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4156 {
4157 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4158 Tag_CPU_arch);
4159
4160 /* Force return logic to be reviewed for each new architecture. */
4161 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4162
4163 return (arch == TAG_CPU_ARCH_V6T2
4164 || arch == TAG_CPU_ARCH_V6K
4165 || arch == TAG_CPU_ARCH_V7
4166 || arch == TAG_CPU_ARCH_V8
4167 || arch == TAG_CPU_ARCH_V8R);
4168 }
4169
4170 static bfd_boolean
4171 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4172 {
4173 switch (stub_type)
4174 {
4175 case arm_stub_long_branch_thumb_only:
4176 case arm_stub_long_branch_thumb2_only:
4177 case arm_stub_long_branch_thumb2_only_pure:
4178 case arm_stub_long_branch_v4t_thumb_arm:
4179 case arm_stub_short_branch_v4t_thumb_arm:
4180 case arm_stub_long_branch_v4t_thumb_arm_pic:
4181 case arm_stub_long_branch_v4t_thumb_tls_pic:
4182 case arm_stub_long_branch_thumb_only_pic:
4183 case arm_stub_cmse_branch_thumb_only:
4184 return TRUE;
4185 case arm_stub_none:
4186 BFD_FAIL ();
4187 return FALSE;
4188 break;
4189 default:
4190 return FALSE;
4191 }
4192 }
4193
/* Determine the type of stub needed, if any, for a call.

   INFO is the link.  INPUT_SEC/REL locate the branch; ST_TYPE and
   *ACTUAL_BRANCH_TYPE describe the target symbol, HASH its hash entry
   (may be NULL for local symbols), DESTINATION its address, SYM_SEC
   its section, INPUT_BFD its owner and NAME its name (for
   diagnostics).  On return, if a stub is needed, *ACTUAL_BRANCH_TYPE
   is updated with the destination mode actually used (the PLT
   handling below may rewrite it).  Returns arm_stub_none when no
   stub is required.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG branches are already known not to need a stub.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  /* Thumb-only target: a pure-code section requires
		     the movw-based stub; anything else gets the
		     regular Thumb-only stubs.  */
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4538
/* Build a name for an entry in the stub hash table.  Returns a
   bfd_malloc'd string (caller frees), or NULL on allocation
   failure.  */

static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* Global symbol: "<section id>_<symbol>+<addend>_<stub type>".
	 Budget: 8 hex digits, '_', the symbol name, '+', 8 hex
	 digits, '_', up to 2 digits of stub type, NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local symbol:
	 "<section id>_<sym sec id>:<sym index>+<addend>_<stub type>".
	 TLS call relocs use 0 in place of the symbol index.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
4579
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL when no
   stub entry exists (or for non-code input sections); does not create
   new entries.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections can contain branches needing stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach it's final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen(CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
			  CMSE_STUB_NAME,
			  (uint64_t)out_sec->output_section->vma
			    + out_sec->output_offset,
			  (uint64_t)sym_sec->output_section->vma
			    + sym_sec->output_offset
			    + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
	 relocations.  */
      xexit(1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: the per-symbol one-entry cache avoids rebuilding the
     stub name when the same (symbol, group, type) is looked up
     repeatedly.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
4652
4653 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4654 section. */
4655
4656 static bfd_boolean
4657 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4658 {
4659 if (stub_type >= max_stub_type)
4660 abort (); /* Should be unreachable. */
4661
4662 switch (stub_type)
4663 {
4664 case arm_stub_cmse_branch_thumb_only:
4665 return TRUE;
4666
4667 default:
4668 return FALSE;
4669 }
4670
4671 abort (); /* Should be unreachable. */
4672 }
4673
4674 /* Required alignment (as a power of 2) for the dedicated section holding
4675 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4676 with input sections. */
4677
4678 static int
4679 arm_dedicated_stub_output_section_required_alignment
4680 (enum elf32_arm_stub_type stub_type)
4681 {
4682 if (stub_type >= max_stub_type)
4683 abort (); /* Should be unreachable. */
4684
4685 switch (stub_type)
4686 {
4687 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4688 boundary. */
4689 case arm_stub_cmse_branch_thumb_only:
4690 return 5;
4691
4692 default:
4693 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4694 return 0;
4695 }
4696
4697 abort (); /* Should be unreachable. */
4698 }
4699
4700 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4701 NULL if veneers of this type are interspersed with input sections. */
4702
4703 static const char *
4704 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4705 {
4706 if (stub_type >= max_stub_type)
4707 abort (); /* Should be unreachable. */
4708
4709 switch (stub_type)
4710 {
4711 case arm_stub_cmse_branch_thumb_only:
4712 return CMSE_STUB_NAME;
4713
4714 default:
4715 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4716 return NULL;
4717 }
4718
4719 abort (); /* Should be unreachable. */
4720 }
4721
4722 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4723 returns the address of the hash table field in HTAB holding a pointer to the
4724 corresponding input section. Otherwise, returns NULL. */
4725
4726 static asection **
4727 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4728 enum elf32_arm_stub_type stub_type)
4729 {
4730 if (stub_type >= max_stub_type)
4731 abort (); /* Should be unreachable. */
4732
4733 switch (stub_type)
4734 {
4735 case arm_stub_cmse_branch_thumb_only:
4736 return &htab->cmse_stub_sec;
4737
4738 default:
4739 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4740 return NULL;
4741 }
4742
4743 abort (); /* Should be unreachable. */
4744 }
4745
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  Returns NULL on failure (reported via
   _bfd_error_handler for a missing dedicated output section).  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated case (CMSE): the output section must already exist
	 (created by the linker script); the input stub section is
	 tracked in the hash table rather than per section group.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Grouped case: stubs live next to the first section of the
	 group SECTION belongs to.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      /* Name the stub section "<prefix><STUB_SUFFIX>".  */
      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Cache the stub section against SECTION's own group slot too.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4825
4826 /* Add a new stub entry to the stub hash. Not all fields of the new
4827 stub entry are initialised. */
4828
4829 static struct elf32_arm_stub_hash_entry *
4830 elf32_arm_add_stub (const char *stub_name, asection *section,
4831 struct elf32_arm_link_hash_table *htab,
4832 enum elf32_arm_stub_type stub_type)
4833 {
4834 asection *link_sec;
4835 asection *stub_sec;
4836 struct elf32_arm_stub_hash_entry *stub_entry;
4837
4838 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4839 stub_type);
4840 if (stub_sec == NULL)
4841 return NULL;
4842
4843 /* Enter this entry into the linker stub hash table. */
4844 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4845 TRUE, FALSE);
4846 if (stub_entry == NULL)
4847 {
4848 if (section == NULL)
4849 section = stub_sec;
4850 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4851 section->owner, stub_name);
4852 return NULL;
4853 }
4854
4855 stub_entry->stub_sec = stub_sec;
4856 stub_entry->stub_offset = (bfd_vma) -1;
4857 stub_entry->id_sec = link_sec;
4858
4859 return stub_entry;
4860 }
4861
4862 /* Store an Arm insn into an output section not processed by
4863 elf32_arm_write_section. */
4864
4865 static void
4866 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4867 bfd * output_bfd, bfd_vma val, void * ptr)
4868 {
4869 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4870 bfd_putl32 (val, ptr);
4871 else
4872 bfd_putb32 (val, ptr);
4873 }
4874
4875 /* Store a 16-bit Thumb insn into an output section not processed by
4876 elf32_arm_write_section. */
4877
4878 static void
4879 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4880 bfd * output_bfd, bfd_vma val, void * ptr)
4881 {
4882 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4883 bfd_putl16 (val, ptr);
4884 else
4885 bfd_putb16 (val, ptr);
4886 }
4887
4888 /* Store a Thumb2 insn into an output section not processed by
4889 elf32_arm_write_section. */
4890
4891 static void
4892 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4893 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4894 {
4895 /* T2 instructions are 16-bit streamed. */
4896 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4897 {
4898 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4899 bfd_putl16 ((val & 0xffff), ptr + 2);
4900 }
4901 else
4902 {
4903 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4904 bfd_putb16 ((val & 0xffff), ptr + 2);
4905 }
4906 }
4907
4908 /* If it's possible to change R_TYPE to a more efficient access
4909 model, return the new reloc type. */
4910
4911 static unsigned
4912 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4913 struct elf_link_hash_entry *h)
4914 {
4915 int is_local = (h == NULL);
4916
4917 if (bfd_link_dll (info)
4918 || (h && h->root.type == bfd_link_hash_undefweak))
4919 return r_type;
4920
4921 /* We do not support relaxations for Old TLS models. */
4922 switch (r_type)
4923 {
4924 case R_ARM_TLS_GOTDESC:
4925 case R_ARM_TLS_CALL:
4926 case R_ARM_THM_TLS_CALL:
4927 case R_ARM_TLS_DESCSEQ:
4928 case R_ARM_THM_TLS_DESCSEQ:
4929 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4930 }
4931
4932 return r_type;
4933 }
4934
4935 static bfd_reloc_status_type elf32_arm_final_link_relocate
4936 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4937 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4938 const char *, unsigned char, enum arm_st_branch_type,
4939 struct elf_link_hash_entry *, bfd_boolean *, char **);
4940
4941 static unsigned int
4942 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4943 {
4944 switch (stub_type)
4945 {
4946 case arm_stub_a8_veneer_b_cond:
4947 case arm_stub_a8_veneer_b:
4948 case arm_stub_a8_veneer_bl:
4949 return 2;
4950
4951 case arm_stub_long_branch_any_any:
4952 case arm_stub_long_branch_v4t_arm_thumb:
4953 case arm_stub_long_branch_thumb_only:
4954 case arm_stub_long_branch_thumb2_only:
4955 case arm_stub_long_branch_thumb2_only_pure:
4956 case arm_stub_long_branch_v4t_thumb_thumb:
4957 case arm_stub_long_branch_v4t_thumb_arm:
4958 case arm_stub_short_branch_v4t_thumb_arm:
4959 case arm_stub_long_branch_any_arm_pic:
4960 case arm_stub_long_branch_any_thumb_pic:
4961 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4962 case arm_stub_long_branch_v4t_arm_thumb_pic:
4963 case arm_stub_long_branch_v4t_thumb_arm_pic:
4964 case arm_stub_long_branch_thumb_only_pic:
4965 case arm_stub_long_branch_any_tls_pic:
4966 case arm_stub_long_branch_v4t_thumb_tls_pic:
4967 case arm_stub_cmse_branch_thumb_only:
4968 case arm_stub_a8_veneer_blx:
4969 return 4;
4970
4971 case arm_stub_long_branch_arm_nacl:
4972 case arm_stub_long_branch_arm_nacl_pic:
4973 return 16;
4974
4975 default:
4976 abort (); /* Should be unreachable. */
4977 }
4978 }
4979
4980 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4981 veneering (TRUE) or have their own symbol (FALSE). */
4982
4983 static bfd_boolean
4984 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4985 {
4986 if (stub_type >= max_stub_type)
4987 abort (); /* Should be unreachable. */
4988
4989 switch (stub_type)
4990 {
4991 case arm_stub_cmse_branch_thumb_only:
4992 return TRUE;
4993
4994 default:
4995 return FALSE;
4996 }
4997
4998 abort (); /* Should be unreachable. */
4999 }
5000
5001 /* Returns the padding needed for the dedicated section used stubs of type
5002 STUB_TYPE. */
5003
5004 static int
5005 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
5006 {
5007 if (stub_type >= max_stub_type)
5008 abort (); /* Should be unreachable. */
5009
5010 switch (stub_type)
5011 {
5012 case arm_stub_cmse_branch_thumb_only:
5013 return 32;
5014
5015 default:
5016 return 0;
5017 }
5018
5019 abort (); /* Should be unreachable. */
5020 }
5021
5022 /* If veneers of type STUB_TYPE should go in a dedicated output section,
5023 returns the address of the hash table field in HTAB holding the offset at
5024 which new veneers should be layed out in the stub section. */
5025
5026 static bfd_vma*
5027 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5028 enum elf32_arm_stub_type stub_type)
5029 {
5030 switch (stub_type)
5031 {
5032 case arm_stub_cmse_branch_thumb_only:
5033 return &htab->new_cmse_stub_offset;
5034
5035 default:
5036 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5037 return NULL;
5038 }
5039 }
5040
5041 static bfd_boolean
5042 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5043 void * in_arg)
5044 {
5045 #define MAXRELOCS 3
5046 bfd_boolean removed_sg_veneer;
5047 struct elf32_arm_stub_hash_entry *stub_entry;
5048 struct elf32_arm_link_hash_table *globals;
5049 struct bfd_link_info *info;
5050 asection *stub_sec;
5051 bfd *stub_bfd;
5052 bfd_byte *loc;
5053 bfd_vma sym_value;
5054 int template_size;
5055 int size;
5056 const insn_sequence *template_sequence;
5057 int i;
5058 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5059 int stub_reloc_offset[MAXRELOCS] = {0, 0};
5060 int nrelocs = 0;
5061 int just_allocated = 0;
5062
5063 /* Massage our args to the form they really have. */
5064 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5065 info = (struct bfd_link_info *) in_arg;
5066
5067 globals = elf32_arm_hash_table (info);
5068 if (globals == NULL)
5069 return FALSE;
5070
5071 stub_sec = stub_entry->stub_sec;
5072
5073 if ((globals->fix_cortex_a8 < 0)
5074 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5075 /* We have to do less-strictly-aligned fixes last. */
5076 return TRUE;
5077
5078 /* Assign a slot at the end of section if none assigned yet. */
5079 if (stub_entry->stub_offset == (bfd_vma) -1)
5080 {
5081 stub_entry->stub_offset = stub_sec->size;
5082 just_allocated = 1;
5083 }
5084 loc = stub_sec->contents + stub_entry->stub_offset;
5085
5086 stub_bfd = stub_sec->owner;
5087
5088 /* This is the address of the stub destination. */
5089 sym_value = (stub_entry->target_value
5090 + stub_entry->target_section->output_offset
5091 + stub_entry->target_section->output_section->vma);
5092
5093 template_sequence = stub_entry->stub_template;
5094 template_size = stub_entry->stub_template_size;
5095
5096 size = 0;
5097 for (i = 0; i < template_size; i++)
5098 {
5099 switch (template_sequence[i].type)
5100 {
5101 case THUMB16_TYPE:
5102 {
5103 bfd_vma data = (bfd_vma) template_sequence[i].data;
5104 if (template_sequence[i].reloc_addend != 0)
5105 {
5106 /* We've borrowed the reloc_addend field to mean we should
5107 insert a condition code into this (Thumb-1 branch)
5108 instruction. See THUMB16_BCOND_INSN. */
5109 BFD_ASSERT ((data & 0xff00) == 0xd000);
5110 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5111 }
5112 bfd_put_16 (stub_bfd, data, loc + size);
5113 size += 2;
5114 }
5115 break;
5116
5117 case THUMB32_TYPE:
5118 bfd_put_16 (stub_bfd,
5119 (template_sequence[i].data >> 16) & 0xffff,
5120 loc + size);
5121 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5122 loc + size + 2);
5123 if (template_sequence[i].r_type != R_ARM_NONE)
5124 {
5125 stub_reloc_idx[nrelocs] = i;
5126 stub_reloc_offset[nrelocs++] = size;
5127 }
5128 size += 4;
5129 break;
5130
5131 case ARM_TYPE:
5132 bfd_put_32 (stub_bfd, template_sequence[i].data,
5133 loc + size);
5134 /* Handle cases where the target is encoded within the
5135 instruction. */
5136 if (template_sequence[i].r_type == R_ARM_JUMP24)
5137 {
5138 stub_reloc_idx[nrelocs] = i;
5139 stub_reloc_offset[nrelocs++] = size;
5140 }
5141 size += 4;
5142 break;
5143
5144 case DATA_TYPE:
5145 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5146 stub_reloc_idx[nrelocs] = i;
5147 stub_reloc_offset[nrelocs++] = size;
5148 size += 4;
5149 break;
5150
5151 default:
5152 BFD_FAIL ();
5153 return FALSE;
5154 }
5155 }
5156
5157 if (just_allocated)
5158 stub_sec->size += size;
5159
5160 /* Stub size has already been computed in arm_size_one_stub. Check
5161 consistency. */
5162 BFD_ASSERT (size == stub_entry->stub_size);
5163
5164 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5165 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5166 sym_value |= 1;
5167
5168 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5169 to relocate in each stub. */
5170 removed_sg_veneer =
5171 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5172 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5173
5174 for (i = 0; i < nrelocs; i++)
5175 {
5176 Elf_Internal_Rela rel;
5177 bfd_boolean unresolved_reloc;
5178 char *error_message;
5179 bfd_vma points_to =
5180 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5181
5182 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5183 rel.r_info = ELF32_R_INFO (0,
5184 template_sequence[stub_reloc_idx[i]].r_type);
5185 rel.r_addend = 0;
5186
5187 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5188 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5189 template should refer back to the instruction after the original
5190 branch. We use target_section as Cortex-A8 erratum workaround stubs
5191 are only generated when both source and target are in the same
5192 section. */
5193 points_to = stub_entry->target_section->output_section->vma
5194 + stub_entry->target_section->output_offset
5195 + stub_entry->source_value;
5196
5197 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5198 (template_sequence[stub_reloc_idx[i]].r_type),
5199 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5200 points_to, info, stub_entry->target_section, "", STT_FUNC,
5201 stub_entry->branch_type,
5202 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5203 &error_message);
5204 }
5205
5206 return TRUE;
5207 #undef MAXRELOCS
5208 }
5209
5210 /* Calculate the template, template size and instruction size for a stub.
5211 Return value is the instruction size. */
5212
5213 static unsigned int
5214 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5215 const insn_sequence **stub_template,
5216 int *stub_template_size)
5217 {
5218 const insn_sequence *template_sequence = NULL;
5219 int template_size = 0, i;
5220 unsigned int size;
5221
5222 template_sequence = stub_definitions[stub_type].template_sequence;
5223 if (stub_template)
5224 *stub_template = template_sequence;
5225
5226 template_size = stub_definitions[stub_type].template_size;
5227 if (stub_template_size)
5228 *stub_template_size = template_size;
5229
5230 size = 0;
5231 for (i = 0; i < template_size; i++)
5232 {
5233 switch (template_sequence[i].type)
5234 {
5235 case THUMB16_TYPE:
5236 size += 2;
5237 break;
5238
5239 case ARM_TYPE:
5240 case THUMB32_TYPE:
5241 case DATA_TYPE:
5242 size += 4;
5243 break;
5244
5245 default:
5246 BFD_FAIL ();
5247 return 0;
5248 }
5249 }
5250
5251 return size;
5252 }
5253
5254 /* As above, but don't actually build the stub. Just bump offset so
5255 we know stub section sizes. */
5256
5257 static bfd_boolean
5258 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5259 void *in_arg ATTRIBUTE_UNUSED)
5260 {
5261 struct elf32_arm_stub_hash_entry *stub_entry;
5262 const insn_sequence *template_sequence;
5263 int template_size, size;
5264
5265 /* Massage our args to the form they really have. */
5266 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5267
5268 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5269 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5270
5271 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5272 &template_size);
5273
5274 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5275 if (stub_entry->stub_template_size)
5276 {
5277 stub_entry->stub_size = size;
5278 stub_entry->stub_template = template_sequence;
5279 stub_entry->stub_template_size = template_size;
5280 }
5281
5282 /* Already accounted for. */
5283 if (stub_entry->stub_offset != (bfd_vma) -1)
5284 return TRUE;
5285
5286 size = (size + 7) & ~7;
5287 stub_entry->stub_sec->size += size;
5288
5289 return TRUE;
5290 }
5291
5292 /* External entry points for sizing and building linker stubs. */
5293
5294 /* Set up various things so that we can make a list of input sections
5295 for each output section included in the link. Returns -1 on error,
5296 0 when no stubs will be needed, and 1 on success. */
5297
5298 int
5299 elf32_arm_setup_section_lists (bfd *output_bfd,
5300 struct bfd_link_info *info)
5301 {
5302 bfd *input_bfd;
5303 unsigned int bfd_count;
5304 unsigned int top_id, top_index;
5305 asection *section;
5306 asection **input_list, **list;
5307 bfd_size_type amt;
5308 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5309
5310 if (htab == NULL)
5311 return 0;
5312 if (! is_elf_hash_table (htab))
5313 return 0;
5314
5315 /* Count the number of input BFDs and find the top input section id. */
5316 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5317 input_bfd != NULL;
5318 input_bfd = input_bfd->link.next)
5319 {
5320 bfd_count += 1;
5321 for (section = input_bfd->sections;
5322 section != NULL;
5323 section = section->next)
5324 {
5325 if (top_id < section->id)
5326 top_id = section->id;
5327 }
5328 }
5329 htab->bfd_count = bfd_count;
5330
5331 amt = sizeof (struct map_stub) * (top_id + 1);
5332 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5333 if (htab->stub_group == NULL)
5334 return -1;
5335 htab->top_id = top_id;
5336
5337 /* We can't use output_bfd->section_count here to find the top output
5338 section index as some sections may have been removed, and
5339 _bfd_strip_section_from_output doesn't renumber the indices. */
5340 for (section = output_bfd->sections, top_index = 0;
5341 section != NULL;
5342 section = section->next)
5343 {
5344 if (top_index < section->index)
5345 top_index = section->index;
5346 }
5347
5348 htab->top_index = top_index;
5349 amt = sizeof (asection *) * (top_index + 1);
5350 input_list = (asection **) bfd_malloc (amt);
5351 htab->input_list = input_list;
5352 if (input_list == NULL)
5353 return -1;
5354
5355 /* For sections we aren't interested in, mark their entries with a
5356 value we can check later. */
5357 list = input_list + top_index;
5358 do
5359 *list = bfd_abs_section_ptr;
5360 while (list-- != input_list);
5361
5362 for (section = output_bfd->sections;
5363 section != NULL;
5364 section = section->next)
5365 {
5366 if ((section->flags & SEC_CODE) != 0)
5367 input_list[section->index] = NULL;
5368 }
5369
5370 return 1;
5371 }
5372
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  /* Ignore output sections beyond the range recorded by
     elf32_arm_setup_section_lists.  */
  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* Entries left as bfd_abs_section_ptr belong to output sections
	 we are not collecting; code sections were marked NULL by
	 elf32_arm_setup_section_lists.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5402
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks an output section we are not
	 grouping (see elf32_arm_setup_section_lists).  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group while the next section still ends within
	     stub_group_size bytes of the group's start.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed once grouping
     is done.  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5503
5504 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5505 erratum fix. */
5506
5507 static int
5508 a8_reloc_compare (const void *a, const void *b)
5509 {
5510 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5511 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5512
5513 if (ra->from < rb->from)
5514 return -1;
5515 else if (ra->from > rb->from)
5516 return 1;
5517 else
5518 return 0;
5519 }
5520
5521 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5522 const char *, char **);
5523
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.  */

static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only executable PROGBITS sections that reach the output need
	 scanning.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached contents if available; otherwise read them in.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb ('t') spans are of
	 interest.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword in the 0xe800-0xffff range (excluding
		 0xe000-0xe7ff) marks a 32-bit Thumb-2 encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* A 32-bit branch whose first halfword sits in the last
		 two bytes of a 4KB page, preceded by a 32-bit
		 non-branch instruction, matches the erratum pattern.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation recorded at this branch's
		     address, if any.  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) immediate and
			 sign-extend from bit 20.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B/BL/BLX (encodings T1/T2/T4)
			 immediate, reconstructing I1/I2 from J1/J2 and S,
			 and sign-extend from bit 24.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only branches whose target lies in the same 4KB
			 page as the branch itself need a fix.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      /* NOTE(review): the result of bfd_realloc is
				 not checked for NULL before being stored
				 back into a8_fixes; on allocation failure
				 the subsequent writes would dereference a
				 null pointer — confirm whether callers
				 guarantee this cannot happen.  */
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* 8+1+8+1 bytes: two 32-bit hex values, a
				 colon and the terminating NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free contents only if we read them ourselves above.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5834
5835 /* Create or update a stub entry depending on whether the stub can already be
5836 found in HTAB. The stub is identified by:
5837 - its type STUB_TYPE
5838 - its source branch (note that several can share the same stub) whose
5839 section and relocation (if any) are given by SECTION and IRELA
5840 respectively
5841 - its target symbol whose input section, hash, name, value and branch type
5842 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5843 respectively
5844
5845 If found, the value of the stub's target symbol is updated from SYM_VALUE
5846 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5847 TRUE and the stub entry is initialized.
5848
5849 Returns the stub that was created or updated, or NULL if an error
5850 occurred. */
5851
5852 static struct elf32_arm_stub_hash_entry *
5853 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5854 enum elf32_arm_stub_type stub_type, asection *section,
5855 Elf_Internal_Rela *irela, asection *sym_sec,
5856 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5857 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5858 bfd_boolean *new_stub)
5859 {
5860 const asection *id_sec;
5861 char *stub_name;
5862 struct elf32_arm_stub_hash_entry *stub_entry;
5863 unsigned int r_type;
5864 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5865
5866 BFD_ASSERT (stub_type != arm_stub_none);
5867 *new_stub = FALSE;
5868
5869 if (sym_claimed)
5870 stub_name = sym_name;
5871 else
5872 {
5873 BFD_ASSERT (irela);
5874 BFD_ASSERT (section);
5875 BFD_ASSERT (section->id <= htab->top_id);
5876
5877 /* Support for grouping stub sections. */
5878 id_sec = htab->stub_group[section->id].link_sec;
5879
5880 /* Get the name of this stub. */
5881 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5882 stub_type);
5883 if (!stub_name)
5884 return NULL;
5885 }
5886
5887 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5888 FALSE);
5889 /* The proper stub has already been created, just update its value. */
5890 if (stub_entry != NULL)
5891 {
5892 if (!sym_claimed)
5893 free (stub_name);
5894 stub_entry->target_value = sym_value;
5895 return stub_entry;
5896 }
5897
5898 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5899 if (stub_entry == NULL)
5900 {
5901 if (!sym_claimed)
5902 free (stub_name);
5903 return NULL;
5904 }
5905
5906 stub_entry->target_value = sym_value;
5907 stub_entry->target_section = sym_sec;
5908 stub_entry->stub_type = stub_type;
5909 stub_entry->h = hash;
5910 stub_entry->branch_type = branch_type;
5911
5912 if (sym_claimed)
5913 stub_entry->output_name = sym_name;
5914 else
5915 {
5916 if (sym_name == NULL)
5917 sym_name = "unnamed";
5918 stub_entry->output_name = (char *)
5919 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5920 + strlen (sym_name));
5921 if (stub_entry->output_name == NULL)
5922 {
5923 free (stub_name);
5924 return NULL;
5925 }
5926
5927 /* For historical reasons, use the existing names for ARM-to-Thumb and
5928 Thumb-to-ARM stubs. */
5929 r_type = ELF32_R_TYPE (irela->r_info);
5930 if ((r_type == (unsigned int) R_ARM_THM_CALL
5931 || r_type == (unsigned int) R_ARM_THM_JUMP24
5932 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5933 && branch_type == ST_BRANCH_TO_ARM)
5934 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5935 else if ((r_type == (unsigned int) R_ARM_CALL
5936 || r_type == (unsigned int) R_ARM_JUMP24)
5937 && branch_type == ST_BRANCH_TO_THUMB)
5938 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5939 else
5940 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5941 }
5942
5943 *new_stub = TRUE;
5944 return stub_entry;
5945 }
5946
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create them
   accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
   created.

   The return value gives whether a stub failed to be allocated.  */

static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  /* Local symbols come first in the symbol table; sh_info is the index
     of the first global symbol.  */
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse cached local symbols if present, otherwise read them in.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  /* Local symbol: only of interest if named __acle_se_*.  */
	  cmse_sym = &local_syms[i];
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  if (!sym_name || !CONST_STRNEQ (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol with local binding.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  /* Global symbol: look at its hash entry.  */
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;
	  if (!CONST_STRNEQ (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      /* Special symbols only make sense for ARMv8-M Mainline/Baseline.  */
      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  /* For a global special symbol keep scanning so that further
	     problems with the associated normal symbol get reported too;
	     the final stub creation is skipped via the !ret test below.  */
	  if (i < ext_start)
	    continue;
	}

      /* Look up the normal symbol <SYM> for special symbol
	 __acle_se_<SYM>.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* NOTE(review): if cmse_invalid was set for this global special
	 symbol, cmse_hash is not necessarily defined/defweak here and
	 u.def is then not the active union member; stub creation is
	 still skipped via !ret, but confirm the comparison below is
	 harmless in that case.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Skip stub creation once any error was diagnosed.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Only free local symbols we read in ourselves (not the cached copy).  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6143
6144 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6145 code entry function, ie can be called from non secure code without using a
6146 veneer. */
6147
6148 static bfd_boolean
6149 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6150 {
6151 bfd_byte contents[4];
6152 uint32_t first_insn;
6153 asection *section;
6154 file_ptr offset;
6155 bfd *abfd;
6156
6157 /* Defined symbol of function type. */
6158 if (hash->root.root.type != bfd_link_hash_defined
6159 && hash->root.root.type != bfd_link_hash_defweak)
6160 return FALSE;
6161 if (hash->root.type != STT_FUNC)
6162 return FALSE;
6163
6164 /* Read first instruction. */
6165 section = hash->root.root.u.def.section;
6166 abfd = section->owner;
6167 offset = hash->root.root.u.def.value - section->vma;
6168 if (!bfd_get_section_contents (abfd, section, contents, offset,
6169 sizeof (contents)))
6170 return FALSE;
6171
6172 first_insn = bfd_get_32 (abfd, contents);
6173
6174 /* Starts by SG instruction. */
6175 return first_insn == 0xe97fe97f;
6176 }
6177
6178 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6179 secure gateway veneers (ie. the veneers was not in the input import library)
6180 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6181
6182 static bfd_boolean
6183 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6184 {
6185 struct elf32_arm_stub_hash_entry *stub_entry;
6186 struct bfd_link_info *info;
6187
6188 /* Massage our args to the form they really have. */
6189 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6190 info = (struct bfd_link_info *) gen_info;
6191
6192 if (info->out_implib_bfd)
6193 return TRUE;
6194
6195 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6196 return TRUE;
6197
6198 if (stub_entry->stub_offset == (bfd_vma) -1)
6199 _bfd_error_handler (" %s", stub_entry->output_name);
6200
6201 return TRUE;
6202 }
6203
6204 /* Set offset of each secure gateway veneers so that its address remain
6205 identical to the one in the input import library referred by
6206 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6207 (present in input import library but absent from the executable being
6208 linked) or if new veneers appeared and there is no output import library
6209 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6210 number of secure gateway veneers found in the input import library.
6211
6212 The function returns whether an error occurred. If no error occurred,
6213 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6214 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6215 veneer observed set for new veneers to be layed out after. */
6216
6217 static bfd_boolean
6218 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6219 struct elf32_arm_link_hash_table *htab,
6220 int *cmse_stub_created)
6221 {
6222 long symsize;
6223 char *sym_name;
6224 flagword flags;
6225 long i, symcount;
6226 bfd *in_implib_bfd;
6227 asection *stub_out_sec;
6228 bfd_boolean ret = TRUE;
6229 Elf_Internal_Sym *intsym;
6230 const char *out_sec_name;
6231 bfd_size_type cmse_stub_size;
6232 asymbol **sympp = NULL, *sym;
6233 struct elf32_arm_link_hash_entry *hash;
6234 const insn_sequence *cmse_stub_template;
6235 struct elf32_arm_stub_hash_entry *stub_entry;
6236 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6237 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6238 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6239
6240 /* No input secure gateway import library. */
6241 if (!htab->in_implib_bfd)
6242 return TRUE;
6243
6244 in_implib_bfd = htab->in_implib_bfd;
6245 if (!htab->cmse_implib)
6246 {
6247 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6248 "Gateway import libraries"), in_implib_bfd);
6249 return FALSE;
6250 }
6251
6252 /* Get symbol table size. */
6253 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6254 if (symsize < 0)
6255 return FALSE;
6256
6257 /* Read in the input secure gateway import library's symbol table. */
6258 sympp = (asymbol **) bfd_malloc (symsize);
6259 if (sympp == NULL)
6260 return FALSE;
6261
6262 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6263 if (symcount < 0)
6264 {
6265 ret = FALSE;
6266 goto free_sym_buf;
6267 }
6268
6269 htab->new_cmse_stub_offset = 0;
6270 cmse_stub_size =
6271 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6272 &cmse_stub_template,
6273 &cmse_stub_template_size);
6274 out_sec_name =
6275 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6276 stub_out_sec =
6277 bfd_get_section_by_name (htab->obfd, out_sec_name);
6278 if (stub_out_sec != NULL)
6279 cmse_stub_sec_vma = stub_out_sec->vma;
6280
6281 /* Set addresses of veneers mentionned in input secure gateway import
6282 library's symbol table. */
6283 for (i = 0; i < symcount; i++)
6284 {
6285 sym = sympp[i];
6286 flags = sym->flags;
6287 sym_name = (char *) bfd_asymbol_name (sym);
6288 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6289
6290 if (sym->section != bfd_abs_section_ptr
6291 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6292 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6293 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6294 != ST_BRANCH_TO_THUMB))
6295 {
6296 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6297 "symbol should be absolute, global and "
6298 "refer to Thumb functions"),
6299 in_implib_bfd, sym_name);
6300 ret = FALSE;
6301 continue;
6302 }
6303
6304 veneer_value = bfd_asymbol_value (sym);
6305 stub_offset = veneer_value - cmse_stub_sec_vma;
6306 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6307 FALSE, FALSE);
6308 hash = (struct elf32_arm_link_hash_entry *)
6309 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6310
6311 /* Stub entry should have been created by cmse_scan or the symbol be of
6312 a secure function callable from non secure code. */
6313 if (!stub_entry && !hash)
6314 {
6315 bfd_boolean new_stub;
6316
6317 _bfd_error_handler
6318 (_("entry function `%s' disappeared from secure code"), sym_name);
6319 hash = (struct elf32_arm_link_hash_entry *)
6320 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6321 stub_entry
6322 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6323 NULL, NULL, bfd_abs_section_ptr, hash,
6324 sym_name, veneer_value,
6325 ST_BRANCH_TO_THUMB, &new_stub);
6326 if (stub_entry == NULL)
6327 ret = FALSE;
6328 else
6329 {
6330 BFD_ASSERT (new_stub);
6331 new_cmse_stubs_created++;
6332 (*cmse_stub_created)++;
6333 }
6334 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6335 stub_entry->stub_offset = stub_offset;
6336 }
6337 /* Symbol found is not callable from non secure code. */
6338 else if (!stub_entry)
6339 {
6340 if (!cmse_entry_fct_p (hash))
6341 {
6342 _bfd_error_handler (_("`%s' refers to a non entry function"),
6343 sym_name);
6344 ret = FALSE;
6345 }
6346 continue;
6347 }
6348 else
6349 {
6350 /* Only stubs for SG veneers should have been created. */
6351 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6352
6353 /* Check visibility hasn't changed. */
6354 if (!!(flags & BSF_GLOBAL)
6355 != (hash->root.root.type == bfd_link_hash_defined))
6356 _bfd_error_handler
6357 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6358 sym_name);
6359
6360 stub_entry->stub_offset = stub_offset;
6361 }
6362
6363 /* Size should match that of a SG veneer. */
6364 if (intsym->st_size != cmse_stub_size)
6365 {
6366 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6367 in_implib_bfd, sym_name);
6368 ret = FALSE;
6369 }
6370
6371 /* Previous veneer address is before current SG veneer section. */
6372 if (veneer_value < cmse_stub_sec_vma)
6373 {
6374 /* Avoid offset underflow. */
6375 if (stub_entry)
6376 stub_entry->stub_offset = 0;
6377 stub_offset = 0;
6378 ret = FALSE;
6379 }
6380
6381 /* Complain if stub offset not a multiple of stub size. */
6382 if (stub_offset % cmse_stub_size)
6383 {
6384 _bfd_error_handler
6385 (_("offset of veneer for entry function `%s' not a multiple of "
6386 "its size"), sym_name);
6387 ret = FALSE;
6388 }
6389
6390 if (!ret)
6391 continue;
6392
6393 new_cmse_stubs_created--;
6394 if (veneer_value < cmse_stub_array_start)
6395 cmse_stub_array_start = veneer_value;
6396 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6397 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6398 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6399 }
6400
6401 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6402 {
6403 BFD_ASSERT (new_cmse_stubs_created > 0);
6404 _bfd_error_handler
6405 (_("new entry function(s) introduced but no output import library "
6406 "specified:"));
6407 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6408 }
6409
6410 if (cmse_stub_array_start != cmse_stub_sec_vma)
6411 {
6412 _bfd_error_handler
6413 (_("start address of `%s' is different from previous link"),
6414 out_sec_name);
6415 ret = FALSE;
6416 }
6417
6418 free_sym_buf:
6419 free (sympp);
6420 return ret;
6421 }
6422
6423 /* Determine and set the size of the stub section for a final link.
6424
6425 The basic idea here is to examine all the relocations looking for
6426 PC-relative calls to a target that is unreachable with a "bl"
6427 instruction. */
6428
6429 bfd_boolean
6430 elf32_arm_size_stubs (bfd *output_bfd,
6431 bfd *stub_bfd,
6432 struct bfd_link_info *info,
6433 bfd_signed_vma group_size,
6434 asection * (*add_stub_section) (const char *, asection *,
6435 asection *,
6436 unsigned int),
6437 void (*layout_sections_again) (void))
6438 {
6439 bfd_boolean ret = TRUE;
6440 obj_attribute *out_attr;
6441 int cmse_stub_created = 0;
6442 bfd_size_type stub_group_size;
6443 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6444 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6445 struct a8_erratum_fix *a8_fixes = NULL;
6446 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6447 struct a8_erratum_reloc *a8_relocs = NULL;
6448 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6449
6450 if (htab == NULL)
6451 return FALSE;
6452
6453 if (htab->fix_cortex_a8)
6454 {
6455 a8_fixes = (struct a8_erratum_fix *)
6456 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6457 a8_relocs = (struct a8_erratum_reloc *)
6458 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6459 }
6460
6461 /* Propagate mach to stub bfd, because it may not have been
6462 finalized when we created stub_bfd. */
6463 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6464 bfd_get_mach (output_bfd));
6465
6466 /* Stash our params away. */
6467 htab->stub_bfd = stub_bfd;
6468 htab->add_stub_section = add_stub_section;
6469 htab->layout_sections_again = layout_sections_again;
6470 stubs_always_after_branch = group_size < 0;
6471
6472 out_attr = elf_known_obj_attributes_proc (output_bfd);
6473 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6474
6475 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6476 as the first half of a 32-bit branch straddling two 4K pages. This is a
6477 crude way of enforcing that. */
6478 if (htab->fix_cortex_a8)
6479 stubs_always_after_branch = 1;
6480
6481 if (group_size < 0)
6482 stub_group_size = -group_size;
6483 else
6484 stub_group_size = group_size;
6485
6486 if (stub_group_size == 1)
6487 {
6488 /* Default values. */
6489 /* Thumb branch range is +-4MB has to be used as the default
6490 maximum size (a given section can contain both ARM and Thumb
6491 code, so the worst case has to be taken into account).
6492
6493 This value is 24K less than that, which allows for 2025
6494 12-byte stubs. If we exceed that, then we will fail to link.
6495 The user will have to relink with an explicit group size
6496 option. */
6497 stub_group_size = 4170000;
6498 }
6499
6500 group_sections (htab, stub_group_size, stubs_always_after_branch);
6501
6502 /* If we're applying the cortex A8 fix, we need to determine the
6503 program header size now, because we cannot change it later --
6504 that could alter section placements. Notice the A8 erratum fix
6505 ends up requiring the section addresses to remain unchanged
6506 modulo the page size. That's something we cannot represent
6507 inside BFD, and we don't want to force the section alignment to
6508 be the page size. */
6509 if (htab->fix_cortex_a8)
6510 (*htab->layout_sections_again) ();
6511
6512 while (1)
6513 {
6514 bfd *input_bfd;
6515 unsigned int bfd_indx;
6516 asection *stub_sec;
6517 enum elf32_arm_stub_type stub_type;
6518 bfd_boolean stub_changed = FALSE;
6519 unsigned prev_num_a8_fixes = num_a8_fixes;
6520
6521 num_a8_fixes = 0;
6522 for (input_bfd = info->input_bfds, bfd_indx = 0;
6523 input_bfd != NULL;
6524 input_bfd = input_bfd->link.next, bfd_indx++)
6525 {
6526 Elf_Internal_Shdr *symtab_hdr;
6527 asection *section;
6528 Elf_Internal_Sym *local_syms = NULL;
6529
6530 if (!is_arm_elf (input_bfd))
6531 continue;
6532 if ((input_bfd->flags & DYNAMIC) != 0
6533 && (elf_sym_hashes (input_bfd) == NULL
6534 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6535 continue;
6536
6537 num_a8_relocs = 0;
6538
6539 /* We'll need the symbol table in a second. */
6540 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6541 if (symtab_hdr->sh_info == 0)
6542 continue;
6543
6544 /* Limit scan of symbols to object file whose profile is
6545 Microcontroller to not hinder performance in the general case. */
6546 if (m_profile && first_veneer_scan)
6547 {
6548 struct elf_link_hash_entry **sym_hashes;
6549
6550 sym_hashes = elf_sym_hashes (input_bfd);
6551 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6552 &cmse_stub_created))
6553 goto error_ret_free_local;
6554
6555 if (cmse_stub_created != 0)
6556 stub_changed = TRUE;
6557 }
6558
6559 /* Walk over each section attached to the input bfd. */
6560 for (section = input_bfd->sections;
6561 section != NULL;
6562 section = section->next)
6563 {
6564 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6565
6566 /* If there aren't any relocs, then there's nothing more
6567 to do. */
6568 if ((section->flags & SEC_RELOC) == 0
6569 || section->reloc_count == 0
6570 || (section->flags & SEC_CODE) == 0)
6571 continue;
6572
6573 /* If this section is a link-once section that will be
6574 discarded, then don't create any stubs. */
6575 if (section->output_section == NULL
6576 || section->output_section->owner != output_bfd)
6577 continue;
6578
6579 /* Get the relocs. */
6580 internal_relocs
6581 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6582 NULL, info->keep_memory);
6583 if (internal_relocs == NULL)
6584 goto error_ret_free_local;
6585
6586 /* Now examine each relocation. */
6587 irela = internal_relocs;
6588 irelaend = irela + section->reloc_count;
6589 for (; irela < irelaend; irela++)
6590 {
6591 unsigned int r_type, r_indx;
6592 asection *sym_sec;
6593 bfd_vma sym_value;
6594 bfd_vma destination;
6595 struct elf32_arm_link_hash_entry *hash;
6596 const char *sym_name;
6597 unsigned char st_type;
6598 enum arm_st_branch_type branch_type;
6599 bfd_boolean created_stub = FALSE;
6600
6601 r_type = ELF32_R_TYPE (irela->r_info);
6602 r_indx = ELF32_R_SYM (irela->r_info);
6603
6604 if (r_type >= (unsigned int) R_ARM_max)
6605 {
6606 bfd_set_error (bfd_error_bad_value);
6607 error_ret_free_internal:
6608 if (elf_section_data (section)->relocs == NULL)
6609 free (internal_relocs);
6610 /* Fall through. */
6611 error_ret_free_local:
6612 if (local_syms != NULL
6613 && (symtab_hdr->contents
6614 != (unsigned char *) local_syms))
6615 free (local_syms);
6616 return FALSE;
6617 }
6618
6619 hash = NULL;
6620 if (r_indx >= symtab_hdr->sh_info)
6621 hash = elf32_arm_hash_entry
6622 (elf_sym_hashes (input_bfd)
6623 [r_indx - symtab_hdr->sh_info]);
6624
6625 /* Only look for stubs on branch instructions, or
6626 non-relaxed TLSCALL */
6627 if ((r_type != (unsigned int) R_ARM_CALL)
6628 && (r_type != (unsigned int) R_ARM_THM_CALL)
6629 && (r_type != (unsigned int) R_ARM_JUMP24)
6630 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6631 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6632 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6633 && (r_type != (unsigned int) R_ARM_PLT32)
6634 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6635 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6636 && r_type == elf32_arm_tls_transition
6637 (info, r_type, &hash->root)
6638 && ((hash ? hash->tls_type
6639 : (elf32_arm_local_got_tls_type
6640 (input_bfd)[r_indx]))
6641 & GOT_TLS_GDESC) != 0))
6642 continue;
6643
6644 /* Now determine the call target, its name, value,
6645 section. */
6646 sym_sec = NULL;
6647 sym_value = 0;
6648 destination = 0;
6649 sym_name = NULL;
6650
6651 if (r_type == (unsigned int) R_ARM_TLS_CALL
6652 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6653 {
6654 /* A non-relaxed TLS call. The target is the
6655 plt-resident trampoline and nothing to do
6656 with the symbol. */
6657 BFD_ASSERT (htab->tls_trampoline > 0);
6658 sym_sec = htab->root.splt;
6659 sym_value = htab->tls_trampoline;
6660 hash = 0;
6661 st_type = STT_FUNC;
6662 branch_type = ST_BRANCH_TO_ARM;
6663 }
6664 else if (!hash)
6665 {
6666 /* It's a local symbol. */
6667 Elf_Internal_Sym *sym;
6668
6669 if (local_syms == NULL)
6670 {
6671 local_syms
6672 = (Elf_Internal_Sym *) symtab_hdr->contents;
6673 if (local_syms == NULL)
6674 local_syms
6675 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6676 symtab_hdr->sh_info, 0,
6677 NULL, NULL, NULL);
6678 if (local_syms == NULL)
6679 goto error_ret_free_internal;
6680 }
6681
6682 sym = local_syms + r_indx;
6683 if (sym->st_shndx == SHN_UNDEF)
6684 sym_sec = bfd_und_section_ptr;
6685 else if (sym->st_shndx == SHN_ABS)
6686 sym_sec = bfd_abs_section_ptr;
6687 else if (sym->st_shndx == SHN_COMMON)
6688 sym_sec = bfd_com_section_ptr;
6689 else
6690 sym_sec =
6691 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6692
6693 if (!sym_sec)
6694 /* This is an undefined symbol. It can never
6695 be resolved. */
6696 continue;
6697
6698 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6699 sym_value = sym->st_value;
6700 destination = (sym_value + irela->r_addend
6701 + sym_sec->output_offset
6702 + sym_sec->output_section->vma);
6703 st_type = ELF_ST_TYPE (sym->st_info);
6704 branch_type =
6705 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6706 sym_name
6707 = bfd_elf_string_from_elf_section (input_bfd,
6708 symtab_hdr->sh_link,
6709 sym->st_name);
6710 }
6711 else
6712 {
6713 /* It's an external symbol. */
6714 while (hash->root.root.type == bfd_link_hash_indirect
6715 || hash->root.root.type == bfd_link_hash_warning)
6716 hash = ((struct elf32_arm_link_hash_entry *)
6717 hash->root.root.u.i.link);
6718
6719 if (hash->root.root.type == bfd_link_hash_defined
6720 || hash->root.root.type == bfd_link_hash_defweak)
6721 {
6722 sym_sec = hash->root.root.u.def.section;
6723 sym_value = hash->root.root.u.def.value;
6724
6725 struct elf32_arm_link_hash_table *globals =
6726 elf32_arm_hash_table (info);
6727
6728 /* For a destination in a shared library,
6729 use the PLT stub as target address to
6730 decide whether a branch stub is
6731 needed. */
6732 if (globals != NULL
6733 && globals->root.splt != NULL
6734 && hash != NULL
6735 && hash->root.plt.offset != (bfd_vma) -1)
6736 {
6737 sym_sec = globals->root.splt;
6738 sym_value = hash->root.plt.offset;
6739 if (sym_sec->output_section != NULL)
6740 destination = (sym_value
6741 + sym_sec->output_offset
6742 + sym_sec->output_section->vma);
6743 }
6744 else if (sym_sec->output_section != NULL)
6745 destination = (sym_value + irela->r_addend
6746 + sym_sec->output_offset
6747 + sym_sec->output_section->vma);
6748 }
6749 else if ((hash->root.root.type == bfd_link_hash_undefined)
6750 || (hash->root.root.type == bfd_link_hash_undefweak))
6751 {
6752 /* For a shared library, use the PLT stub as
6753 target address to decide whether a long
6754 branch stub is needed.
6755 For absolute code, they cannot be handled. */
6756 struct elf32_arm_link_hash_table *globals =
6757 elf32_arm_hash_table (info);
6758
6759 if (globals != NULL
6760 && globals->root.splt != NULL
6761 && hash != NULL
6762 && hash->root.plt.offset != (bfd_vma) -1)
6763 {
6764 sym_sec = globals->root.splt;
6765 sym_value = hash->root.plt.offset;
6766 if (sym_sec->output_section != NULL)
6767 destination = (sym_value
6768 + sym_sec->output_offset
6769 + sym_sec->output_section->vma);
6770 }
6771 else
6772 continue;
6773 }
6774 else
6775 {
6776 bfd_set_error (bfd_error_bad_value);
6777 goto error_ret_free_internal;
6778 }
6779 st_type = hash->root.type;
6780 branch_type =
6781 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6782 sym_name = hash->root.root.root.string;
6783 }
6784
6785 do
6786 {
6787 bfd_boolean new_stub;
6788 struct elf32_arm_stub_hash_entry *stub_entry;
6789
6790 /* Determine what (if any) linker stub is needed. */
6791 stub_type = arm_type_of_stub (info, section, irela,
6792 st_type, &branch_type,
6793 hash, destination, sym_sec,
6794 input_bfd, sym_name);
6795 if (stub_type == arm_stub_none)
6796 break;
6797
6798 /* We've either created a stub for this reloc already,
6799 or we are about to. */
6800 stub_entry =
6801 elf32_arm_create_stub (htab, stub_type, section, irela,
6802 sym_sec, hash,
6803 (char *) sym_name, sym_value,
6804 branch_type, &new_stub);
6805
6806 created_stub = stub_entry != NULL;
6807 if (!created_stub)
6808 goto error_ret_free_internal;
6809 else if (!new_stub)
6810 break;
6811 else
6812 stub_changed = TRUE;
6813 }
6814 while (0);
6815
6816 /* Look for relocations which might trigger Cortex-A8
6817 erratum. */
6818 if (htab->fix_cortex_a8
6819 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6820 || r_type == (unsigned int) R_ARM_THM_JUMP19
6821 || r_type == (unsigned int) R_ARM_THM_CALL
6822 || r_type == (unsigned int) R_ARM_THM_XPC22))
6823 {
6824 bfd_vma from = section->output_section->vma
6825 + section->output_offset
6826 + irela->r_offset;
6827
6828 if ((from & 0xfff) == 0xffe)
6829 {
6830 /* Found a candidate. Note we haven't checked the
6831 destination is within 4K here: if we do so (and
6832 don't create an entry in a8_relocs) we can't tell
6833 that a branch should have been relocated when
6834 scanning later. */
6835 if (num_a8_relocs == a8_reloc_table_size)
6836 {
6837 a8_reloc_table_size *= 2;
6838 a8_relocs = (struct a8_erratum_reloc *)
6839 bfd_realloc (a8_relocs,
6840 sizeof (struct a8_erratum_reloc)
6841 * a8_reloc_table_size);
6842 }
6843
6844 a8_relocs[num_a8_relocs].from = from;
6845 a8_relocs[num_a8_relocs].destination = destination;
6846 a8_relocs[num_a8_relocs].r_type = r_type;
6847 a8_relocs[num_a8_relocs].branch_type = branch_type;
6848 a8_relocs[num_a8_relocs].sym_name = sym_name;
6849 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6850 a8_relocs[num_a8_relocs].hash = hash;
6851
6852 num_a8_relocs++;
6853 }
6854 }
6855 }
6856
6857 /* We're done with the internal relocs, free them. */
6858 if (elf_section_data (section)->relocs == NULL)
6859 free (internal_relocs);
6860 }
6861
6862 if (htab->fix_cortex_a8)
6863 {
6864 /* Sort relocs which might apply to Cortex-A8 erratum. */
6865 qsort (a8_relocs, num_a8_relocs,
6866 sizeof (struct a8_erratum_reloc),
6867 &a8_reloc_compare);
6868
6869 /* Scan for branches which might trigger Cortex-A8 erratum. */
6870 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6871 &num_a8_fixes, &a8_fix_table_size,
6872 a8_relocs, num_a8_relocs,
6873 prev_num_a8_fixes, &stub_changed)
6874 != 0)
6875 goto error_ret_free_local;
6876 }
6877
6878 if (local_syms != NULL
6879 && symtab_hdr->contents != (unsigned char *) local_syms)
6880 {
6881 if (!info->keep_memory)
6882 free (local_syms);
6883 else
6884 symtab_hdr->contents = (unsigned char *) local_syms;
6885 }
6886 }
6887
6888 if (first_veneer_scan
6889 && !set_cmse_veneer_addr_from_implib (info, htab,
6890 &cmse_stub_created))
6891 ret = FALSE;
6892
6893 if (prev_num_a8_fixes != num_a8_fixes)
6894 stub_changed = TRUE;
6895
6896 if (!stub_changed)
6897 break;
6898
6899 /* OK, we've added some stubs. Find out the new size of the
6900 stub sections. */
6901 for (stub_sec = htab->stub_bfd->sections;
6902 stub_sec != NULL;
6903 stub_sec = stub_sec->next)
6904 {
6905 /* Ignore non-stub sections. */
6906 if (!strstr (stub_sec->name, STUB_SUFFIX))
6907 continue;
6908
6909 stub_sec->size = 0;
6910 }
6911
6912 /* Add new SG veneers after those already in the input import
6913 library. */
6914 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6915 stub_type++)
6916 {
6917 bfd_vma *start_offset_p;
6918 asection **stub_sec_p;
6919
6920 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6921 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6922 if (start_offset_p == NULL)
6923 continue;
6924
6925 BFD_ASSERT (stub_sec_p != NULL);
6926 if (*stub_sec_p != NULL)
6927 (*stub_sec_p)->size = *start_offset_p;
6928 }
6929
6930 /* Compute stub section size, considering padding. */
6931 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6932 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6933 stub_type++)
6934 {
6935 int size, padding;
6936 asection **stub_sec_p;
6937
6938 padding = arm_dedicated_stub_section_padding (stub_type);
6939 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6940 /* Skip if no stub input section or no stub section padding
6941 required. */
6942 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6943 continue;
6944 /* Stub section padding required but no dedicated section. */
6945 BFD_ASSERT (stub_sec_p);
6946
6947 size = (*stub_sec_p)->size;
6948 size = (size + padding - 1) & ~(padding - 1);
6949 (*stub_sec_p)->size = size;
6950 }
6951
6952 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6953 if (htab->fix_cortex_a8)
6954 for (i = 0; i < num_a8_fixes; i++)
6955 {
6956 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6957 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6958
6959 if (stub_sec == NULL)
6960 return FALSE;
6961
6962 stub_sec->size
6963 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6964 NULL);
6965 }
6966
6967
6968 /* Ask the linker to do its stuff. */
6969 (*htab->layout_sections_again) ();
6970 first_veneer_scan = FALSE;
6971 }
6972
6973 /* Add stubs for Cortex-A8 erratum fixes now. */
6974 if (htab->fix_cortex_a8)
6975 {
6976 for (i = 0; i < num_a8_fixes; i++)
6977 {
6978 struct elf32_arm_stub_hash_entry *stub_entry;
6979 char *stub_name = a8_fixes[i].stub_name;
6980 asection *section = a8_fixes[i].section;
6981 unsigned int section_id = a8_fixes[i].section->id;
6982 asection *link_sec = htab->stub_group[section_id].link_sec;
6983 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6984 const insn_sequence *template_sequence;
6985 int template_size, size = 0;
6986
6987 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6988 TRUE, FALSE);
6989 if (stub_entry == NULL)
6990 {
6991 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6992 section->owner, stub_name);
6993 return FALSE;
6994 }
6995
6996 stub_entry->stub_sec = stub_sec;
6997 stub_entry->stub_offset = (bfd_vma) -1;
6998 stub_entry->id_sec = link_sec;
6999 stub_entry->stub_type = a8_fixes[i].stub_type;
7000 stub_entry->source_value = a8_fixes[i].offset;
7001 stub_entry->target_section = a8_fixes[i].section;
7002 stub_entry->target_value = a8_fixes[i].target_offset;
7003 stub_entry->orig_insn = a8_fixes[i].orig_insn;
7004 stub_entry->branch_type = a8_fixes[i].branch_type;
7005
7006 size = find_stub_size_and_template (a8_fixes[i].stub_type,
7007 &template_sequence,
7008 &template_size);
7009
7010 stub_entry->stub_size = size;
7011 stub_entry->stub_template = template_sequence;
7012 stub_entry->stub_template_size = template_size;
7013 }
7014
7015 /* Stash the Cortex-A8 erratum fix array for use later in
7016 elf32_arm_write_section(). */
7017 htab->a8_erratum_fixes = a8_fixes;
7018 htab->num_a8_erratum_fixes = num_a8_fixes;
7019 }
7020 else
7021 {
7022 htab->a8_erratum_fixes = NULL;
7023 htab->num_a8_erratum_fixes = 0;
7024 }
7025 return ret;
7026 }
7027
7028 /* Build all the stubs associated with the current output file. The
7029 stubs are kept in a hash table attached to the main linker hash
7030 table. We also set up the .plt entries for statically linked PIC
7031 functions here. This function is called via arm_elf_finish in the
7032 linker. */
7033
7034 bfd_boolean
7035 elf32_arm_build_stubs (struct bfd_link_info *info)
7036 {
7037 asection *stub_sec;
7038 struct bfd_hash_table *table;
7039 enum elf32_arm_stub_type stub_type;
7040 struct elf32_arm_link_hash_table *htab;
7041
7042 htab = elf32_arm_hash_table (info);
7043 if (htab == NULL)
7044 return FALSE;
7045
7046 for (stub_sec = htab->stub_bfd->sections;
7047 stub_sec != NULL;
7048 stub_sec = stub_sec->next)
7049 {
7050 bfd_size_type size;
7051
7052 /* Ignore non-stub sections. */
7053 if (!strstr (stub_sec->name, STUB_SUFFIX))
7054 continue;
7055
7056 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7057 must at least be done for stub section requiring padding and for SG
7058 veneers to ensure that a non secure code branching to a removed SG
7059 veneer causes an error. */
7060 size = stub_sec->size;
7061 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7062 if (stub_sec->contents == NULL && size != 0)
7063 return FALSE;
7064
7065 stub_sec->size = 0;
7066 }
7067
7068 /* Add new SG veneers after those already in the input import library. */
7069 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7070 {
7071 bfd_vma *start_offset_p;
7072 asection **stub_sec_p;
7073
7074 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7075 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7076 if (start_offset_p == NULL)
7077 continue;
7078
7079 BFD_ASSERT (stub_sec_p != NULL);
7080 if (*stub_sec_p != NULL)
7081 (*stub_sec_p)->size = *start_offset_p;
7082 }
7083
7084 /* Build the stubs as directed by the stub hash table. */
7085 table = &htab->stub_hash_table;
7086 bfd_hash_traverse (table, arm_build_one_stub, info);
7087 if (htab->fix_cortex_a8)
7088 {
7089 /* Place the cortex a8 stubs last. */
7090 htab->fix_cortex_a8 = -1;
7091 bfd_hash_traverse (table, arm_build_one_stub, info);
7092 }
7093
7094 return TRUE;
7095 }
7096
7097 /* Locate the Thumb encoded calling stub for NAME. */
7098
7099 static struct elf_link_hash_entry *
7100 find_thumb_glue (struct bfd_link_info *link_info,
7101 const char *name,
7102 char **error_message)
7103 {
7104 char *tmp_name;
7105 struct elf_link_hash_entry *hash;
7106 struct elf32_arm_link_hash_table *hash_table;
7107
7108 /* We need a pointer to the armelf specific hash table. */
7109 hash_table = elf32_arm_hash_table (link_info);
7110 if (hash_table == NULL)
7111 return NULL;
7112
7113 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7114 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7115
7116 BFD_ASSERT (tmp_name);
7117
7118 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7119
7120 hash = elf_link_hash_lookup
7121 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7122
7123 if (hash == NULL
7124 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7125 "Thumb", tmp_name, name) == -1)
7126 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7127
7128 free (tmp_name);
7129
7130 return hash;
7131 }
7132
7133 /* Locate the ARM encoded calling stub for NAME. */
7134
7135 static struct elf_link_hash_entry *
7136 find_arm_glue (struct bfd_link_info *link_info,
7137 const char *name,
7138 char **error_message)
7139 {
7140 char *tmp_name;
7141 struct elf_link_hash_entry *myh;
7142 struct elf32_arm_link_hash_table *hash_table;
7143
7144 /* We need a pointer to the elfarm specific hash table. */
7145 hash_table = elf32_arm_hash_table (link_info);
7146 if (hash_table == NULL)
7147 return NULL;
7148
7149 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7150 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7151
7152 BFD_ASSERT (tmp_name);
7153
7154 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7155
7156 myh = elf_link_hash_lookup
7157 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7158
7159 if (myh == NULL
7160 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7161 "ARM", tmp_name, name) == -1)
7162 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7163
7164 free (tmp_name);
7165
7166 return myh;
7167 }
7168
/* ARM->Thumb glue (static images):

   .arm
   __func_from_arm:
   ldr r12, __func_addr
   bx  r12
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (v5t static images)
   .arm
   __func_from_arm:
   ldr pc, __func_addr
   __func_addr:
   .word func    @ behave as if you saw a ARM_32 reloc.

   (relocatable images)
   .arm
   __func_from_arm:
   ldr r12, __func_offset
   add r12, r12, pc
   bx  r12
   __func_offset:
   .word func - .  */

#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, [pc] */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
/* Placeholder word for the target address; presumably patched when the
   glue is emitted -- see the "behave as if you saw a ARM_32 reloc"
   note above.  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, [pc, #-4] */
/* Placeholder word for the target address (see above).  */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;

#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, [pc, #4] */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

     .thumb                          .thumb
     .align 2                        .align 2
  __func_from_thumb:             __func_from_thumb:
     bx pc                           push {r6, lr}
     nop                             ldr  r6, __func_addr
     .arm                            mov  lr, pc
     b func                          bx   r6
				     .arm
				  ;; back_to_thumb
				     ldmia r13! {r6, lr}
				     bx    lr
				  __func_addr:
				     .word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop (mov r8, r8) */
static const insn32 t2a3_b_insn = 0xea000000;		/* b <target> */

/* Sizes of the erratum-workaround veneers built by the
   record_*_erratum_veneer functions below.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: test bit 0 of the target, return via mov for an ARM
   destination, bx otherwise.  Register fields presumably patched per
   veneer when emitted -- see record_arm_bx_glue.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst rN, #1 */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc, rN */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx rN */
7237
7238 #ifndef ELFARM_NABI_C_INCLUDED
7239 static void
7240 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7241 {
7242 asection * s;
7243 bfd_byte * contents;
7244
7245 if (size == 0)
7246 {
7247 /* Do not include empty glue sections in the output. */
7248 if (abfd != NULL)
7249 {
7250 s = bfd_get_linker_section (abfd, name);
7251 if (s != NULL)
7252 s->flags |= SEC_EXCLUDE;
7253 }
7254 return;
7255 }
7256
7257 BFD_ASSERT (abfd != NULL);
7258
7259 s = bfd_get_linker_section (abfd, name);
7260 BFD_ASSERT (s != NULL);
7261
7262 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7263
7264 BFD_ASSERT (s->size == size);
7265 s->contents = contents;
7266 }
7267
7268 bfd_boolean
7269 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7270 {
7271 struct elf32_arm_link_hash_table * globals;
7272
7273 globals = elf32_arm_hash_table (info);
7274 BFD_ASSERT (globals != NULL);
7275
7276 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7277 globals->arm_glue_size,
7278 ARM2THUMB_GLUE_SECTION_NAME);
7279
7280 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7281 globals->thumb_glue_size,
7282 THUMB2ARM_GLUE_SECTION_NAME);
7283
7284 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7285 globals->vfp11_erratum_glue_size,
7286 VFP11_ERRATUM_VENEER_SECTION_NAME);
7287
7288 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7289 globals->stm32l4xx_erratum_glue_size,
7290 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7291
7292 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7293 globals->bx_glue_size,
7294 ARM_BX_GLUE_SECTION_NAME);
7295
7296 return TRUE;
7297 }
7298
/* Allocate space and symbols for calling a Thumb function from Arm mode.
   returns the symbol identifying the stub.  */

static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Build the mangled glue symbol name for NAME.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Mark the new glue symbol as a local function.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue flavour: PIC glue for position-independent output,
     the shorter v5 glue when BLX is available, plain static glue
     otherwise.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for the glue in both the section and the running
     total used to place the next stub.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7371
/* Allocate space for ARMv4 BX veneers.  */

static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  /* A veneer symbol for this register must not already exist.  */
  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Mark the veneer symbol as a local function.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  s->size += ARM_BX_VENEER_SIZE;
  /* OR in 2 so the recorded value is non-zero even for the veneer at
     offset 0, letting the early-return check above detect allocation.  */
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
7428
7429
/* Add an entry to the code/data map for section SEC.  TYPE is the
   mapping-symbol class (callers in this file pass 'a' or 't') and VMA
   the location it applies to.  */

static void
elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
{
  struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
  unsigned int newidx;

  /* Lazily create a one-entry map on first use.  */
  if (sec_data->map == NULL)
    {
      sec_data->map = (elf32_arm_section_map *)
	bfd_malloc (sizeof (elf32_arm_section_map));
      sec_data->mapcount = 0;
      sec_data->mapsize = 1;
    }

  newidx = sec_data->mapcount++;

  /* Double the array when it fills up; bfd_realloc_or_free releases the
     old block on failure, leaving map NULL.  */
  if (sec_data->mapcount > sec_data->mapsize)
    {
      sec_data->mapsize *= 2;
      sec_data->map = (elf32_arm_section_map *)
	bfd_realloc_or_free (sec_data->map, sec_data->mapsize
			     * sizeof (elf32_arm_section_map));
    }

  /* If allocation failed the entry is silently dropped.  */
  if (sec_data->map)
    {
      sec_data->map[newidx].vma = vma;
      sec_data->map[newidx].type = type;
    }
}
7462
7463
7464 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7465 veneers are handled for now. */
7466
7467 static bfd_vma
7468 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7469 elf32_vfp11_erratum_list *branch,
7470 bfd *branch_bfd,
7471 asection *branch_sec,
7472 unsigned int offset)
7473 {
7474 asection *s;
7475 struct elf32_arm_link_hash_table *hash_table;
7476 char *tmp_name;
7477 struct elf_link_hash_entry *myh;
7478 struct bfd_link_hash_entry *bh;
7479 bfd_vma val;
7480 struct _arm_elf_section_data *sec_data;
7481 elf32_vfp11_erratum_list *newerr;
7482
7483 hash_table = elf32_arm_hash_table (link_info);
7484 BFD_ASSERT (hash_table != NULL);
7485 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7486
7487 s = bfd_get_linker_section
7488 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7489
7490 sec_data = elf32_arm_section_data (s);
7491
7492 BFD_ASSERT (s != NULL);
7493
7494 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7495 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7496
7497 BFD_ASSERT (tmp_name);
7498
7499 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7500 hash_table->num_vfp11_fixes);
7501
7502 myh = elf_link_hash_lookup
7503 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7504
7505 BFD_ASSERT (myh == NULL);
7506
7507 bh = NULL;
7508 val = hash_table->vfp11_erratum_glue_size;
7509 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7510 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7511 NULL, TRUE, FALSE, &bh);
7512
7513 myh = (struct elf_link_hash_entry *) bh;
7514 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7515 myh->forced_local = 1;
7516
7517 /* Link veneer back to calling location. */
7518 sec_data->erratumcount += 1;
7519 newerr = (elf32_vfp11_erratum_list *)
7520 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7521
7522 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7523 newerr->vma = -1;
7524 newerr->u.v.branch = branch;
7525 newerr->u.v.id = hash_table->num_vfp11_fixes;
7526 branch->u.b.veneer = newerr;
7527
7528 newerr->next = sec_data->erratumlist;
7529 sec_data->erratumlist = newerr;
7530
7531 /* A symbol for the return from the veneer. */
7532 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7533 hash_table->num_vfp11_fixes);
7534
7535 myh = elf_link_hash_lookup
7536 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7537
7538 if (myh != NULL)
7539 abort ();
7540
7541 bh = NULL;
7542 val = offset + 4;
7543 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7544 branch_sec, val, NULL, TRUE, FALSE, &bh);
7545
7546 myh = (struct elf_link_hash_entry *) bh;
7547 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7548 myh->forced_local = 1;
7549
7550 free (tmp_name);
7551
7552 /* Generate a mapping symbol for the veneer section, and explicitly add an
7553 entry for that symbol to the code/data map for the section. */
7554 if (hash_table->vfp11_erratum_glue_size == 0)
7555 {
7556 bh = NULL;
7557 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7558 ever requires this erratum fix. */
7559 _bfd_generic_link_add_one_symbol (link_info,
7560 hash_table->bfd_of_glue_owner, "$a",
7561 BSF_LOCAL, s, 0, NULL,
7562 TRUE, FALSE, &bh);
7563
7564 myh = (struct elf_link_hash_entry *) bh;
7565 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7566 myh->forced_local = 1;
7567
7568 /* The elf32_arm_init_maps function only cares about symbols from input
7569 BFDs. We must make a note of this generated mapping symbol
7570 ourselves so that code byteswapping works properly in
7571 elf32_arm_write_section. */
7572 elf32_arm_section_map_add (s, 'a', 0);
7573 }
7574
7575 s->size += VFP11_ERRATUM_VENEER_SIZE;
7576 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7577 hash_table->num_vfp11_fixes++;
7578
7579 /* The offset of the veneer. */
7580 return val;
7581 }
7582
/* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
   veneers need to be handled because used only in Cortex-M.

   BRANCH is the erratum-list entry needing the veneer; it lives at
   OFFSET within section BRANCH_SEC of BRANCH_BFD.  VENEER_SIZE is the
   number of bytes to reserve for this veneer.  */

static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  /* The veneer symbol must not already exist.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Mark the new veneer symbol as a local function.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was reassigned above to
     OFFSET + 4 (the return-symbol value), so that is what callers
     actually receive -- confirm against the call sites.  */
  return val;
}
7701
/* Section flags for the linker-created glue sections: allocated,
   loaded, read-only code whose contents are kept in memory.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7705
7706 /* Create a fake section for use by the ARM backend of the linker. */
7707
7708 static bfd_boolean
7709 arm_make_glue_section (bfd * abfd, const char * name)
7710 {
7711 asection * sec;
7712
7713 sec = bfd_get_linker_section (abfd, name);
7714 if (sec != NULL)
7715 /* Already made. */
7716 return TRUE;
7717
7718 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7719
7720 if (sec == NULL
7721 || !bfd_set_section_alignment (sec, 2))
7722 return FALSE;
7723
7724 /* Set the gc mark to prevent the section from being removed by garbage
7725 collection, despite the fact that no relocs refer to this section. */
7726 sec->gc_mark = 1;
7727
7728 return TRUE;
7729 }
7730
7731 /* Set size of .plt entries. This function is called from the
7732 linker scripts in ld/emultempl/{armelf}.em. */
7733
7734 void
7735 bfd_elf32_arm_use_long_plt (void)
7736 {
7737 elf32_arm_use_long_plt_entry = TRUE;
7738 }
7739
7740 /* Add the glue sections to ABFD. This function is called from the
7741 linker scripts in ld/emultempl/{armelf}.em. */
7742
7743 bfd_boolean
7744 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7745 struct bfd_link_info *info)
7746 {
7747 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7748 bfd_boolean dostm32l4xx = globals
7749 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7750 bfd_boolean addglue;
7751
7752 /* If we are only performing a partial
7753 link do not bother adding the glue. */
7754 if (bfd_link_relocatable (info))
7755 return TRUE;
7756
7757 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7758 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7759 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7760 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7761
7762 if (!dostm32l4xx)
7763 return addglue;
7764
7765 return addglue
7766 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7767 }
7768
7769 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7770 ensures they are not marked for deletion by
7771 strip_excluded_output_sections () when veneers are going to be created
7772 later. Not doing so would trigger assert on empty section size in
7773 lang_size_sections_1 (). */
7774
7775 void
7776 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7777 {
7778 enum elf32_arm_stub_type stub_type;
7779
7780 /* If we are only performing a partial
7781 link do not bother adding the glue. */
7782 if (bfd_link_relocatable (info))
7783 return;
7784
7785 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7786 {
7787 asection *out_sec;
7788 const char *out_sec_name;
7789
7790 if (!arm_dedicated_stub_output_section_required (stub_type))
7791 continue;
7792
7793 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7794 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7795 if (out_sec != NULL)
7796 out_sec->flags |= SEC_KEEP;
7797 }
7798 }
7799
7800 /* Select a BFD to be used to hold the sections used by the glue code.
7801 This function is called from the linker scripts in ld/emultempl/
7802 {armelf/pe}.em. */
7803
7804 bfd_boolean
7805 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7806 {
7807 struct elf32_arm_link_hash_table *globals;
7808
7809 /* If we are only performing a partial link
7810 do not bother getting a bfd to hold the glue. */
7811 if (bfd_link_relocatable (info))
7812 return TRUE;
7813
7814 /* Make sure we don't attach the glue sections to a dynamic object. */
7815 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7816
7817 globals = elf32_arm_hash_table (info);
7818 BFD_ASSERT (globals != NULL);
7819
7820 if (globals->bfd_of_glue_owner != NULL)
7821 return TRUE;
7822
7823 /* Save the bfd for later use. */
7824 globals->bfd_of_glue_owner = abfd;
7825
7826 return TRUE;
7827 }
7828
7829 static void
7830 check_use_blx (struct elf32_arm_link_hash_table *globals)
7831 {
7832 int cpu_arch;
7833
7834 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7835 Tag_CPU_arch);
7836
7837 if (globals->fix_arm1176)
7838 {
7839 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7840 globals->use_blx = 1;
7841 }
7842 else
7843 {
7844 if (cpu_arch > TAG_CPU_ARCH_V4T)
7845 globals->use_blx = 1;
7846 }
7847 }
7848
/* Scan the relocations of every section of ABFD before section sizes are
   fixed, recording any interworking glue that will be needed:
   - R_ARM_PC24 calls to Thumb symbols -> ARM-to-Thumb glue;
   - R_ARM_V4BX (when fix_v4bx >= 2) -> BX-replacement glue.
   Returns TRUE on success (including the nothing-to-do cases), FALSE on
   error reading relocations or section contents.  */
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register lives in the low nibble of the
		 instruction; glue is recorded per register number.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Only free section contents we malloc'd ourselves; a cached copy
	 belongs to the section data.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      /* Likewise for the relocations: cached relocs are not ours to free.  */
      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

 error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
8009 #endif
8010
8011
8012 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8013
8014 void
8015 bfd_elf32_arm_init_maps (bfd *abfd)
8016 {
8017 Elf_Internal_Sym *isymbuf;
8018 Elf_Internal_Shdr *hdr;
8019 unsigned int i, localsyms;
8020
8021 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
8022 if (! is_arm_elf (abfd))
8023 return;
8024
8025 if ((abfd->flags & DYNAMIC) != 0)
8026 return;
8027
8028 hdr = & elf_symtab_hdr (abfd);
8029 localsyms = hdr->sh_info;
8030
8031 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8032 should contain the number of local symbols, which should come before any
8033 global symbols. Mapping symbols are always local. */
8034 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8035 NULL);
8036
8037 /* No internal symbols read? Skip this BFD. */
8038 if (isymbuf == NULL)
8039 return;
8040
8041 for (i = 0; i < localsyms; i++)
8042 {
8043 Elf_Internal_Sym *isym = &isymbuf[i];
8044 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8045 const char *name;
8046
8047 if (sec != NULL
8048 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8049 {
8050 name = bfd_elf_string_from_elf_section (abfd,
8051 hdr->sh_link, isym->st_name);
8052
8053 if (bfd_is_arm_special_symbol_name (name,
8054 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8055 elf32_arm_section_map_add (sec, name[1], isym->st_value);
8056 }
8057 }
8058 }
8059
8060
8061 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8062 say what they wanted. */
8063
8064 void
8065 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8066 {
8067 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8068 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8069
8070 if (globals == NULL)
8071 return;
8072
8073 if (globals->fix_cortex_a8 == -1)
8074 {
8075 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8076 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8077 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8078 || out_attr[Tag_CPU_arch_profile].i == 0))
8079 globals->fix_cortex_a8 = 1;
8080 else
8081 globals->fix_cortex_a8 = 0;
8082 }
8083 }
8084
8085
8086 void
8087 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8088 {
8089 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8090 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8091
8092 if (globals == NULL)
8093 return;
8094 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8095 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8096 {
8097 switch (globals->vfp11_fix)
8098 {
8099 case BFD_ARM_VFP11_FIX_DEFAULT:
8100 case BFD_ARM_VFP11_FIX_NONE:
8101 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8102 break;
8103
8104 default:
8105 /* Give a warning, but do as the user requests anyway. */
8106 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8107 "workaround is not necessary for target architecture"), obfd);
8108 }
8109 }
8110 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8111 /* For earlier architectures, we might need the workaround, but do not
8112 enable it by default. If users is running with broken hardware, they
8113 must enable the erratum fix explicitly. */
8114 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8115 }
8116
8117 void
8118 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8119 {
8120 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8121 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8122
8123 if (globals == NULL)
8124 return;
8125
8126 /* We assume only Cortex-M4 may require the fix. */
8127 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8128 || out_attr[Tag_CPU_arch_profile].i != 'M')
8129 {
8130 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8131 /* Give a warning, but do as the user requests anyway. */
8132 _bfd_error_handler
8133 (_("%pB: warning: selected STM32L4XX erratum "
8134 "workaround is not necessary for target architecture"), obfd);
8135 }
8136 }
8137
/* Classification of VFP11 instructions by the pipeline they occupy, as
   decoded by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
8145
8146 /* Return a VFP register number. This is encoded as RX:X for single-precision
8147 registers, or X:RX for double-precision registers, where RX is the group of
8148 four bits in the instruction encoding and X is the single extension bit.
8149 RX and X fields are specified using their lowest (starting) bit. The return
8150 value is:
8151
8152 0...31: single-precision registers s0...s31
8153 32...63: double-precision registers d0...d31.
8154
8155 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8156 encounter VFP3 instructions, so we allow the full range for DP registers. */
8157
8158 static unsigned int
8159 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8160 unsigned int x)
8161 {
8162 if (is_double)
8163 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8164 else
8165 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8166 }
8167
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  A single-precision register sets one bit; a
   double-precision register d0-d15 sets the two bits of its overlapping
   single-precision pair.  Ignore d16-d31 (no SP aliases).  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    /* Use an unsigned literal: 1 << 31 would left-shift into the sign
       bit of a signed int, which is undefined behaviour.  */
    *wmask |= 1u << reg;
  else if (reg < 48)
    /* Likewise 3 << 30 for d15.  */
    *wmask |= 3u << ((reg - 32) * 2);
}
8179
8180 /* Return TRUE if WMASK overwrites anything in REGS. */
8181
8182 static bfd_boolean
8183 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8184 {
8185 int i;
8186
8187 for (i = 0; i < numregs; i++)
8188 {
8189 unsigned int reg = regs[i];
8190
8191 if (reg < 32 && (wmask & (1 << reg)) != 0)
8192 return TRUE;
8193
8194 reg -= 32;
8195
8196 if (reg >= 16)
8197 continue;
8198
8199 if ((wmask & (3 << (reg * 2))) != 0)
8200 return TRUE;
8201 }
8202
8203 return FALSE;
8204 }
8205
8206 /* In this function, we're interested in two things: finding input registers
8207 for VFP data-processing instructions, and finding the set of registers which
8208 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8209 hold the written set, so FLDM etc. are easy to deal with (we're only
8210 interested in 32 SP registers or 16 dp registers, due to the VFP version
8211 implemented by the chip in question). DP registers are marked by setting
8212 both SP registers in the write mask). */
8213
/* Decode INSN as a VFP11 instruction.  Returns the pipeline it occupies
   (or VFP11_BAD if unrecognised), sets bits in *DESTMASK for every
   register the instruction may write (via bfd_arm_vfp11_write_mask), and
   for data-processing instructions fills REGS[0..*NUMREGS-1] with its
   input operand registers.  See the comment above for the rationale.  */
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 = 0b1011 selects the double-precision coprocessor (cp11).  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Opcode bits p (23), q-r (21:20) and s (6) concatenated.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  /* The accumulate forms also read their destination.  */
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Common two-operand case: writes Fd, reads Fn and Fm.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* Extension opcode lives in bits 19:16 and 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means a transfer TO the VFP registers (a write).  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* Two consecutive single-precision registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits: P (24), U (23), W (21).  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Register count is in the low byte (halved for DP regs).  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer.  Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8397
8398
8399 static int elf32_arm_compare_mapping (const void * a, const void * b);
8400
8401
8402 /* Look for potentially-troublesome code sequences which might trigger the
8403 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8404 (available from ARM) for details of the erratum. A short version is
8405 described in ld.texinfo. */
8406
8407 bfd_boolean
8408 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8409 {
8410 asection *sec;
8411 bfd_byte *contents = NULL;
8412 int state = 0;
8413 int regs[3], numregs = 0;
8414 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8415 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8416
8417 if (globals == NULL)
8418 return FALSE;
8419
8420 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8421 The states transition as follows:
8422
8423 0 -> 1 (vector) or 0 -> 2 (scalar)
8424 A VFP FMAC-pipeline instruction has been seen. Fill
8425 regs[0]..regs[numregs-1] with its input operands. Remember this
8426 instruction in 'first_fmac'.
8427
8428 1 -> 2
8429 Any instruction, except for a VFP instruction which overwrites
8430 regs[*].
8431
8432 1 -> 3 [ -> 0 ] or
8433 2 -> 3 [ -> 0 ]
8434 A VFP instruction has been seen which overwrites any of regs[*].
8435 We must make a veneer! Reset state to 0 before examining next
8436 instruction.
8437
8438 2 -> 0
8439 If we fail to match anything in state 2, reset to state 0 and reset
8440 the instruction pointer to the instruction after 'first_fmac'.
8441
8442 If the VFP11 vector mode is in use, there must be at least two unrelated
8443 instructions between anti-dependent VFP11 instructions to properly avoid
8444 triggering the erratum, hence the use of the extra state 1. */
8445
8446 /* If we are only performing a partial link do not bother
8447 to construct any glue. */
8448 if (bfd_link_relocatable (link_info))
8449 return TRUE;
8450
8451 /* Skip if this bfd does not correspond to an ELF image. */
8452 if (! is_arm_elf (abfd))
8453 return TRUE;
8454
8455 /* We should have chosen a fix type by the time we get here. */
8456 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8457
8458 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8459 return TRUE;
8460
8461 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8462 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8463 return TRUE;
8464
8465 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8466 {
8467 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8468 struct _arm_elf_section_data *sec_data;
8469
8470 /* If we don't have executable progbits, we're not interested in this
8471 section. Also skip if section is to be excluded. */
8472 if (elf_section_type (sec) != SHT_PROGBITS
8473 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8474 || (sec->flags & SEC_EXCLUDE) != 0
8475 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8476 || sec->output_section == bfd_abs_section_ptr
8477 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8478 continue;
8479
8480 sec_data = elf32_arm_section_data (sec);
8481
8482 if (sec_data->mapcount == 0)
8483 continue;
8484
8485 if (elf_section_data (sec)->this_hdr.contents != NULL)
8486 contents = elf_section_data (sec)->this_hdr.contents;
8487 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8488 goto error_return;
8489
8490 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8491 elf32_arm_compare_mapping);
8492
8493 for (span = 0; span < sec_data->mapcount; span++)
8494 {
8495 unsigned int span_start = sec_data->map[span].vma;
8496 unsigned int span_end = (span == sec_data->mapcount - 1)
8497 ? sec->size : sec_data->map[span + 1].vma;
8498 char span_type = sec_data->map[span].type;
8499
8500 /* FIXME: Only ARM mode is supported at present. We may need to
8501 support Thumb-2 mode also at some point. */
8502 if (span_type != 'a')
8503 continue;
8504
8505 for (i = span_start; i < span_end;)
8506 {
8507 unsigned int next_i = i + 4;
8508 unsigned int insn = bfd_big_endian (abfd)
8509 ? (contents[i] << 24)
8510 | (contents[i + 1] << 16)
8511 | (contents[i + 2] << 8)
8512 | contents[i + 3]
8513 : (contents[i + 3] << 24)
8514 | (contents[i + 2] << 16)
8515 | (contents[i + 1] << 8)
8516 | contents[i];
8517 unsigned int writemask = 0;
8518 enum bfd_arm_vfp11_pipe vpipe;
8519
8520 switch (state)
8521 {
8522 case 0:
8523 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8524 &numregs);
8525 /* I'm assuming the VFP11 erratum can trigger with denorm
8526 operands on either the FMAC or the DS pipeline. This might
8527 lead to slightly overenthusiastic veneer insertion. */
8528 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8529 {
8530 state = use_vector ? 1 : 2;
8531 first_fmac = i;
8532 veneer_of_insn = insn;
8533 }
8534 break;
8535
8536 case 1:
8537 {
8538 int other_regs[3], other_numregs;
8539 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8540 other_regs,
8541 &other_numregs);
8542 if (vpipe != VFP11_BAD
8543 && bfd_arm_vfp11_antidependency (writemask, regs,
8544 numregs))
8545 state = 3;
8546 else
8547 state = 2;
8548 }
8549 break;
8550
8551 case 2:
8552 {
8553 int other_regs[3], other_numregs;
8554 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8555 other_regs,
8556 &other_numregs);
8557 if (vpipe != VFP11_BAD
8558 && bfd_arm_vfp11_antidependency (writemask, regs,
8559 numregs))
8560 state = 3;
8561 else
8562 {
8563 state = 0;
8564 next_i = first_fmac + 4;
8565 }
8566 }
8567 break;
8568
8569 case 3:
8570 abort (); /* Should be unreachable. */
8571 }
8572
8573 if (state == 3)
8574 {
8575 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8576 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8577
8578 elf32_arm_section_data (sec)->erratumcount += 1;
8579
8580 newerr->u.b.vfp_insn = veneer_of_insn;
8581
8582 switch (span_type)
8583 {
8584 case 'a':
8585 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8586 break;
8587
8588 default:
8589 abort ();
8590 }
8591
8592 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8593 first_fmac);
8594
8595 newerr->vma = -1;
8596
8597 newerr->next = sec_data->erratumlist;
8598 sec_data->erratumlist = newerr;
8599
8600 state = 0;
8601 }
8602
8603 i = next_i;
8604 }
8605 }
8606
8607 if (contents != NULL
8608 && elf_section_data (sec)->this_hdr.contents != contents)
8609 free (contents);
8610 contents = NULL;
8611 }
8612
8613 return TRUE;
8614
8615 error_return:
8616 if (contents != NULL
8617 && elf_section_data (sec)->this_hdr.contents != contents)
8618 free (contents);
8619
8620 return FALSE;
8621 }
8622
8623 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8624 after sections have been laid out, using specially-named symbols. */
8625
8626 void
8627 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8628 struct bfd_link_info *link_info)
8629 {
8630 asection *sec;
8631 struct elf32_arm_link_hash_table *globals;
8632 char *tmp_name;
8633
8634 if (bfd_link_relocatable (link_info))
8635 return;
8636
8637 /* Skip if this bfd does not correspond to an ELF image. */
8638 if (! is_arm_elf (abfd))
8639 return;
8640
8641 globals = elf32_arm_hash_table (link_info);
8642 if (globals == NULL)
8643 return;
8644
8645 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8646 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8647
8648 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8649 {
8650 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8651 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8652
8653 for (; errnode != NULL; errnode = errnode->next)
8654 {
8655 struct elf_link_hash_entry *myh;
8656 bfd_vma vma;
8657
8658 switch (errnode->type)
8659 {
8660 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8661 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8662 /* Find veneer symbol. */
8663 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8664 errnode->u.b.veneer->u.v.id);
8665
8666 myh = elf_link_hash_lookup
8667 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8668
8669 if (myh == NULL)
8670 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8671 abfd, "VFP11", tmp_name);
8672
8673 vma = myh->root.u.def.section->output_section->vma
8674 + myh->root.u.def.section->output_offset
8675 + myh->root.u.def.value;
8676
8677 errnode->u.b.veneer->vma = vma;
8678 break;
8679
8680 case VFP11_ERRATUM_ARM_VENEER:
8681 case VFP11_ERRATUM_THUMB_VENEER:
8682 /* Find return location. */
8683 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8684 errnode->u.v.id);
8685
8686 myh = elf_link_hash_lookup
8687 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8688
8689 if (myh == NULL)
8690 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8691 abfd, "VFP11", tmp_name);
8692
8693 vma = myh->root.u.def.section->output_section->vma
8694 + myh->root.u.def.section->output_offset
8695 + myh->root.u.def.value;
8696
8697 errnode->u.v.branch->vma = vma;
8698 break;
8699
8700 default:
8701 abort ();
8702 }
8703 }
8704 }
8705
8706 free (tmp_name);
8707 }
8708
8709 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8710 return locations after sections have been laid out, using
8711 specially-named symbols. */
8712
8713 void
8714 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8715 struct bfd_link_info *link_info)
8716 {
8717 asection *sec;
8718 struct elf32_arm_link_hash_table *globals;
8719 char *tmp_name;
8720
8721 if (bfd_link_relocatable (link_info))
8722 return;
8723
8724 /* Skip if this bfd does not correspond to an ELF image. */
8725 if (! is_arm_elf (abfd))
8726 return;
8727
8728 globals = elf32_arm_hash_table (link_info);
8729 if (globals == NULL)
8730 return;
8731
8732 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8733 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8734
8735 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8736 {
8737 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8738 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8739
8740 for (; errnode != NULL; errnode = errnode->next)
8741 {
8742 struct elf_link_hash_entry *myh;
8743 bfd_vma vma;
8744
8745 switch (errnode->type)
8746 {
8747 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8748 /* Find veneer symbol. */
8749 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8750 errnode->u.b.veneer->u.v.id);
8751
8752 myh = elf_link_hash_lookup
8753 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8754
8755 if (myh == NULL)
8756 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8757 abfd, "STM32L4XX", tmp_name);
8758
8759 vma = myh->root.u.def.section->output_section->vma
8760 + myh->root.u.def.section->output_offset
8761 + myh->root.u.def.value;
8762
8763 errnode->u.b.veneer->vma = vma;
8764 break;
8765
8766 case STM32L4XX_ERRATUM_VENEER:
8767 /* Find return location. */
8768 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8769 errnode->u.v.id);
8770
8771 myh = elf_link_hash_lookup
8772 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8773
8774 if (myh == NULL)
8775 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8776 abfd, "STM32L4XX", tmp_name);
8777
8778 vma = myh->root.u.def.section->output_section->vma
8779 + myh->root.u.def.section->output_offset
8780 + myh->root.u.def.value;
8781
8782 errnode->u.v.branch->vma = vma;
8783 break;
8784
8785 default:
8786 abort ();
8787 }
8788 }
8789 }
8790
8791 free (tmp_name);
8792 }
8793
8794 static inline bfd_boolean
8795 is_thumb2_ldmia (const insn32 insn)
8796 {
8797 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8798 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8799 return (insn & 0xffd02000) == 0xe8900000;
8800 }
8801
8802 static inline bfd_boolean
8803 is_thumb2_ldmdb (const insn32 insn)
8804 {
8805 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8806 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8807 return (insn & 0xffd02000) == 0xe9100000;
8808 }
8809
8810 static inline bfd_boolean
8811 is_thumb2_vldm (const insn32 insn)
8812 {
8813 /* A6.5 Extension register load or store instruction
8814 A7.7.229
8815 We look for SP 32-bit and DP 64-bit registers.
8816 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8817 <list> is consecutive 64-bit registers
8818 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8819 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8820 <list> is consecutive 32-bit registers
8821 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8822 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8823 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8824 return
8825 (((insn & 0xfe100f00) == 0xec100b00) ||
8826 ((insn & 0xfe100f00) == 0xec100a00))
8827 && /* (IA without !). */
8828 (((((insn << 7) >> 28) & 0xd) == 0x4)
8829 /* (IA with !), includes VPOP (when reg number is SP). */
8830 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8831 /* (DB with !). */
8832 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8833 }
8834
8835 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8836 VLDM opcode and:
8837 - computes the number and the mode of memory accesses
8838 - decides if the replacement should be done:
8839 . replaces only if > 8-word accesses
8840 . or (testing purposes only) replaces all accesses. */
8841
8842 static bfd_boolean
8843 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8844 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8845 {
8846 int nb_words = 0;
8847
8848 /* The field encoding the register list is the same for both LDMIA
8849 and LDMDB encodings. */
8850 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8851 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8852 else if (is_thumb2_vldm (insn))
8853 nb_words = (insn & 0xff);
8854
8855 /* DEFAULT mode accounts for the real bug condition situation,
8856 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8857 return
8858 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8859 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8860 }
8861
8862 /* Look for potentially-troublesome code sequences which might trigger
8863 the STM STM32L4XX erratum. */
8864
8865 bfd_boolean
8866 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8867 struct bfd_link_info *link_info)
8868 {
8869 asection *sec;
8870 bfd_byte *contents = NULL;
8871 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8872
8873 if (globals == NULL)
8874 return FALSE;
8875
8876 /* If we are only performing a partial link do not bother
8877 to construct any glue. */
8878 if (bfd_link_relocatable (link_info))
8879 return TRUE;
8880
8881 /* Skip if this bfd does not correspond to an ELF image. */
8882 if (! is_arm_elf (abfd))
8883 return TRUE;
8884
8885 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8886 return TRUE;
8887
8888 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8889 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8890 return TRUE;
8891
8892 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8893 {
8894 unsigned int i, span;
8895 struct _arm_elf_section_data *sec_data;
8896
8897 /* If we don't have executable progbits, we're not interested in this
8898 section. Also skip if section is to be excluded. */
8899 if (elf_section_type (sec) != SHT_PROGBITS
8900 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8901 || (sec->flags & SEC_EXCLUDE) != 0
8902 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8903 || sec->output_section == bfd_abs_section_ptr
8904 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8905 continue;
8906
8907 sec_data = elf32_arm_section_data (sec);
8908
8909 if (sec_data->mapcount == 0)
8910 continue;
8911
8912 if (elf_section_data (sec)->this_hdr.contents != NULL)
8913 contents = elf_section_data (sec)->this_hdr.contents;
8914 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8915 goto error_return;
8916
8917 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8918 elf32_arm_compare_mapping);
8919
8920 for (span = 0; span < sec_data->mapcount; span++)
8921 {
8922 unsigned int span_start = sec_data->map[span].vma;
8923 unsigned int span_end = (span == sec_data->mapcount - 1)
8924 ? sec->size : sec_data->map[span + 1].vma;
8925 char span_type = sec_data->map[span].type;
8926 int itblock_current_pos = 0;
8927
8928 /* Only Thumb2 mode need be supported with this CM4 specific
8929 code, we should not encounter any arm mode eg span_type
8930 != 'a'. */
8931 if (span_type != 't')
8932 continue;
8933
8934 for (i = span_start; i < span_end;)
8935 {
8936 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8937 bfd_boolean insn_32bit = FALSE;
8938 bfd_boolean is_ldm = FALSE;
8939 bfd_boolean is_vldm = FALSE;
8940 bfd_boolean is_not_last_in_it_block = FALSE;
8941
8942 /* The first 16-bits of all 32-bit thumb2 instructions start
8943 with opcode[15..13]=0b111 and the encoded op1 can be anything
8944 except opcode[12..11]!=0b00.
8945 See 32-bit Thumb instruction encoding. */
8946 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8947 insn_32bit = TRUE;
8948
8949 /* Compute the predicate that tells if the instruction
8950 is concerned by the IT block
8951 - Creates an error if there is a ldm that is not
8952 last in the IT block thus cannot be replaced
8953 - Otherwise we can create a branch at the end of the
8954 IT block, it will be controlled naturally by IT
8955 with the proper pseudo-predicate
8956 - So the only interesting predicate is the one that
8957 tells that we are not on the last item of an IT
8958 block. */
8959 if (itblock_current_pos != 0)
8960 is_not_last_in_it_block = !!--itblock_current_pos;
8961
8962 if (insn_32bit)
8963 {
8964 /* Load the rest of the insn (in manual-friendly order). */
8965 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8966 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8967 is_vldm = is_thumb2_vldm (insn);
8968
8969 /* Veneers are created for (v)ldm depending on
8970 option flags and memory accesses conditions; but
8971 if the instruction is not the last instruction of
8972 an IT block, we cannot create a jump there, so we
8973 bail out. */
8974 if ((is_ldm || is_vldm)
8975 && stm32l4xx_need_create_replacing_stub
8976 (insn, globals->stm32l4xx_fix))
8977 {
8978 if (is_not_last_in_it_block)
8979 {
8980 _bfd_error_handler
8981 /* xgettext:c-format */
8982 (_("%pB(%pA+%#x): error: multiple load detected"
8983 " in non-last IT block instruction:"
8984 " STM32L4XX veneer cannot be generated; "
8985 "use gcc option -mrestrict-it to generate"
8986 " only one instruction per IT block"),
8987 abfd, sec, i);
8988 }
8989 else
8990 {
8991 elf32_stm32l4xx_erratum_list *newerr =
8992 (elf32_stm32l4xx_erratum_list *)
8993 bfd_zmalloc
8994 (sizeof (elf32_stm32l4xx_erratum_list));
8995
8996 elf32_arm_section_data (sec)
8997 ->stm32l4xx_erratumcount += 1;
8998 newerr->u.b.insn = insn;
8999 /* We create only thumb branches. */
9000 newerr->type =
9001 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
9002 record_stm32l4xx_erratum_veneer
9003 (link_info, newerr, abfd, sec,
9004 i,
9005 is_ldm ?
9006 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
9007 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
9008 newerr->vma = -1;
9009 newerr->next = sec_data->stm32l4xx_erratumlist;
9010 sec_data->stm32l4xx_erratumlist = newerr;
9011 }
9012 }
9013 }
9014 else
9015 {
9016 /* A7.7.37 IT p208
9017 IT blocks are only encoded in T1
9018 Encoding T1: IT{x{y{z}}} <firstcond>
9019 1 0 1 1 - 1 1 1 1 - firstcond - mask
9020 if mask = '0000' then see 'related encodings'
9021 We don't deal with UNPREDICTABLE, just ignore these.
9022 There can be no nested IT blocks so an IT block
9023 is naturally a new one for which it is worth
9024 computing its size. */
9025 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
9026 && ((insn & 0x000f) != 0x0000);
9027 /* If we have a new IT block we compute its size. */
9028 if (is_newitblock)
9029 {
9030 /* Compute the number of instructions controlled
9031 by the IT block, it will be used to decide
9032 whether we are inside an IT block or not. */
9033 unsigned int mask = insn & 0x000f;
9034 itblock_current_pos = 4 - ctz (mask);
9035 }
9036 }
9037
9038 i += insn_32bit ? 4 : 2;
9039 }
9040 }
9041
9042 if (contents != NULL
9043 && elf_section_data (sec)->this_hdr.contents != contents)
9044 free (contents);
9045 contents = NULL;
9046 }
9047
9048 return TRUE;
9049
9050 error_return:
9051 if (contents != NULL
9052 && elf_section_data (sec)->this_hdr.contents != contents)
9053 free (contents);
9054
9055 return FALSE;
9056 }
9057
9058 /* Set target relocation values needed during linking. */
9059
9060 void
9061 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9062 struct bfd_link_info *link_info,
9063 struct elf32_arm_params *params)
9064 {
9065 struct elf32_arm_link_hash_table *globals;
9066
9067 globals = elf32_arm_hash_table (link_info);
9068 if (globals == NULL)
9069 return;
9070
9071 globals->target1_is_rel = params->target1_is_rel;
9072 if (globals->fdpic_p)
9073 globals->target2_reloc = R_ARM_GOT32;
9074 else if (strcmp (params->target2_type, "rel") == 0)
9075 globals->target2_reloc = R_ARM_REL32;
9076 else if (strcmp (params->target2_type, "abs") == 0)
9077 globals->target2_reloc = R_ARM_ABS32;
9078 else if (strcmp (params->target2_type, "got-rel") == 0)
9079 globals->target2_reloc = R_ARM_GOT_PREL;
9080 else
9081 {
9082 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9083 params->target2_type);
9084 }
9085 globals->fix_v4bx = params->fix_v4bx;
9086 globals->use_blx |= params->use_blx;
9087 globals->vfp11_fix = params->vfp11_denorm_fix;
9088 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9089 if (globals->fdpic_p)
9090 globals->pic_veneer = 1;
9091 else
9092 globals->pic_veneer = params->pic_veneer;
9093 globals->fix_cortex_a8 = params->fix_cortex_a8;
9094 globals->fix_arm1176 = params->fix_arm1176;
9095 globals->cmse_implib = params->cmse_implib;
9096 globals->in_implib_bfd = params->in_implib_bfd;
9097
9098 BFD_ASSERT (is_arm_elf (output_bfd));
9099 elf_arm_tdata (output_bfd)->no_enum_size_warning
9100 = params->no_enum_size_warning;
9101 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9102 = params->no_wchar_size_warning;
9103 }
9104
9105 /* Replace the target offset of a Thumb bl or b.w instruction. */
9106
9107 static void
9108 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9109 {
9110 bfd_vma upper;
9111 bfd_vma lower;
9112 int reloc_sign;
9113
9114 BFD_ASSERT ((offset & 1) == 0);
9115
9116 upper = bfd_get_16 (abfd, insn);
9117 lower = bfd_get_16 (abfd, insn + 2);
9118 reloc_sign = (offset < 0) ? 1 : 0;
9119 upper = (upper & ~(bfd_vma) 0x7ff)
9120 | ((offset >> 12) & 0x3ff)
9121 | (reloc_sign << 10);
9122 lower = (lower & ~(bfd_vma) 0x2fff)
9123 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9124 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9125 | ((offset >> 1) & 0x7ff);
9126 bfd_put_16 (abfd, upper, insn);
9127 bfd_put_16 (abfd, lower, insn + 2);
9128 }
9129
/* Thumb code calling an ARM function.  Emit (on first use) the
   Thumb->ARM interworking glue for NAME into the glue section and
   retarget the Thumb BL at HIT_DATA so that it branches via the glue.
   Returns TRUE on success, FALSE on error (no glue symbol, or the
   defining object was not compiled for interworking).  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set in the glue symbol's value marks a stub that has been
     reserved but not yet written; write it now and clear the bit.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub layout: bx pc; nop; then an ARM branch to the target.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9230
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL if
   no glue symbol exists for NAME.  The stub is written into section S
   (the ARM2THUMB glue section) on first use only; the chosen stub
   sequence depends on whether the link is PIC and whether BLX is
   available.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* Bit 0 set in the glue symbol's value marks a stub that has been
     reserved but not yet written.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn but still emit the stub, unlike the Thumb->ARM case.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9325
9326 /* Arm code calling a Thumb function. */
9327
9328 static int
9329 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9330 const char * name,
9331 bfd * input_bfd,
9332 bfd * output_bfd,
9333 asection * input_section,
9334 bfd_byte * hit_data,
9335 asection * sym_sec,
9336 bfd_vma offset,
9337 bfd_signed_vma addend,
9338 bfd_vma val,
9339 char **error_message)
9340 {
9341 unsigned long int tmp;
9342 bfd_vma my_offset;
9343 asection * s;
9344 long int ret_offset;
9345 struct elf_link_hash_entry * myh;
9346 struct elf32_arm_link_hash_table * globals;
9347
9348 globals = elf32_arm_hash_table (info);
9349 BFD_ASSERT (globals != NULL);
9350 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9351
9352 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9353 ARM2THUMB_GLUE_SECTION_NAME);
9354 BFD_ASSERT (s != NULL);
9355 BFD_ASSERT (s->contents != NULL);
9356 BFD_ASSERT (s->output_section != NULL);
9357
9358 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9359 sym_sec, val, s, error_message);
9360 if (!myh)
9361 return FALSE;
9362
9363 my_offset = myh->root.u.def.value;
9364 tmp = bfd_get_32 (input_bfd, hit_data);
9365 tmp = tmp & 0xFF000000;
9366
9367 /* Somehow these are both 4 too far, so subtract 8. */
9368 ret_offset = (s->output_offset
9369 + my_offset
9370 + s->output_section->vma
9371 - (input_section->output_offset
9372 + input_section->output_section->vma
9373 + offset + addend)
9374 - 8);
9375
9376 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9377
9378 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9379
9380 return TRUE;
9381 }
9382
9383 /* Populate Arm stub for an exported Thumb function. */
9384
9385 static bfd_boolean
9386 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9387 {
9388 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9389 asection * s;
9390 struct elf_link_hash_entry * myh;
9391 struct elf32_arm_link_hash_entry *eh;
9392 struct elf32_arm_link_hash_table * globals;
9393 asection *sec;
9394 bfd_vma val;
9395 char *error_message;
9396
9397 eh = elf32_arm_hash_entry (h);
9398 /* Allocate stubs for exported Thumb functions on v4t. */
9399 if (eh->export_glue == NULL)
9400 return TRUE;
9401
9402 globals = elf32_arm_hash_table (info);
9403 BFD_ASSERT (globals != NULL);
9404 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9405
9406 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9407 ARM2THUMB_GLUE_SECTION_NAME);
9408 BFD_ASSERT (s != NULL);
9409 BFD_ASSERT (s->contents != NULL);
9410 BFD_ASSERT (s->output_section != NULL);
9411
9412 sec = eh->export_glue->root.u.def.section;
9413
9414 BFD_ASSERT (sec->output_section != NULL);
9415
9416 val = eh->export_glue->root.u.def.value + sec->output_offset
9417 + sec->output_section->vma;
9418
9419 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9420 h->root.u.def.section->owner,
9421 globals->obfd, sec, val, s,
9422 &error_message);
9423 BFD_ASSERT (myh);
9424 return TRUE;
9425 }
9426
9427 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9428
9429 static bfd_vma
9430 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9431 {
9432 bfd_byte *p;
9433 bfd_vma glue_addr;
9434 asection *s;
9435 struct elf32_arm_link_hash_table *globals;
9436
9437 globals = elf32_arm_hash_table (info);
9438 BFD_ASSERT (globals != NULL);
9439 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9440
9441 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9442 ARM_BX_GLUE_SECTION_NAME);
9443 BFD_ASSERT (s != NULL);
9444 BFD_ASSERT (s->contents != NULL);
9445 BFD_ASSERT (s->output_section != NULL);
9446
9447 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9448
9449 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9450
9451 if ((globals->bx_glue_offset[reg] & 1) == 0)
9452 {
9453 p = s->contents + glue_addr;
9454 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9455 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9456 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9457 globals->bx_glue_offset[reg] |= 1;
9458 }
9459
9460 return glue_addr + s->output_section->vma + s->output_offset;
9461 }
9462
9463 /* Generate Arm stubs for exported Thumb symbols. */
9464 static void
9465 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9466 struct bfd_link_info *link_info)
9467 {
9468 struct elf32_arm_link_hash_table * globals;
9469
9470 if (link_info == NULL)
9471 /* Ignore this if we are not called by the ELF backend linker. */
9472 return;
9473
9474 globals = elf32_arm_hash_table (link_info);
9475 if (globals == NULL)
9476 return;
9477
9478 /* If blx is available then exported Thumb symbols are OK and there is
9479 nothing to do. */
9480 if (globals->use_blx)
9481 return;
9482
9483 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9484 link_info);
9485 }
9486
/* Reserve space for COUNT dynamic relocations in relocation section
   SRELOC.  */

static void
elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
			      bfd_size_type count)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  /* Dynamic relocations only make sense once the dynamic sections
     have been created.  */
  BFD_ASSERT (htab->root.dynamic_sections_created);
  if (sreloc == NULL)
    abort ();
  /* Entry size depends on whether this target uses REL or RELA.  */
  sreloc->size += RELOC_SIZE (htab) * count;
}
9502
9503 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9504 dynamic, the relocations should go in SRELOC, otherwise they should
9505 go in the special .rel.iplt section. */
9506
9507 static void
9508 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9509 bfd_size_type count)
9510 {
9511 struct elf32_arm_link_hash_table *htab;
9512
9513 htab = elf32_arm_hash_table (info);
9514 if (!htab->root.dynamic_sections_created)
9515 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9516 else
9517 {
9518 BFD_ASSERT (sreloc != NULL);
9519 sreloc->size += RELOC_SIZE (htab) * count;
9520 }
9521 }
9522
9523 /* Add relocation REL to the end of relocation section SRELOC. */
9524
9525 static void
9526 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9527 asection *sreloc, Elf_Internal_Rela *rel)
9528 {
9529 bfd_byte *loc;
9530 struct elf32_arm_link_hash_table *htab;
9531
9532 htab = elf32_arm_hash_table (info);
9533 if (!htab->root.dynamic_sections_created
9534 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9535 sreloc = htab->root.irelplt;
9536 if (sreloc == NULL)
9537 abort ();
9538 loc = sreloc->contents;
9539 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9540 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9541 abort ();
9542 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9543 }
9544
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Records the entry's offset in ROOT_PLT->offset and its
   GOT slot in ARM_PLT->got_offset, and reserves the matching dynamic
   relocation space.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in
	     .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* Exclude the 8 bytes counted for each TLS descriptor.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9624
9625 static bfd_vma
9626 arm_movw_immediate (bfd_vma value)
9627 {
9628 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9629 }
9630
9631 static bfd_vma
9632 arm_movt_immediate (bfd_vma value)
9633 {
9634 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9635 }
9636
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   The exact instruction sequence emitted depends on the target
   variant: Symbian, VxWorks (shared or executable), NaCl, FDPIC,
   Thumb-only, or the default short/long ARM entries.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  Bit 0 of got_offset is a
	 bookkeeping flag, hence the masking.  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      if (htab->fdpic_p)
	/* Function descriptor takes 8 bytes.  */
	plt_index = (got_offset - got_header_size) / 8;
      else
	plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      /* Words 2 and 5 carry data, the rest are instructions.  */
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit a 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (htab->fdpic_p)
	{
	  const bfd_vma *plt_entry = using_thumb_only(htab)
	    ? elf32_arm_fdpic_thumb_plt_entry
	    : elf32_arm_fdpic_plt_entry;

	  /* Fill-up Thumb stub if needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }
	  /* As we are using 32 bit instructions even for the Thumb
	     version, we have to use 'put_arm_insn' instead of
	     'put_thumb_insn'.  */
	  put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
	  put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
	  put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
	  put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
	  bfd_put_32 (output_bfd, got_offset, ptr + 16);

	  if (!(info->flags & DF_BIND_NOW))
	    {
	      /* funcdesc_value_reloc_offset.  */
	      bfd_put_32 (output_bfd,
			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
			  ptr + 20);
	      put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
	      put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
	      put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
	      put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
	    }
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	     used by PLT entry.  */
	  if (htab->fdpic_p)
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	      initial_got_entry = 0;
	    }
	  else
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	      initial_got_entry = (splt->output_section->vma
				   + splt->output_offset);
	    }
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);

      if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
	{
	  /* Setup initial funcdesc value.  */
	  /* FIXME: we don't support lazy binding because there is a
	     race condition between both words getting written and
	     some other thread attempting to read them.  The ARM
	     architecture does not have an atomic 64 bit load/store
	     instruction that could be used to prevent it; it is
	     recommended that threaded FDPIC applications run with the
	     LD_BIND_NOW environment variable set.  */
	  bfd_put_32(output_bfd, plt_address + 0x18,
		     sgot->contents + got_offset);
	  bfd_put_32(output_bfd, -1 /*TODO*/,
		     sgot->contents + got_offset + 4);
	}
    }

  /* Finally emit the dynamic relocation computed above.  */
  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocations into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
10046
10047 /* Some relocations map to different relocations depending on the
10048 target. Return the real relocation. */
10049
10050 static int
10051 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10052 int r_type)
10053 {
10054 switch (r_type)
10055 {
10056 case R_ARM_TARGET1:
10057 if (globals->target1_is_rel)
10058 return R_ARM_REL32;
10059 else
10060 return R_ARM_ABS32;
10061
10062 case R_ARM_TARGET2:
10063 return globals->target2_reloc;
10064
10065 default:
10066 return r_type;
10067 }
10068 }
10069
10070 /* Return the base VMA address which should be subtracted from real addresses
10071 when resolving @dtpoff relocation.
10072 This is PT_TLS segment p_vaddr. */
10073
10074 static bfd_vma
10075 dtpoff_base (struct bfd_link_info *info)
10076 {
10077 /* If tls_sec is NULL, we should have signalled an error already. */
10078 if (elf_hash_table (info)->tls_sec == NULL)
10079 return 0;
10080 return elf_hash_table (info)->tls_sec->vma;
10081 }
10082
10083 /* Return the relocation value for @tpoff relocation
10084 if STT_TLS virtual address is ADDRESS. */
10085
10086 static bfd_vma
10087 tpoff (struct bfd_link_info *info, bfd_vma address)
10088 {
10089 struct elf_link_hash_table *htab = elf_hash_table (info);
10090 bfd_vma base;
10091
10092 /* If tls_sec is NULL, we should have signalled an error already. */
10093 if (htab->tls_sec == NULL)
10094 return 0;
10095 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10096 return address - htab->tls_sec->vma + base;
10097 }
10098
10099 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10100 VALUE is the relocation value. */
10101
10102 static bfd_reloc_status_type
10103 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10104 {
10105 if (value > 0xfff)
10106 return bfd_reloc_overflow;
10107
10108 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10109 bfd_put_32 (abfd, value, data);
10110 return bfd_reloc_ok;
10111 }
10112
/* Handle TLS relaxations.  Relaxing is possible for symbols that use
   R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
   R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.

   GLOBALS is the link hash table for this output; INPUT_BFD, INPUT_SEC
   and CONTENTS locate the code being rewritten; REL is the relocation
   being processed, and IS_LOCAL is non-zero when the referenced symbol
   resolves locally (allowing the stronger relaxation).

   Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
   is to then call final_link_relocate.  Return other values in the
   case of error.

   FIXME:When --emit-relocs is in effect, we'll emit relocs describing
   the pre-relaxed code.  It would be nice if the relocs were updated
   to match the optimization.   */

static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Adjust the word at the relocation, then let the caller apply
	 the final relocation (hence bfd_reloc_continue).  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  /* NOTE(review): the subtraction compensates for the PC bias
	     of the consuming instruction; the low bit appears to mark a
	     Thumb consumer -- confirm against the code that emits this
	     addend.  */
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Rewrite one instruction of the Thumb TLS descriptor sequence.
	 Each recognized instruction is replaced in place; anything
	 else in the sequence is an error.  */
      /* Thumb insn.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* Rewrite one instruction of the ARM TLS descriptor sequence,
	 mirroring the Thumb case above.  */
      /* arm insn.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000)	 /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004)  /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  The replacement is written as two 16-bit
	 halfwords, high halfword first.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10263
10264 /* For a given value of n, calculate the value of G_n as required to
10265 deal with group relocations. We return it in the form of an
10266 encoded constant-and-rotation, together with the final residual. If n is
10267 specified as less than zero, then final_residual is filled with the
10268 input value and no further action is performed. */
10269
10270 static bfd_vma
10271 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10272 {
10273 int current_n;
10274 bfd_vma g_n;
10275 bfd_vma encoded_g_n = 0;
10276 bfd_vma residual = value; /* Also known as Y_n. */
10277
10278 for (current_n = 0; current_n <= n; current_n++)
10279 {
10280 int shift;
10281
10282 /* Calculate which part of the value to mask. */
10283 if (residual == 0)
10284 shift = 0;
10285 else
10286 {
10287 int msb;
10288
10289 /* Determine the most significant bit in the residual and
10290 align the resulting value to a 2-bit boundary. */
10291 for (msb = 30; msb >= 0; msb -= 2)
10292 if (residual & (3 << msb))
10293 break;
10294
10295 /* The desired shift is now (msb - 6), or zero, whichever
10296 is the greater. */
10297 shift = msb - 6;
10298 if (shift < 0)
10299 shift = 0;
10300 }
10301
10302 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10303 g_n = residual & (0xff << shift);
10304 encoded_g_n = (g_n >> shift)
10305 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10306
10307 /* Calculate the residual for the next time around. */
10308 residual &= ~g_n;
10309 }
10310
10311 *final_residual = residual;
10312
10313 return encoded_g_n;
10314 }
10315
10316 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10317 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10318
10319 static int
10320 identify_add_or_sub (bfd_vma insn)
10321 {
10322 int opcode = insn & 0x1e00000;
10323
10324 if (opcode == 1 << 23) /* ADD */
10325 return 1;
10326
10327 if (opcode == 1 << 22) /* SUB */
10328 return -1;
10329
10330 return 0;
10331 }
10332
10333 /* Perform a relocation as part of a final link. */
10334
10335 static bfd_reloc_status_type
10336 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10337 bfd * input_bfd,
10338 bfd * output_bfd,
10339 asection * input_section,
10340 bfd_byte * contents,
10341 Elf_Internal_Rela * rel,
10342 bfd_vma value,
10343 struct bfd_link_info * info,
10344 asection * sym_sec,
10345 const char * sym_name,
10346 unsigned char st_type,
10347 enum arm_st_branch_type branch_type,
10348 struct elf_link_hash_entry * h,
10349 bfd_boolean * unresolved_reloc_p,
10350 char ** error_message)
10351 {
10352 unsigned long r_type = howto->type;
10353 unsigned long r_symndx;
10354 bfd_byte * hit_data = contents + rel->r_offset;
10355 bfd_vma * local_got_offsets;
10356 bfd_vma * local_tlsdesc_gotents;
10357 asection * sgot;
10358 asection * splt;
10359 asection * sreloc = NULL;
10360 asection * srelgot;
10361 bfd_vma addend;
10362 bfd_signed_vma signed_addend;
10363 unsigned char dynreloc_st_type;
10364 bfd_vma dynreloc_value;
10365 struct elf32_arm_link_hash_table * globals;
10366 struct elf32_arm_link_hash_entry *eh;
10367 union gotplt_union *root_plt;
10368 struct arm_plt_info *arm_plt;
10369 bfd_vma plt_offset;
10370 bfd_vma gotplt_offset;
10371 bfd_boolean has_iplt_entry;
10372 bfd_boolean resolved_to_zero;
10373
10374 globals = elf32_arm_hash_table (info);
10375 if (globals == NULL)
10376 return bfd_reloc_notsupported;
10377
10378 BFD_ASSERT (is_arm_elf (input_bfd));
10379 BFD_ASSERT (howto != NULL);
10380
10381 /* Some relocation types map to different relocations depending on the
10382 target. We pick the right one here. */
10383 r_type = arm_real_reloc_type (globals, r_type);
10384
10385 /* It is possible to have linker relaxations on some TLS access
10386 models. Update our information here. */
10387 r_type = elf32_arm_tls_transition (info, r_type, h);
10388
10389 if (r_type != howto->type)
10390 howto = elf32_arm_howto_from_type (r_type);
10391
10392 eh = (struct elf32_arm_link_hash_entry *) h;
10393 sgot = globals->root.sgot;
10394 local_got_offsets = elf_local_got_offsets (input_bfd);
10395 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10396
10397 if (globals->root.dynamic_sections_created)
10398 srelgot = globals->root.srelgot;
10399 else
10400 srelgot = NULL;
10401
10402 r_symndx = ELF32_R_SYM (rel->r_info);
10403
10404 if (globals->use_rel)
10405 {
10406 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10407
10408 if (addend & ((howto->src_mask + 1) >> 1))
10409 {
10410 signed_addend = -1;
10411 signed_addend &= ~ howto->src_mask;
10412 signed_addend |= addend;
10413 }
10414 else
10415 signed_addend = addend;
10416 }
10417 else
10418 addend = signed_addend = rel->r_addend;
10419
10420 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10421 are resolving a function call relocation. */
10422 if (using_thumb_only (globals)
10423 && (r_type == R_ARM_THM_CALL
10424 || r_type == R_ARM_THM_JUMP24)
10425 && branch_type == ST_BRANCH_TO_ARM)
10426 branch_type = ST_BRANCH_TO_THUMB;
10427
10428 /* Record the symbol information that should be used in dynamic
10429 relocations. */
10430 dynreloc_st_type = st_type;
10431 dynreloc_value = value;
10432 if (branch_type == ST_BRANCH_TO_THUMB)
10433 dynreloc_value |= 1;
10434
10435 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10436 VALUE appropriately for relocations that we resolve at link time. */
10437 has_iplt_entry = FALSE;
10438 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10439 &arm_plt)
10440 && root_plt->offset != (bfd_vma) -1)
10441 {
10442 plt_offset = root_plt->offset;
10443 gotplt_offset = arm_plt->got_offset;
10444
10445 if (h == NULL || eh->is_iplt)
10446 {
10447 has_iplt_entry = TRUE;
10448 splt = globals->root.iplt;
10449
10450 /* Populate .iplt entries here, because not all of them will
10451 be seen by finish_dynamic_symbol. The lower bit is set if
10452 we have already populated the entry. */
10453 if (plt_offset & 1)
10454 plt_offset--;
10455 else
10456 {
10457 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10458 -1, dynreloc_value))
10459 root_plt->offset |= 1;
10460 else
10461 return bfd_reloc_notsupported;
10462 }
10463
10464 /* Static relocations always resolve to the .iplt entry. */
10465 st_type = STT_FUNC;
10466 value = (splt->output_section->vma
10467 + splt->output_offset
10468 + plt_offset);
10469 branch_type = ST_BRANCH_TO_ARM;
10470
10471 /* If there are non-call relocations that resolve to the .iplt
10472 entry, then all dynamic ones must too. */
10473 if (arm_plt->noncall_refcount != 0)
10474 {
10475 dynreloc_st_type = st_type;
10476 dynreloc_value = value;
10477 }
10478 }
10479 else
10480 /* We populate the .plt entry in finish_dynamic_symbol. */
10481 splt = globals->root.splt;
10482 }
10483 else
10484 {
10485 splt = NULL;
10486 plt_offset = (bfd_vma) -1;
10487 gotplt_offset = (bfd_vma) -1;
10488 }
10489
10490 resolved_to_zero = (h != NULL
10491 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10492
10493 switch (r_type)
10494 {
10495 case R_ARM_NONE:
10496 /* We don't need to find a value for this symbol. It's just a
10497 marker. */
10498 *unresolved_reloc_p = FALSE;
10499 return bfd_reloc_ok;
10500
10501 case R_ARM_ABS12:
10502 if (!globals->vxworks_p)
10503 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10504 /* Fall through. */
10505
10506 case R_ARM_PC24:
10507 case R_ARM_ABS32:
10508 case R_ARM_ABS32_NOI:
10509 case R_ARM_REL32:
10510 case R_ARM_REL32_NOI:
10511 case R_ARM_CALL:
10512 case R_ARM_JUMP24:
10513 case R_ARM_XPC25:
10514 case R_ARM_PREL31:
10515 case R_ARM_PLT32:
10516 /* Handle relocations which should use the PLT entry. ABS32/REL32
10517 will use the symbol's value, which may point to a PLT entry, but we
10518 don't need to handle that here. If we created a PLT entry, all
10519 branches in this object should go to it, except if the PLT is too
10520 far away, in which case a long branch stub should be inserted. */
10521 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10522 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10523 && r_type != R_ARM_CALL
10524 && r_type != R_ARM_JUMP24
10525 && r_type != R_ARM_PLT32)
10526 && plt_offset != (bfd_vma) -1)
10527 {
10528 /* If we've created a .plt section, and assigned a PLT entry
10529 to this function, it must either be a STT_GNU_IFUNC reference
10530 or not be known to bind locally. In other cases, we should
10531 have cleared the PLT entry by now. */
10532 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10533
10534 value = (splt->output_section->vma
10535 + splt->output_offset
10536 + plt_offset);
10537 *unresolved_reloc_p = FALSE;
10538 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10539 contents, rel->r_offset, value,
10540 rel->r_addend);
10541 }
10542
10543 /* When generating a shared object or relocatable executable, these
10544 relocations are copied into the output file to be resolved at
10545 run time. */
10546 if ((bfd_link_pic (info)
10547 || globals->root.is_relocatable_executable
10548 || globals->fdpic_p)
10549 && (input_section->flags & SEC_ALLOC)
10550 && !(globals->vxworks_p
10551 && strcmp (input_section->output_section->name,
10552 ".tls_vars") == 0)
10553 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10554 || !SYMBOL_CALLS_LOCAL (info, h))
10555 && !(input_bfd == globals->stub_bfd
10556 && strstr (input_section->name, STUB_SUFFIX))
10557 && (h == NULL
10558 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10559 && !resolved_to_zero)
10560 || h->root.type != bfd_link_hash_undefweak)
10561 && r_type != R_ARM_PC24
10562 && r_type != R_ARM_CALL
10563 && r_type != R_ARM_JUMP24
10564 && r_type != R_ARM_PREL31
10565 && r_type != R_ARM_PLT32)
10566 {
10567 Elf_Internal_Rela outrel;
10568 bfd_boolean skip, relocate;
10569 int isrofixup = 0;
10570
10571 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10572 && !h->def_regular)
10573 {
10574 char *v = _("shared object");
10575
10576 if (bfd_link_executable (info))
10577 v = _("PIE executable");
10578
10579 _bfd_error_handler
10580 (_("%pB: relocation %s against external or undefined symbol `%s'"
10581 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10582 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10583 return bfd_reloc_notsupported;
10584 }
10585
10586 *unresolved_reloc_p = FALSE;
10587
10588 if (sreloc == NULL && globals->root.dynamic_sections_created)
10589 {
10590 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10591 ! globals->use_rel);
10592
10593 if (sreloc == NULL)
10594 return bfd_reloc_notsupported;
10595 }
10596
10597 skip = FALSE;
10598 relocate = FALSE;
10599
10600 outrel.r_addend = addend;
10601 outrel.r_offset =
10602 _bfd_elf_section_offset (output_bfd, info, input_section,
10603 rel->r_offset);
10604 if (outrel.r_offset == (bfd_vma) -1)
10605 skip = TRUE;
10606 else if (outrel.r_offset == (bfd_vma) -2)
10607 skip = TRUE, relocate = TRUE;
10608 outrel.r_offset += (input_section->output_section->vma
10609 + input_section->output_offset);
10610
10611 if (skip)
10612 memset (&outrel, 0, sizeof outrel);
10613 else if (h != NULL
10614 && h->dynindx != -1
10615 && (!bfd_link_pic (info)
10616 || !(bfd_link_pie (info)
10617 || SYMBOLIC_BIND (info, h))
10618 || !h->def_regular))
10619 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10620 else
10621 {
10622 int symbol;
10623
10624 /* This symbol is local, or marked to become local. */
10625 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10626 || (globals->fdpic_p && !bfd_link_pic(info)));
10627 if (globals->symbian_p)
10628 {
10629 asection *osec;
10630
10631 /* On Symbian OS, the data segment and text segement
10632 can be relocated independently. Therefore, we
10633 must indicate the segment to which this
10634 relocation is relative. The BPABI allows us to
10635 use any symbol in the right segment; we just use
10636 the section symbol as it is convenient. (We
10637 cannot use the symbol given by "h" directly as it
10638 will not appear in the dynamic symbol table.)
10639
10640 Note that the dynamic linker ignores the section
10641 symbol value, so we don't subtract osec->vma
10642 from the emitted reloc addend. */
10643 if (sym_sec)
10644 osec = sym_sec->output_section;
10645 else
10646 osec = input_section->output_section;
10647 symbol = elf_section_data (osec)->dynindx;
10648 if (symbol == 0)
10649 {
10650 struct elf_link_hash_table *htab = elf_hash_table (info);
10651
10652 if ((osec->flags & SEC_READONLY) == 0
10653 && htab->data_index_section != NULL)
10654 osec = htab->data_index_section;
10655 else
10656 osec = htab->text_index_section;
10657 symbol = elf_section_data (osec)->dynindx;
10658 }
10659 BFD_ASSERT (symbol != 0);
10660 }
10661 else
10662 /* On SVR4-ish systems, the dynamic loader cannot
10663 relocate the text and data segments independently,
10664 so the symbol does not matter. */
10665 symbol = 0;
10666 if (dynreloc_st_type == STT_GNU_IFUNC)
10667 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10668 to the .iplt entry. Instead, every non-call reference
10669 must use an R_ARM_IRELATIVE relocation to obtain the
10670 correct run-time address. */
10671 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10672 else if (globals->fdpic_p && !bfd_link_pic(info))
10673 isrofixup = 1;
10674 else
10675 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10676 if (globals->use_rel)
10677 relocate = TRUE;
10678 else
10679 outrel.r_addend += dynreloc_value;
10680 }
10681
10682 if (isrofixup)
10683 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10684 else
10685 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10686
10687 /* If this reloc is against an external symbol, we do not want to
10688 fiddle with the addend. Otherwise, we need to include the symbol
10689 value so that it becomes an addend for the dynamic reloc. */
10690 if (! relocate)
10691 return bfd_reloc_ok;
10692
10693 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10694 contents, rel->r_offset,
10695 dynreloc_value, (bfd_vma) 0);
10696 }
10697 else switch (r_type)
10698 {
10699 case R_ARM_ABS12:
10700 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10701
10702 case R_ARM_XPC25: /* Arm BLX instruction. */
10703 case R_ARM_CALL:
10704 case R_ARM_JUMP24:
10705 case R_ARM_PC24: /* Arm B/BL instruction. */
10706 case R_ARM_PLT32:
10707 {
10708 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10709
10710 if (r_type == R_ARM_XPC25)
10711 {
10712 /* Check for Arm calling Arm function. */
10713 /* FIXME: Should we translate the instruction into a BL
10714 instruction instead ? */
10715 if (branch_type != ST_BRANCH_TO_THUMB)
10716 _bfd_error_handler
10717 (_("\%pB: warning: %s BLX instruction targets"
10718 " %s function '%s'"),
10719 input_bfd, "ARM",
10720 "ARM", h ? h->root.root.string : "(local)");
10721 }
10722 else if (r_type == R_ARM_PC24)
10723 {
10724 /* Check for Arm calling Thumb function. */
10725 if (branch_type == ST_BRANCH_TO_THUMB)
10726 {
10727 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10728 output_bfd, input_section,
10729 hit_data, sym_sec, rel->r_offset,
10730 signed_addend, value,
10731 error_message))
10732 return bfd_reloc_ok;
10733 else
10734 return bfd_reloc_dangerous;
10735 }
10736 }
10737
10738 /* Check if a stub has to be inserted because the
10739 destination is too far or we are changing mode. */
10740 if ( r_type == R_ARM_CALL
10741 || r_type == R_ARM_JUMP24
10742 || r_type == R_ARM_PLT32)
10743 {
10744 enum elf32_arm_stub_type stub_type = arm_stub_none;
10745 struct elf32_arm_link_hash_entry *hash;
10746
10747 hash = (struct elf32_arm_link_hash_entry *) h;
10748 stub_type = arm_type_of_stub (info, input_section, rel,
10749 st_type, &branch_type,
10750 hash, value, sym_sec,
10751 input_bfd, sym_name);
10752
10753 if (stub_type != arm_stub_none)
10754 {
10755 /* The target is out of reach, so redirect the
10756 branch to the local stub for this function. */
10757 stub_entry = elf32_arm_get_stub_entry (input_section,
10758 sym_sec, h,
10759 rel, globals,
10760 stub_type);
10761 {
10762 if (stub_entry != NULL)
10763 value = (stub_entry->stub_offset
10764 + stub_entry->stub_sec->output_offset
10765 + stub_entry->stub_sec->output_section->vma);
10766
10767 if (plt_offset != (bfd_vma) -1)
10768 *unresolved_reloc_p = FALSE;
10769 }
10770 }
10771 else
10772 {
10773 /* If the call goes through a PLT entry, make sure to
10774 check distance to the right destination address. */
10775 if (plt_offset != (bfd_vma) -1)
10776 {
10777 value = (splt->output_section->vma
10778 + splt->output_offset
10779 + plt_offset);
10780 *unresolved_reloc_p = FALSE;
10781 /* The PLT entry is in ARM mode, regardless of the
10782 target function. */
10783 branch_type = ST_BRANCH_TO_ARM;
10784 }
10785 }
10786 }
10787
10788 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10789 where:
10790 S is the address of the symbol in the relocation.
10791 P is address of the instruction being relocated.
10792 A is the addend (extracted from the instruction) in bytes.
10793
10794 S is held in 'value'.
10795 P is the base address of the section containing the
10796 instruction plus the offset of the reloc into that
10797 section, ie:
10798 (input_section->output_section->vma +
10799 input_section->output_offset +
10800 rel->r_offset).
10801 A is the addend, converted into bytes, ie:
10802 (signed_addend * 4)
10803
10804 Note: None of these operations have knowledge of the pipeline
10805 size of the processor, thus it is up to the assembler to
10806 encode this information into the addend. */
10807 value -= (input_section->output_section->vma
10808 + input_section->output_offset);
10809 value -= rel->r_offset;
10810 if (globals->use_rel)
10811 value += (signed_addend << howto->size);
10812 else
10813 /* RELA addends do not have to be adjusted by howto->size. */
10814 value += signed_addend;
10815
10816 signed_addend = value;
10817 signed_addend >>= howto->rightshift;
10818
10819 /* A branch to an undefined weak symbol is turned into a jump to
10820 the next instruction unless a PLT entry will be created.
10821 Do the same for local undefined symbols (but not for STN_UNDEF).
10822 The jump to the next instruction is optimized as a NOP depending
10823 on the architecture. */
10824 if (h ? (h->root.type == bfd_link_hash_undefweak
10825 && plt_offset == (bfd_vma) -1)
10826 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10827 {
10828 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10829
10830 if (arch_has_arm_nop (globals))
10831 value |= 0x0320f000;
10832 else
10833 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10834 }
10835 else
10836 {
10837 /* Perform a signed range check. */
10838 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10839 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10840 return bfd_reloc_overflow;
10841
10842 addend = (value & 2);
10843
10844 value = (signed_addend & howto->dst_mask)
10845 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10846
10847 if (r_type == R_ARM_CALL)
10848 {
10849 /* Set the H bit in the BLX instruction. */
10850 if (branch_type == ST_BRANCH_TO_THUMB)
10851 {
10852 if (addend)
10853 value |= (1 << 24);
10854 else
10855 value &= ~(bfd_vma)(1 << 24);
10856 }
10857
10858 /* Select the correct instruction (BL or BLX). */
10859 /* Only if we are not handling a BL to a stub. In this
10860 case, mode switching is performed by the stub. */
10861 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10862 value |= (1 << 28);
10863 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10864 {
10865 value &= ~(bfd_vma)(1 << 28);
10866 value |= (1 << 24);
10867 }
10868 }
10869 }
10870 }
10871 break;
10872
10873 case R_ARM_ABS32:
10874 value += addend;
10875 if (branch_type == ST_BRANCH_TO_THUMB)
10876 value |= 1;
10877 break;
10878
10879 case R_ARM_ABS32_NOI:
10880 value += addend;
10881 break;
10882
10883 case R_ARM_REL32:
10884 value += addend;
10885 if (branch_type == ST_BRANCH_TO_THUMB)
10886 value |= 1;
10887 value -= (input_section->output_section->vma
10888 + input_section->output_offset + rel->r_offset);
10889 break;
10890
10891 case R_ARM_REL32_NOI:
10892 value += addend;
10893 value -= (input_section->output_section->vma
10894 + input_section->output_offset + rel->r_offset);
10895 break;
10896
10897 case R_ARM_PREL31:
10898 value -= (input_section->output_section->vma
10899 + input_section->output_offset + rel->r_offset);
10900 value += signed_addend;
10901 if (! h || h->root.type != bfd_link_hash_undefweak)
10902 {
10903 /* Check for overflow. */
10904 if ((value ^ (value >> 1)) & (1 << 30))
10905 return bfd_reloc_overflow;
10906 }
10907 value &= 0x7fffffff;
10908 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10909 if (branch_type == ST_BRANCH_TO_THUMB)
10910 value |= 1;
10911 break;
10912 }
10913
10914 bfd_put_32 (input_bfd, value, hit_data);
10915 return bfd_reloc_ok;
10916
10917 case R_ARM_ABS8:
10918 /* PR 16202: Refectch the addend using the correct size. */
10919 if (globals->use_rel)
10920 addend = bfd_get_8 (input_bfd, hit_data);
10921 value += addend;
10922
10923 /* There is no way to tell whether the user intended to use a signed or
10924 unsigned addend. When checking for overflow we accept either,
10925 as specified by the AAELF. */
10926 if ((long) value > 0xff || (long) value < -0x80)
10927 return bfd_reloc_overflow;
10928
10929 bfd_put_8 (input_bfd, value, hit_data);
10930 return bfd_reloc_ok;
10931
10932 case R_ARM_ABS16:
10933 /* PR 16202: Refectch the addend using the correct size. */
10934 if (globals->use_rel)
10935 addend = bfd_get_16 (input_bfd, hit_data);
10936 value += addend;
10937
10938 /* See comment for R_ARM_ABS8. */
10939 if ((long) value > 0xffff || (long) value < -0x8000)
10940 return bfd_reloc_overflow;
10941
10942 bfd_put_16 (input_bfd, value, hit_data);
10943 return bfd_reloc_ok;
10944
10945 case R_ARM_THM_ABS5:
10946 /* Support ldr and str instructions for the thumb. */
10947 if (globals->use_rel)
10948 {
10949 /* Need to refetch addend. */
10950 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10951 /* ??? Need to determine shift amount from operand size. */
10952 addend >>= howto->rightshift;
10953 }
10954 value += addend;
10955
10956 /* ??? Isn't value unsigned? */
10957 if ((long) value > 0x1f || (long) value < -0x10)
10958 return bfd_reloc_overflow;
10959
10960 /* ??? Value needs to be properly shifted into place first. */
10961 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10962 bfd_put_16 (input_bfd, value, hit_data);
10963 return bfd_reloc_ok;
10964
    case R_ARM_THM_ALU_PREL_11_0:
      /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	       | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* Reassemble the 12-bit immediate from its i:imm3:imm8
	       fields; a SUBW encoding (0xf00000 set) means a negative
	       addend.  */
	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
			    | ((insn & (1 << 26)) >> 15);
	    if (insn & 0xf00000)
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	/* Pa: presumably the word-aligned PC of the place — confirm
	   against the Pa helper's definition.  */
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	/* PR 21523: Use an absolute value.  The user of this reloc will
	   have already selected an ADD or SUB insn appropriately.  */
	value = llabs (relocation);

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* Scatter the immediate back into i:imm3:imm8 and select SUBW
	   when the displacement was negative.  */
	insn = (insn & 0xfb0f8f00) | (value & 0xff)
	       | ((value & 0x700) << 4)
	       | ((value & 0x800) << 15);
	if (relocation < 0)
	  insn |= 0xa00000;

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }
11009
    case R_ARM_THM_PC8:
      /* PR 10073: This reloc is not generated by the GNU toolchain,
	 but it is supported for compatibility with third party libraries
	 generated by other compilers, specifically the ARM/IAR.  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = bfd_get_16 (input_bfd, hit_data);

	if (globals->use_rel)
	  /* Decode the 8-bit word offset; the +4/-4 dance models the
	     Thumb PC-relative pipeline offset.  */
	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;

	relocation = value + addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	/* We do not check for overflow of this reloc.  Although strictly
	   speaking this is incorrect, it appears to be necessary in order
	   to work with IAR generated relocs.  Since GCC and GAS do not
	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
	   a problem for them.  */
	value &= 0x3fc;

	insn = (insn & 0xff00) | (value >> 2);

	bfd_put_16 (input_bfd, insn, hit_data);

	return bfd_reloc_ok;
      }
11043
    case R_ARM_THM_PC12:
      /* Corresponds to: ldr.w reg, [pc, #offset].  */
      {
	bfd_vma insn;
	bfd_signed_vma relocation;

	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
	       | bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* 12-bit immediate; the U bit (23) selects add/subtract.  */
	    signed_addend = insn & 0xfff;
	    if (!(insn & (1 << 23)))
	      signed_addend = -signed_addend;
	  }

	relocation = value + signed_addend;
	relocation -= Pa (input_section->output_section->vma
			  + input_section->output_offset
			  + rel->r_offset);

	value = relocation;

	if (value >= 0x1000)
	  return bfd_reloc_overflow;

	/* Re-insert the immediate and set the U bit for a non-negative
	   displacement.  */
	insn = (insn & 0xff7ff000) | value;
	if (relocation >= 0)
	  insn |= (1 << 23);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);

	return bfd_reloc_ok;
      }
11079
    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  */
      {
	bfd_vma relocation;
	bfd_vma reloc_sign;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	bfd_signed_vma reloc_signed_max;
	bfd_signed_vma reloc_signed_min;
	bfd_vma check;
	bfd_signed_vma signed_check;
	int bitsize;
	const int thumb2 = using_thumb2 (globals);
	const int thumb2_bl = using_thumb2_bl (globals);

	/* A branch to an undefined weak symbol is turned into a jump to
	   the next instruction unless a PLT entry will be created.
	   The jump to the next instruction is optimized as a NOP.W for
	   Thumb-2 enabled architectures.  */
	if (h && h->root.type == bfd_link_hash_undefweak
	    && plt_offset == (bfd_vma) -1)
	  {
	    if (thumb2)
	      {
		bfd_put_16 (input_bfd, 0xf3af, hit_data);
		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
	      }
	    else
	      {
		bfd_put_16 (input_bfd, 0xe000, hit_data);
		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
	      }
	    return bfd_reloc_ok;
	  }

	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
	   with Thumb-1) involving the J1 and J2 bits.  */
	if (globals->use_rel)
	  {
	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
	    bfd_vma upper = upper_insn & 0x3ff;
	    bfd_vma lower = lower_insn & 0x7ff;
	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
	    /* I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S).  */
	    bfd_vma i1 = j1 ^ s ? 0 : 1;
	    bfd_vma i2 = j2 ^ s ? 0 : 1;

	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
	    /* Sign extend.  */
	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

	    signed_addend = addend;
	  }

	if (r_type == R_ARM_THM_XPC22)
	  {
	    /* Check for Thumb to Thumb call.  */
	    /* FIXME: Should we translate the instruction into a BL
	       instruction instead ?  */
	    if (branch_type == ST_BRANCH_TO_THUMB)
	      _bfd_error_handler
		(_("%pB: warning: %s BLX instruction targets"
		   " %s function '%s'"),
		 input_bfd, "Thumb",
		 "Thumb", h ? h->root.root.string : "(local)");
	  }
	else
	  {
	    /* If it is not a call to Thumb, assume call to Arm.
	       If it is a call relative to a section name, then it is not a
	       function call at all, but rather a long jump.  Calls through
	       the PLT do not require stubs.  */
	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
	      {
		if (globals->use_blx && r_type == R_ARM_THM_CALL)
		  {
		    /* Convert BL to BLX.  */
		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
		else if (( r_type != R_ARM_THM_CALL)
			 && (r_type != R_ARM_THM_JUMP24))
		  {
		    /* No BLX available: fall back to a synthesised
		       Thumb-to-ARM stub.  */
		    if (elf32_thumb_to_arm_stub
			(info, sym_name, input_bfd, output_bfd, input_section,
			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
			 error_message))
		      return bfd_reloc_ok;
		    else
		      return bfd_reloc_dangerous;
		  }
	      }
	    else if (branch_type == ST_BRANCH_TO_THUMB
		     && globals->use_blx
		     && r_type == R_ARM_THM_CALL)
	      {
		/* Make sure this is a BL.  */
		lower_insn |= 0x1800;
	      }
	  }

	enum elf32_arm_stub_type stub_type = arm_stub_none;
	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
	  {
	    /* Check if a stub has to be inserted because the destination
	       is too far.  */
	    struct elf32_arm_stub_hash_entry *stub_entry;
	    struct elf32_arm_link_hash_entry *hash;

	    hash = (struct elf32_arm_link_hash_entry *) h;

	    stub_type = arm_type_of_stub (info, input_section, rel,
					  st_type, &branch_type,
					  hash, value, sym_sec,
					  input_bfd, sym_name);

	    if (stub_type != arm_stub_none)
	      {
		/* The target is out of reach or we are changing modes, so
		   redirect the branch to the local stub for this
		   function.  */
		stub_entry = elf32_arm_get_stub_entry (input_section,
						       sym_sec, h,
						       rel, globals,
						       stub_type);
		if (stub_entry != NULL)
		  {
		    value = (stub_entry->stub_offset
			     + stub_entry->stub_sec->output_offset
			     + stub_entry->stub_sec->output_section->vma);

		    if (plt_offset != (bfd_vma) -1)
		      *unresolved_reloc_p = FALSE;
		  }

		/* If this call becomes a call to Arm, force BLX.  */
		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
		  {
		    if ((stub_entry
			 && !arm_stub_is_thumb (stub_entry->stub_type))
			|| branch_type != ST_BRANCH_TO_THUMB)
		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
		  }
	      }
	  }

	/* Handle calls via the PLT.  */
	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);

	    if (globals->use_blx
		&& r_type == R_ARM_THM_CALL
		&& ! using_thumb_only (globals))
	      {
		/* If the Thumb BLX instruction is available, convert
		   the BL to a BLX instruction to call the ARM-mode
		   PLT entry.  */
		lower_insn = (lower_insn & ~0x1000) | 0x0800;
		branch_type = ST_BRANCH_TO_ARM;
	      }
	    else
	      {
		if (! using_thumb_only (globals))
		  /* Target the Thumb stub before the ARM PLT entry.  */
		  value -= PLT_THUMB_STUB_SIZE;
		branch_type = ST_BRANCH_TO_THUMB;
	      }
	    *unresolved_reloc_p = FALSE;
	  }

	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	check = relocation >> howto->rightshift;

	/* If this is a signed value, the rightshift just dropped
	   leading 1 bits (assuming twos complement).  */
	if ((bfd_signed_vma) relocation >= 0)
	  signed_check = check;
	else
	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

	/* Calculate the permissable maximum and minimum values for
	   this relocation according to whether we're relocating for
	   Thumb-2 or not.  */
	bitsize = howto->bitsize;
	if (!thumb2_bl)
	  bitsize -= 2;
	reloc_signed_max = (1 << (bitsize - 1)) - 1;
	reloc_signed_min = ~reloc_signed_max;

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	if ((lower_insn & 0x5000) == 0x4000)
	  /* For a BLX instruction, make sure that the relocation is rounded up
	     to a word boundary.  This follows the semantics of the instruction
	     which specifies that bit 1 of the target address will come from bit
	     1 of the base address.  */
	  relocation = (relocation + 2) & ~ 3;

	/* Put RELOCATION back into the insn.  Assumes two's complement.
	   We use the Thumb-2 encoding, which is safe even if dealing with
	   a Thumb-1 instruction by virtue of our overflow check above.  */
	reloc_sign = (signed_check < 0) ? 1 : 0;
	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
		     | ((relocation >> 12) & 0x3ff)
		     | (reloc_sign << 10);
	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
		     | ((relocation >> 1) & 0x7ff);

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11309
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
	bfd_vma relocation;
	bfd_boolean overflow = FALSE;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
	/* 21-bit signed branch range.  */
	bfd_signed_vma reloc_signed_max = 0xffffe;
	bfd_signed_vma reloc_signed_min = -0x100000;
	bfd_signed_vma signed_check;
	enum elf32_arm_stub_type stub_type = arm_stub_none;
	struct elf32_arm_stub_hash_entry *stub_entry;
	struct elf32_arm_link_hash_entry *hash;

	/* Need to refetch the addend, reconstruct the top three bits,
	   and squish the two 11 bit pieces together.  */
	if (globals->use_rel)
	  {
	    bfd_vma S = (upper_insn & 0x0400) >> 10;
	    bfd_vma upper = (upper_insn & 0x003f);
	    bfd_vma J1 = (lower_insn & 0x2000) >> 13;
	    bfd_vma J2 = (lower_insn & 0x0800) >> 11;
	    bfd_vma lower = (lower_insn & 0x07ff);

	    upper |= J1 << 6;
	    upper |= J2 << 7;
	    upper |= (!S) << 8;
	    upper -= 0x0100; /* Sign extend.  */

	    addend = (upper << 12) | (lower << 1);
	    signed_addend = addend;
	  }

	/* Handle calls via the PLT.  */
	if (plt_offset != (bfd_vma) -1)
	  {
	    value = (splt->output_section->vma
		     + splt->output_offset
		     + plt_offset);
	    /* Target the Thumb stub before the ARM PLT entry.  */
	    value -= PLT_THUMB_STUB_SIZE;
	    *unresolved_reloc_p = FALSE;
	  }

	hash = (struct elf32_arm_link_hash_entry *)h;

	stub_type = arm_type_of_stub (info, input_section, rel,
				      st_type, &branch_type,
				      hash, value, sym_sec,
				      input_bfd, sym_name);
	if (stub_type != arm_stub_none)
	  {
	    /* Out of range or mode change: branch via the local stub.  */
	    stub_entry = elf32_arm_get_stub_entry (input_section,
						   sym_sec, h,
						   rel, globals,
						   stub_type);
	    if (stub_entry != NULL)
	      {
		value = (stub_entry->stub_offset
			 + stub_entry->stub_sec->output_offset
			 + stub_entry->stub_sec->output_section->vma);
	      }
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);
	signed_check = (bfd_signed_vma) relocation;

	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  overflow = TRUE;

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma S = (relocation & 0x00100000) >> 20;
	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
	  bfd_vma lo = (relocation & 0x00000ffe) >> 1;

	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11401
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
	bfd_signed_vma relocation;
	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
	bfd_signed_vma signed_check;

	/* CZB cannot jump backward.  */
	if (r_type == R_ARM_THM_JUMP6)
	  reloc_signed_min = 0;

	if (globals->use_rel)
	  {
	    /* Need to refetch addend.  */
	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
	    /* Manual sign extension from the field's top bit.  */
	    if (addend & ((howto->src_mask + 1) >> 1))
	      {
		signed_addend = -1;
		signed_addend &= ~ howto->src_mask;
		signed_addend |= addend;
	      }
	    else
	      signed_addend = addend;
	    /* The value in the insn has been right shifted.  We need to
	       undo this, so that we can perform the address calculation
	       in terms of bytes.  */
	    signed_addend <<= howto->rightshift;
	  }
	relocation = value + signed_addend;

	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	relocation >>= howto->rightshift;
	signed_check = relocation;

	/* CB{N}Z splits its offset across the i and imm5 fields.  */
	if (r_type == R_ARM_THM_JUMP6)
	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
	else
	  relocation &= howto->dst_mask;
	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

	bfd_put_16 (input_bfd, relocation, hit_data);

	/* Assumes two's complement.  */
	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
	  return bfd_reloc_overflow;

	return bfd_reloc_ok;
      }
11456
11457 case R_ARM_ALU_PCREL7_0:
11458 case R_ARM_ALU_PCREL15_8:
11459 case R_ARM_ALU_PCREL23_15:
11460 {
11461 bfd_vma insn;
11462 bfd_vma relocation;
11463
11464 insn = bfd_get_32 (input_bfd, hit_data);
11465 if (globals->use_rel)
11466 {
11467 /* Extract the addend. */
11468 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11469 signed_addend = addend;
11470 }
11471 relocation = value + signed_addend;
11472
11473 relocation -= (input_section->output_section->vma
11474 + input_section->output_offset
11475 + rel->r_offset);
11476 insn = (insn & ~0xfff)
11477 | ((howto->bitpos << 7) & 0xf00)
11478 | ((relocation >> howto->bitpos) & 0xff);
11479 bfd_put_32 (input_bfd, value, hit_data);
11480 }
11481 return bfd_reloc_ok;
11482
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* GC bookkeeping relocs; nothing to patch at final link time.  */
      return bfd_reloc_ok;
11486
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
	 global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
	 address by one, so that attempts to call the function pointer will
	 correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
	value += 1;

      /* Note that sgot->output_offset is not involved in this
	 calculation.  We always want the start of .got.  If we
	 define _GLOBAL_OFFSET_TABLE in a different way, as is
	 permitted by the ABI, we might have to change this
	 calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11510
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
	return bfd_reloc_notsupported;

      /* Resolved here, never by the dynamic linker.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11523
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
	 global offset table.  */
      if (sgot == NULL)
	return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
	  && plt_offset != (bfd_vma) -1
	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
	{
	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
	     symbol, and the relocation resolves directly to the runtime
	     target rather than to the .iplt entry.  This means that any
	     .got entry would be the same value as the .igot.plt entry,
	     so there's no point creating both.  */
	  sgot = globals->root.igotplt;
	  value = sgot->output_offset + gotplt_offset;
	}
      else if (h != NULL)
	{
	  /* Global symbol: use (and, the first time, populate) the GOT
	     slot recorded on the hash entry.  Bit 0 of the offset marks
	     "already processed".  */
	  bfd_vma off;

	  off = h->got.offset;
	  BFD_ASSERT (off != (bfd_vma) -1);
	  if ((off & 1) != 0)
	    {
	      /* We have already processed one GOT relocation against
		 this symbol.  */
	      off &= ~1;
	      if (globals->root.dynamic_sections_created
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		*unresolved_reloc_p = FALSE;
	    }
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (((h->dynindx != -1) || globals->fdpic_p)
		  && !SYMBOL_REFERENCES_LOCAL (info, h))
		{
		  /* If the symbol doesn't resolve locally in a static
		     object, we have an undefined reference.  If the
		     symbol doesn't resolve locally in a dynamic object,
		     it should be resolved by the dynamic linker.  */
		  if (globals->root.dynamic_sections_created)
		    {
		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
		      *unresolved_reloc_p = FALSE;
		    }
		  else
		    outrel.r_info = 0;
		  outrel.r_addend = 0;
		}
	      else
		{
		  if (dynreloc_st_type == STT_GNU_IFUNC)
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
		  else if (bfd_link_pic (info)
			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
			       || h->root.type != bfd_link_hash_undefweak))
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		  else
		    {
		      outrel.r_info = 0;
		      /* Static FDPIC: record a read-only fixup instead
			 of a dynamic reloc.  */
		      if (globals->fdpic_p)
			isrofixup = 1;
		    }
		  outrel.r_addend = dynreloc_value;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (outrel.r_addend != 0
		  && (globals->use_rel || outrel.r_info == 0))
		{
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);
		  outrel.r_addend = 0;
		}

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     elf32_arm_hash_table(info)->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      h->got.offset |= 1;
	    }
	  value = sgot->output_offset + off;
	}
      else
	{
	  /* Local symbol: same scheme, but the slot lives in the
	     per-bfd local_got_offsets array.  */
	  bfd_vma off;

	  BFD_ASSERT (local_got_offsets != NULL
		      && local_got_offsets[r_symndx] != (bfd_vma) -1);

	  off = local_got_offsets[r_symndx];

	  /* The offset must always be a multiple of 4.  We use the
	     least significant bit to record whether we have already
	     generated the necessary reloc.  */
	  if ((off & 1) != 0)
	    off &= ~1;
	  else
	    {
	      Elf_Internal_Rela outrel;
	      int isrofixup = 0;

	      if (dynreloc_st_type == STT_GNU_IFUNC)
		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	      else if (bfd_link_pic (info))
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	      else
		{
		  outrel.r_info = 0;
		  if (globals->fdpic_p)
		    isrofixup = 1;
		}

	      /* The GOT entry is initialized to zero by default.
		 See if we should install a different value.  */
	      if (globals->use_rel || outrel.r_info == 0)
		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

	      if (isrofixup)
		arm_elf_add_rofixup (output_bfd,
				     globals->srofixup,
				     sgot->output_section->vma
				     + sgot->output_offset + off);

	      else if (outrel.r_info != 0)
		{
		  outrel.r_addend = addend + dynreloc_value;
		  outrel.r_offset = (sgot->output_section->vma
				     + sgot->output_offset
				     + off);
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		}

	      local_got_offsets[r_symndx] |= 1;
	    }

	  value = sgot->output_offset + off;
	}
      /* R_ARM_GOT32 resolves to a GOT-relative value; the other forms
	 want the absolute address of the slot.  */
      if (r_type != R_ARM_GOT32)
	value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
11685
11686 case R_ARM_TLS_LDO32:
11687 value = value - dtpoff_base (info);
11688
11689 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11690 contents, rel->r_offset, value,
11691 rel->r_addend);
11692
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: resolve to the shared module-ID GOT entry.  */
      {
	bfd_vma off;

	if (sgot == NULL)
	  abort ();

	/* One GOT slot pair is shared by all LDM relocs; bit 0 marks it
	   as already initialized.  */
	off = globals->tls_ldm_got.offset;

	if ((off & 1) != 0)
	  off &= ~1;
	else
	  {
	    /* If we don't know the module number, create a relocation
	       for it.  */
	    if (bfd_link_dll (info))
	      {
		Elf_Internal_Rela outrel;

		if (srelgot == NULL)
		  abort ();

		outrel.r_addend = 0;
		outrel.r_offset = (sgot->output_section->vma
				   + sgot->output_offset + off);
		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

		if (globals->use_rel)
		  bfd_put_32 (output_bfd, outrel.r_addend,
			      sgot->contents + off);

		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	    else
	      /* Executable link: the module is always module 1.  */
	      bfd_put_32 (output_bfd, 1, sgot->contents + off);

	    globals->tls_ldm_got.offset |= 1;
	  }

	if (r_type == R_ARM_TLS_LDM32_FDPIC)
	  {
	    /* FDPIC: store the GOT offset directly at the reloc site.  */
	    bfd_put_32(output_bfd,
		       globals->root.sgot->output_offset + off,
		       contents + rel->r_offset);

	    return bfd_reloc_ok;
	  }
	else
	  {
	    /* PC-relative distance from the reloc site to the GOT slot.  */
	    value = sgot->output_section->vma + sgot->output_offset + off
		    - (input_section->output_section->vma
		       + input_section->output_offset + rel->r_offset);

	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
					     contents, rel->r_offset, value,
					     rel->r_addend);
	  }
      }
11752
11753 case R_ARM_TLS_CALL:
11754 case R_ARM_THM_TLS_CALL:
11755 case R_ARM_TLS_GD32:
11756 case R_ARM_TLS_GD32_FDPIC:
11757 case R_ARM_TLS_IE32:
11758 case R_ARM_TLS_IE32_FDPIC:
11759 case R_ARM_TLS_GOTDESC:
11760 case R_ARM_TLS_DESCSEQ:
11761 case R_ARM_THM_TLS_DESCSEQ:
11762 {
11763 bfd_vma off, offplt;
11764 int indx = 0;
11765 char tls_type;
11766
11767 BFD_ASSERT (sgot != NULL);
11768
11769 if (h != NULL)
11770 {
11771 bfd_boolean dyn;
11772 dyn = globals->root.dynamic_sections_created;
11773 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11774 bfd_link_pic (info),
11775 h)
11776 && (!bfd_link_pic (info)
11777 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11778 {
11779 *unresolved_reloc_p = FALSE;
11780 indx = h->dynindx;
11781 }
11782 off = h->got.offset;
11783 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11784 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11785 }
11786 else
11787 {
11788 BFD_ASSERT (local_got_offsets != NULL);
11789 off = local_got_offsets[r_symndx];
11790 offplt = local_tlsdesc_gotents[r_symndx];
11791 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11792 }
11793
11794 /* Linker relaxations happens from one of the
11795 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11796 if (ELF32_R_TYPE(rel->r_info) != r_type)
11797 tls_type = GOT_TLS_IE;
11798
11799 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11800
11801 if ((off & 1) != 0)
11802 off &= ~1;
11803 else
11804 {
11805 bfd_boolean need_relocs = FALSE;
11806 Elf_Internal_Rela outrel;
11807 int cur_off = off;
11808
11809 /* The GOT entries have not been initialized yet. Do it
11810 now, and emit any relocations. If both an IE GOT and a
11811 GD GOT are necessary, we emit the GD first. */
11812
11813 if ((bfd_link_dll (info) || indx != 0)
11814 && (h == NULL
11815 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11816 && !resolved_to_zero)
11817 || h->root.type != bfd_link_hash_undefweak))
11818 {
11819 need_relocs = TRUE;
11820 BFD_ASSERT (srelgot != NULL);
11821 }
11822
11823 if (tls_type & GOT_TLS_GDESC)
11824 {
11825 bfd_byte *loc;
11826
11827 /* We should have relaxed, unless this is an undefined
11828 weak symbol. */
11829 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11830 || bfd_link_dll (info));
11831 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11832 <= globals->root.sgotplt->size);
11833
11834 outrel.r_addend = 0;
11835 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11836 + globals->root.sgotplt->output_offset
11837 + offplt
11838 + globals->sgotplt_jump_table_size);
11839
11840 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11841 sreloc = globals->root.srelplt;
11842 loc = sreloc->contents;
11843 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11844 BFD_ASSERT (loc + RELOC_SIZE (globals)
11845 <= sreloc->contents + sreloc->size);
11846
11847 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11848
11849 /* For globals, the first word in the relocation gets
11850 the relocation index and the top bit set, or zero,
11851 if we're binding now. For locals, it gets the
11852 symbol's offset in the tls section. */
11853 bfd_put_32 (output_bfd,
11854 !h ? value - elf_hash_table (info)->tls_sec->vma
11855 : info->flags & DF_BIND_NOW ? 0
11856 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11857 globals->root.sgotplt->contents + offplt
11858 + globals->sgotplt_jump_table_size);
11859
11860 /* Second word in the relocation is always zero. */
11861 bfd_put_32 (output_bfd, 0,
11862 globals->root.sgotplt->contents + offplt
11863 + globals->sgotplt_jump_table_size + 4);
11864 }
11865 if (tls_type & GOT_TLS_GD)
11866 {
11867 if (need_relocs)
11868 {
11869 outrel.r_addend = 0;
11870 outrel.r_offset = (sgot->output_section->vma
11871 + sgot->output_offset
11872 + cur_off);
11873 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11874
11875 if (globals->use_rel)
11876 bfd_put_32 (output_bfd, outrel.r_addend,
11877 sgot->contents + cur_off);
11878
11879 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11880
11881 if (indx == 0)
11882 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11883 sgot->contents + cur_off + 4);
11884 else
11885 {
11886 outrel.r_addend = 0;
11887 outrel.r_info = ELF32_R_INFO (indx,
11888 R_ARM_TLS_DTPOFF32);
11889 outrel.r_offset += 4;
11890
11891 if (globals->use_rel)
11892 bfd_put_32 (output_bfd, outrel.r_addend,
11893 sgot->contents + cur_off + 4);
11894
11895 elf32_arm_add_dynreloc (output_bfd, info,
11896 srelgot, &outrel);
11897 }
11898 }
11899 else
11900 {
11901 /* If we are not emitting relocations for a
11902 general dynamic reference, then we must be in a
11903 static link or an executable link with the
11904 symbol binding locally. Mark it as belonging
11905 to module 1, the executable. */
11906 bfd_put_32 (output_bfd, 1,
11907 sgot->contents + cur_off);
11908 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11909 sgot->contents + cur_off + 4);
11910 }
11911
11912 cur_off += 8;
11913 }
11914
11915 if (tls_type & GOT_TLS_IE)
11916 {
11917 if (need_relocs)
11918 {
11919 if (indx == 0)
11920 outrel.r_addend = value - dtpoff_base (info);
11921 else
11922 outrel.r_addend = 0;
11923 outrel.r_offset = (sgot->output_section->vma
11924 + sgot->output_offset
11925 + cur_off);
11926 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11927
11928 if (globals->use_rel)
11929 bfd_put_32 (output_bfd, outrel.r_addend,
11930 sgot->contents + cur_off);
11931
11932 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11933 }
11934 else
11935 bfd_put_32 (output_bfd, tpoff (info, value),
11936 sgot->contents + cur_off);
11937 cur_off += 4;
11938 }
11939
11940 if (h != NULL)
11941 h->got.offset |= 1;
11942 else
11943 local_got_offsets[r_symndx] |= 1;
11944 }
11945
11946 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11947 off += 8;
11948 else if (tls_type & GOT_TLS_GDESC)
11949 off = offplt;
11950
11951 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11952 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11953 {
11954 bfd_signed_vma offset;
11955 /* TLS stubs are arm mode. The original symbol is a
11956 data object, so branch_type is bogus. */
11957 branch_type = ST_BRANCH_TO_ARM;
11958 enum elf32_arm_stub_type stub_type
11959 = arm_type_of_stub (info, input_section, rel,
11960 st_type, &branch_type,
11961 (struct elf32_arm_link_hash_entry *)h,
11962 globals->tls_trampoline, globals->root.splt,
11963 input_bfd, sym_name);
11964
11965 if (stub_type != arm_stub_none)
11966 {
11967 struct elf32_arm_stub_hash_entry *stub_entry
11968 = elf32_arm_get_stub_entry
11969 (input_section, globals->root.splt, 0, rel,
11970 globals, stub_type);
11971 offset = (stub_entry->stub_offset
11972 + stub_entry->stub_sec->output_offset
11973 + stub_entry->stub_sec->output_section->vma);
11974 }
11975 else
11976 offset = (globals->root.splt->output_section->vma
11977 + globals->root.splt->output_offset
11978 + globals->tls_trampoline);
11979
11980 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11981 {
11982 unsigned long inst;
11983
11984 offset -= (input_section->output_section->vma
11985 + input_section->output_offset
11986 + rel->r_offset + 8);
11987
11988 inst = offset >> 2;
11989 inst &= 0x00ffffff;
11990 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11991 }
11992 else
11993 {
11994 /* Thumb blx encodes the offset in a complicated
11995 fashion. */
11996 unsigned upper_insn, lower_insn;
11997 unsigned neg;
11998
11999 offset -= (input_section->output_section->vma
12000 + input_section->output_offset
12001 + rel->r_offset + 4);
12002
12003 if (stub_type != arm_stub_none
12004 && arm_stub_is_thumb (stub_type))
12005 {
12006 lower_insn = 0xd000;
12007 }
12008 else
12009 {
12010 lower_insn = 0xc000;
12011 /* Round up the offset to a word boundary. */
12012 offset = (offset + 2) & ~2;
12013 }
12014
12015 neg = offset < 0;
12016 upper_insn = (0xf000
12017 | ((offset >> 12) & 0x3ff)
12018 | (neg << 10));
12019 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
12020 | (((!((offset >> 22) & 1)) ^ neg) << 11)
12021 | ((offset >> 1) & 0x7ff);
12022 bfd_put_16 (input_bfd, upper_insn, hit_data);
12023 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12024 return bfd_reloc_ok;
12025 }
12026 }
12027 /* These relocations needs special care, as besides the fact
12028 they point somewhere in .gotplt, the addend must be
12029 adjusted accordingly depending on the type of instruction
12030 we refer to. */
12031 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
12032 {
12033 unsigned long data, insn;
12034 unsigned thumb;
12035
12036 data = bfd_get_signed_32 (input_bfd, hit_data);
12037 thumb = data & 1;
12038 data &= ~1ul;
12039
12040 if (thumb)
12041 {
12042 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12043 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12044 insn = (insn << 16)
12045 | bfd_get_16 (input_bfd,
12046 contents + rel->r_offset - data + 2);
12047 if ((insn & 0xf800c000) == 0xf000c000)
12048 /* bl/blx */
12049 value = -6;
12050 else if ((insn & 0xffffff00) == 0x4400)
12051 /* add */
12052 value = -5;
12053 else
12054 {
12055 _bfd_error_handler
12056 /* xgettext:c-format */
12057 (_("%pB(%pA+%#" PRIx64 "): "
12058 "unexpected %s instruction '%#lx' "
12059 "referenced by TLS_GOTDESC"),
12060 input_bfd, input_section, (uint64_t) rel->r_offset,
12061 "Thumb", insn);
12062 return bfd_reloc_notsupported;
12063 }
12064 }
12065 else
12066 {
12067 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12068
12069 switch (insn >> 24)
12070 {
12071 case 0xeb: /* bl */
12072 case 0xfa: /* blx */
12073 value = -4;
12074 break;
12075
12076 case 0xe0: /* add */
12077 value = -8;
12078 break;
12079
12080 default:
12081 _bfd_error_handler
12082 /* xgettext:c-format */
12083 (_("%pB(%pA+%#" PRIx64 "): "
12084 "unexpected %s instruction '%#lx' "
12085 "referenced by TLS_GOTDESC"),
12086 input_bfd, input_section, (uint64_t) rel->r_offset,
12087 "ARM", insn);
12088 return bfd_reloc_notsupported;
12089 }
12090 }
12091
12092 value += ((globals->root.sgotplt->output_section->vma
12093 + globals->root.sgotplt->output_offset + off)
12094 - (input_section->output_section->vma
12095 + input_section->output_offset
12096 + rel->r_offset)
12097 + globals->sgotplt_jump_table_size);
12098 }
12099 else
12100 value = ((globals->root.sgot->output_section->vma
12101 + globals->root.sgot->output_offset + off)
12102 - (input_section->output_section->vma
12103 + input_section->output_offset + rel->r_offset));
12104
12105 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12106 r_type == R_ARM_TLS_IE32_FDPIC))
12107 {
12108 /* For FDPIC relocations, resolve to the offset of the GOT
12109 entry from the start of GOT. */
12110 bfd_put_32(output_bfd,
12111 globals->root.sgot->output_offset + off,
12112 contents + rel->r_offset);
12113
12114 return bfd_reloc_ok;
12115 }
12116 else
12117 {
12118 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12119 contents, rel->r_offset, value,
12120 rel->r_addend);
12121 }
12122 }
12123
12124 case R_ARM_TLS_LE32:
12125 if (bfd_link_dll (info))
12126 {
12127 _bfd_error_handler
12128 /* xgettext:c-format */
12129 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12130 "in shared object"),
12131 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12132 return bfd_reloc_notsupported;
12133 }
12134 else
12135 value = tpoff (info, value);
12136
12137 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12138 contents, rel->r_offset, value,
12139 rel->r_addend);
12140
12141 case R_ARM_V4BX:
12142 if (globals->fix_v4bx)
12143 {
12144 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12145
12146 /* Ensure that we have a BX instruction. */
12147 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12148
12149 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12150 {
12151 /* Branch to veneer. */
12152 bfd_vma glue_addr;
12153 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12154 glue_addr -= input_section->output_section->vma
12155 + input_section->output_offset
12156 + rel->r_offset + 8;
12157 insn = (insn & 0xf0000000) | 0x0a000000
12158 | ((glue_addr >> 2) & 0x00ffffff);
12159 }
12160 else
12161 {
12162 /* Preserve Rm (lowest four bits) and the condition code
12163 (highest four bits). Other bits encode MOV PC,Rm. */
12164 insn = (insn & 0xf000000f) | 0x01a0f000;
12165 }
12166
12167 bfd_put_32 (input_bfd, insn, hit_data);
12168 }
12169 return bfd_reloc_ok;
12170
12171 case R_ARM_MOVW_ABS_NC:
12172 case R_ARM_MOVT_ABS:
12173 case R_ARM_MOVW_PREL_NC:
12174 case R_ARM_MOVT_PREL:
12175 /* Until we properly support segment-base-relative addressing then
12176 we assume the segment base to be zero, as for the group relocations.
12177 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12178 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12179 case R_ARM_MOVW_BREL_NC:
12180 case R_ARM_MOVW_BREL:
12181 case R_ARM_MOVT_BREL:
12182 {
12183 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12184
12185 if (globals->use_rel)
12186 {
12187 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12188 signed_addend = (addend ^ 0x8000) - 0x8000;
12189 }
12190
12191 value += signed_addend;
12192
12193 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12194 value -= (input_section->output_section->vma
12195 + input_section->output_offset + rel->r_offset);
12196
12197 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12198 return bfd_reloc_overflow;
12199
12200 if (branch_type == ST_BRANCH_TO_THUMB)
12201 value |= 1;
12202
12203 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12204 || r_type == R_ARM_MOVT_BREL)
12205 value >>= 16;
12206
12207 insn &= 0xfff0f000;
12208 insn |= value & 0xfff;
12209 insn |= (value & 0xf000) << 4;
12210 bfd_put_32 (input_bfd, insn, hit_data);
12211 }
12212 return bfd_reloc_ok;
12213
12214 case R_ARM_THM_MOVW_ABS_NC:
12215 case R_ARM_THM_MOVT_ABS:
12216 case R_ARM_THM_MOVW_PREL_NC:
12217 case R_ARM_THM_MOVT_PREL:
12218 /* Until we properly support segment-base-relative addressing then
12219 we assume the segment base to be zero, as for the above relocations.
12220 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12221 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12222 as R_ARM_THM_MOVT_ABS. */
12223 case R_ARM_THM_MOVW_BREL_NC:
12224 case R_ARM_THM_MOVW_BREL:
12225 case R_ARM_THM_MOVT_BREL:
12226 {
12227 bfd_vma insn;
12228
12229 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12230 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12231
12232 if (globals->use_rel)
12233 {
12234 addend = ((insn >> 4) & 0xf000)
12235 | ((insn >> 15) & 0x0800)
12236 | ((insn >> 4) & 0x0700)
12237 | (insn & 0x00ff);
12238 signed_addend = (addend ^ 0x8000) - 0x8000;
12239 }
12240
12241 value += signed_addend;
12242
12243 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12244 value -= (input_section->output_section->vma
12245 + input_section->output_offset + rel->r_offset);
12246
12247 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12248 return bfd_reloc_overflow;
12249
12250 if (branch_type == ST_BRANCH_TO_THUMB)
12251 value |= 1;
12252
12253 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12254 || r_type == R_ARM_THM_MOVT_BREL)
12255 value >>= 16;
12256
12257 insn &= 0xfbf08f00;
12258 insn |= (value & 0xf000) << 4;
12259 insn |= (value & 0x0800) << 15;
12260 insn |= (value & 0x0700) << 4;
12261 insn |= (value & 0x00ff);
12262
12263 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12264 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12265 }
12266 return bfd_reloc_ok;
12267
12268 case R_ARM_ALU_PC_G0_NC:
12269 case R_ARM_ALU_PC_G1_NC:
12270 case R_ARM_ALU_PC_G0:
12271 case R_ARM_ALU_PC_G1:
12272 case R_ARM_ALU_PC_G2:
12273 case R_ARM_ALU_SB_G0_NC:
12274 case R_ARM_ALU_SB_G1_NC:
12275 case R_ARM_ALU_SB_G0:
12276 case R_ARM_ALU_SB_G1:
12277 case R_ARM_ALU_SB_G2:
12278 {
12279 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12280 bfd_vma pc = input_section->output_section->vma
12281 + input_section->output_offset + rel->r_offset;
12282 /* sb is the origin of the *segment* containing the symbol. */
12283 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12284 bfd_vma residual;
12285 bfd_vma g_n;
12286 bfd_signed_vma signed_value;
12287 int group = 0;
12288
12289 /* Determine which group of bits to select. */
12290 switch (r_type)
12291 {
12292 case R_ARM_ALU_PC_G0_NC:
12293 case R_ARM_ALU_PC_G0:
12294 case R_ARM_ALU_SB_G0_NC:
12295 case R_ARM_ALU_SB_G0:
12296 group = 0;
12297 break;
12298
12299 case R_ARM_ALU_PC_G1_NC:
12300 case R_ARM_ALU_PC_G1:
12301 case R_ARM_ALU_SB_G1_NC:
12302 case R_ARM_ALU_SB_G1:
12303 group = 1;
12304 break;
12305
12306 case R_ARM_ALU_PC_G2:
12307 case R_ARM_ALU_SB_G2:
12308 group = 2;
12309 break;
12310
12311 default:
12312 abort ();
12313 }
12314
12315 /* If REL, extract the addend from the insn. If RELA, it will
12316 have already been fetched for us. */
12317 if (globals->use_rel)
12318 {
12319 int negative;
12320 bfd_vma constant = insn & 0xff;
12321 bfd_vma rotation = (insn & 0xf00) >> 8;
12322
12323 if (rotation == 0)
12324 signed_addend = constant;
12325 else
12326 {
12327 /* Compensate for the fact that in the instruction, the
12328 rotation is stored in multiples of 2 bits. */
12329 rotation *= 2;
12330
12331 /* Rotate "constant" right by "rotation" bits. */
12332 signed_addend = (constant >> rotation) |
12333 (constant << (8 * sizeof (bfd_vma) - rotation));
12334 }
12335
12336 /* Determine if the instruction is an ADD or a SUB.
12337 (For REL, this determines the sign of the addend.) */
12338 negative = identify_add_or_sub (insn);
12339 if (negative == 0)
12340 {
12341 _bfd_error_handler
12342 /* xgettext:c-format */
12343 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12344 "are allowed for ALU group relocations"),
12345 input_bfd, input_section, (uint64_t) rel->r_offset);
12346 return bfd_reloc_overflow;
12347 }
12348
12349 signed_addend *= negative;
12350 }
12351
12352 /* Compute the value (X) to go in the place. */
12353 if (r_type == R_ARM_ALU_PC_G0_NC
12354 || r_type == R_ARM_ALU_PC_G1_NC
12355 || r_type == R_ARM_ALU_PC_G0
12356 || r_type == R_ARM_ALU_PC_G1
12357 || r_type == R_ARM_ALU_PC_G2)
12358 /* PC relative. */
12359 signed_value = value - pc + signed_addend;
12360 else
12361 /* Section base relative. */
12362 signed_value = value - sb + signed_addend;
12363
12364 /* If the target symbol is a Thumb function, then set the
12365 Thumb bit in the address. */
12366 if (branch_type == ST_BRANCH_TO_THUMB)
12367 signed_value |= 1;
12368
12369 /* Calculate the value of the relevant G_n, in encoded
12370 constant-with-rotation format. */
12371 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12372 group, &residual);
12373
12374 /* Check for overflow if required. */
12375 if ((r_type == R_ARM_ALU_PC_G0
12376 || r_type == R_ARM_ALU_PC_G1
12377 || r_type == R_ARM_ALU_PC_G2
12378 || r_type == R_ARM_ALU_SB_G0
12379 || r_type == R_ARM_ALU_SB_G1
12380 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12381 {
12382 _bfd_error_handler
12383 /* xgettext:c-format */
12384 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12385 "splitting %#" PRIx64 " for group relocation %s"),
12386 input_bfd, input_section, (uint64_t) rel->r_offset,
12387 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12388 howto->name);
12389 return bfd_reloc_overflow;
12390 }
12391
12392 /* Mask out the value and the ADD/SUB part of the opcode; take care
12393 not to destroy the S bit. */
12394 insn &= 0xff1ff000;
12395
12396 /* Set the opcode according to whether the value to go in the
12397 place is negative. */
12398 if (signed_value < 0)
12399 insn |= 1 << 22;
12400 else
12401 insn |= 1 << 23;
12402
12403 /* Encode the offset. */
12404 insn |= g_n;
12405
12406 bfd_put_32 (input_bfd, insn, hit_data);
12407 }
12408 return bfd_reloc_ok;
12409
12410 case R_ARM_LDR_PC_G0:
12411 case R_ARM_LDR_PC_G1:
12412 case R_ARM_LDR_PC_G2:
12413 case R_ARM_LDR_SB_G0:
12414 case R_ARM_LDR_SB_G1:
12415 case R_ARM_LDR_SB_G2:
12416 {
12417 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12418 bfd_vma pc = input_section->output_section->vma
12419 + input_section->output_offset + rel->r_offset;
12420 /* sb is the origin of the *segment* containing the symbol. */
12421 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12422 bfd_vma residual;
12423 bfd_signed_vma signed_value;
12424 int group = 0;
12425
12426 /* Determine which groups of bits to calculate. */
12427 switch (r_type)
12428 {
12429 case R_ARM_LDR_PC_G0:
12430 case R_ARM_LDR_SB_G0:
12431 group = 0;
12432 break;
12433
12434 case R_ARM_LDR_PC_G1:
12435 case R_ARM_LDR_SB_G1:
12436 group = 1;
12437 break;
12438
12439 case R_ARM_LDR_PC_G2:
12440 case R_ARM_LDR_SB_G2:
12441 group = 2;
12442 break;
12443
12444 default:
12445 abort ();
12446 }
12447
12448 /* If REL, extract the addend from the insn. If RELA, it will
12449 have already been fetched for us. */
12450 if (globals->use_rel)
12451 {
12452 int negative = (insn & (1 << 23)) ? 1 : -1;
12453 signed_addend = negative * (insn & 0xfff);
12454 }
12455
12456 /* Compute the value (X) to go in the place. */
12457 if (r_type == R_ARM_LDR_PC_G0
12458 || r_type == R_ARM_LDR_PC_G1
12459 || r_type == R_ARM_LDR_PC_G2)
12460 /* PC relative. */
12461 signed_value = value - pc + signed_addend;
12462 else
12463 /* Section base relative. */
12464 signed_value = value - sb + signed_addend;
12465
12466 /* Calculate the value of the relevant G_{n-1} to obtain
12467 the residual at that stage. */
12468 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12469 group - 1, &residual);
12470
12471 /* Check for overflow. */
12472 if (residual >= 0x1000)
12473 {
12474 _bfd_error_handler
12475 /* xgettext:c-format */
12476 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12477 "splitting %#" PRIx64 " for group relocation %s"),
12478 input_bfd, input_section, (uint64_t) rel->r_offset,
12479 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12480 howto->name);
12481 return bfd_reloc_overflow;
12482 }
12483
12484 /* Mask out the value and U bit. */
12485 insn &= 0xff7ff000;
12486
12487 /* Set the U bit if the value to go in the place is non-negative. */
12488 if (signed_value >= 0)
12489 insn |= 1 << 23;
12490
12491 /* Encode the offset. */
12492 insn |= residual;
12493
12494 bfd_put_32 (input_bfd, insn, hit_data);
12495 }
12496 return bfd_reloc_ok;
12497
12498 case R_ARM_LDRS_PC_G0:
12499 case R_ARM_LDRS_PC_G1:
12500 case R_ARM_LDRS_PC_G2:
12501 case R_ARM_LDRS_SB_G0:
12502 case R_ARM_LDRS_SB_G1:
12503 case R_ARM_LDRS_SB_G2:
12504 {
12505 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12506 bfd_vma pc = input_section->output_section->vma
12507 + input_section->output_offset + rel->r_offset;
12508 /* sb is the origin of the *segment* containing the symbol. */
12509 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12510 bfd_vma residual;
12511 bfd_signed_vma signed_value;
12512 int group = 0;
12513
12514 /* Determine which groups of bits to calculate. */
12515 switch (r_type)
12516 {
12517 case R_ARM_LDRS_PC_G0:
12518 case R_ARM_LDRS_SB_G0:
12519 group = 0;
12520 break;
12521
12522 case R_ARM_LDRS_PC_G1:
12523 case R_ARM_LDRS_SB_G1:
12524 group = 1;
12525 break;
12526
12527 case R_ARM_LDRS_PC_G2:
12528 case R_ARM_LDRS_SB_G2:
12529 group = 2;
12530 break;
12531
12532 default:
12533 abort ();
12534 }
12535
12536 /* If REL, extract the addend from the insn. If RELA, it will
12537 have already been fetched for us. */
12538 if (globals->use_rel)
12539 {
12540 int negative = (insn & (1 << 23)) ? 1 : -1;
12541 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12542 }
12543
12544 /* Compute the value (X) to go in the place. */
12545 if (r_type == R_ARM_LDRS_PC_G0
12546 || r_type == R_ARM_LDRS_PC_G1
12547 || r_type == R_ARM_LDRS_PC_G2)
12548 /* PC relative. */
12549 signed_value = value - pc + signed_addend;
12550 else
12551 /* Section base relative. */
12552 signed_value = value - sb + signed_addend;
12553
12554 /* Calculate the value of the relevant G_{n-1} to obtain
12555 the residual at that stage. */
12556 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12557 group - 1, &residual);
12558
12559 /* Check for overflow. */
12560 if (residual >= 0x100)
12561 {
12562 _bfd_error_handler
12563 /* xgettext:c-format */
12564 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12565 "splitting %#" PRIx64 " for group relocation %s"),
12566 input_bfd, input_section, (uint64_t) rel->r_offset,
12567 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12568 howto->name);
12569 return bfd_reloc_overflow;
12570 }
12571
12572 /* Mask out the value and U bit. */
12573 insn &= 0xff7ff0f0;
12574
12575 /* Set the U bit if the value to go in the place is non-negative. */
12576 if (signed_value >= 0)
12577 insn |= 1 << 23;
12578
12579 /* Encode the offset. */
12580 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12581
12582 bfd_put_32 (input_bfd, insn, hit_data);
12583 }
12584 return bfd_reloc_ok;
12585
12586 case R_ARM_LDC_PC_G0:
12587 case R_ARM_LDC_PC_G1:
12588 case R_ARM_LDC_PC_G2:
12589 case R_ARM_LDC_SB_G0:
12590 case R_ARM_LDC_SB_G1:
12591 case R_ARM_LDC_SB_G2:
12592 {
12593 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12594 bfd_vma pc = input_section->output_section->vma
12595 + input_section->output_offset + rel->r_offset;
12596 /* sb is the origin of the *segment* containing the symbol. */
12597 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12598 bfd_vma residual;
12599 bfd_signed_vma signed_value;
12600 int group = 0;
12601
12602 /* Determine which groups of bits to calculate. */
12603 switch (r_type)
12604 {
12605 case R_ARM_LDC_PC_G0:
12606 case R_ARM_LDC_SB_G0:
12607 group = 0;
12608 break;
12609
12610 case R_ARM_LDC_PC_G1:
12611 case R_ARM_LDC_SB_G1:
12612 group = 1;
12613 break;
12614
12615 case R_ARM_LDC_PC_G2:
12616 case R_ARM_LDC_SB_G2:
12617 group = 2;
12618 break;
12619
12620 default:
12621 abort ();
12622 }
12623
12624 /* If REL, extract the addend from the insn. If RELA, it will
12625 have already been fetched for us. */
12626 if (globals->use_rel)
12627 {
12628 int negative = (insn & (1 << 23)) ? 1 : -1;
12629 signed_addend = negative * ((insn & 0xff) << 2);
12630 }
12631
12632 /* Compute the value (X) to go in the place. */
12633 if (r_type == R_ARM_LDC_PC_G0
12634 || r_type == R_ARM_LDC_PC_G1
12635 || r_type == R_ARM_LDC_PC_G2)
12636 /* PC relative. */
12637 signed_value = value - pc + signed_addend;
12638 else
12639 /* Section base relative. */
12640 signed_value = value - sb + signed_addend;
12641
12642 /* Calculate the value of the relevant G_{n-1} to obtain
12643 the residual at that stage. */
12644 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12645 group - 1, &residual);
12646
12647 /* Check for overflow. (The absolute value to go in the place must be
12648 divisible by four and, after having been divided by four, must
12649 fit in eight bits.) */
12650 if ((residual & 0x3) != 0 || residual >= 0x400)
12651 {
12652 _bfd_error_handler
12653 /* xgettext:c-format */
12654 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12655 "splitting %#" PRIx64 " for group relocation %s"),
12656 input_bfd, input_section, (uint64_t) rel->r_offset,
12657 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12658 howto->name);
12659 return bfd_reloc_overflow;
12660 }
12661
12662 /* Mask out the value and U bit. */
12663 insn &= 0xff7fff00;
12664
12665 /* Set the U bit if the value to go in the place is non-negative. */
12666 if (signed_value >= 0)
12667 insn |= 1 << 23;
12668
12669 /* Encode the offset. */
12670 insn |= residual >> 2;
12671
12672 bfd_put_32 (input_bfd, insn, hit_data);
12673 }
12674 return bfd_reloc_ok;
12675
12676 case R_ARM_THM_ALU_ABS_G0_NC:
12677 case R_ARM_THM_ALU_ABS_G1_NC:
12678 case R_ARM_THM_ALU_ABS_G2_NC:
12679 case R_ARM_THM_ALU_ABS_G3_NC:
12680 {
12681 const int shift_array[4] = {0, 8, 16, 24};
12682 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12683 bfd_vma addr = value;
12684 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12685
12686 /* Compute address. */
12687 if (globals->use_rel)
12688 signed_addend = insn & 0xff;
12689 addr += signed_addend;
12690 if (branch_type == ST_BRANCH_TO_THUMB)
12691 addr |= 1;
12692 /* Clean imm8 insn. */
12693 insn &= 0xff00;
12694 /* And update with correct part of address. */
12695 insn |= (addr >> shift) & 0xff;
12696 /* Update insn. */
12697 bfd_put_16 (input_bfd, insn, hit_data);
12698 }
12699
12700 *unresolved_reloc_p = FALSE;
12701 return bfd_reloc_ok;
12702
12703 case R_ARM_GOTOFFFUNCDESC:
12704 {
12705 if (h == NULL)
12706 {
12707 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12708 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12709 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12710 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12711 bfd_vma seg = -1;
12712
12713 if (bfd_link_pic(info) && dynindx == 0)
12714 abort();
12715
12716 /* Resolve relocation. */
12717 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12718 , contents + rel->r_offset);
12719 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12720 not done yet. */
12721 arm_elf_fill_funcdesc(output_bfd, info,
12722 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12723 dynindx, offset, addr, dynreloc_value, seg);
12724 }
12725 else
12726 {
12727 int dynindx;
12728 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12729 bfd_vma addr;
12730 bfd_vma seg = -1;
12731
12732 /* For static binaries, sym_sec can be null. */
12733 if (sym_sec)
12734 {
12735 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12736 addr = dynreloc_value - sym_sec->output_section->vma;
12737 }
12738 else
12739 {
12740 dynindx = 0;
12741 addr = 0;
12742 }
12743
12744 if (bfd_link_pic(info) && dynindx == 0)
12745 abort();
12746
12747 /* This case cannot occur since funcdesc is allocated by
12748 the dynamic loader so we cannot resolve the relocation. */
12749 if (h->dynindx != -1)
12750 abort();
12751
12752 /* Resolve relocation. */
12753 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12754 contents + rel->r_offset);
12755 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12756 arm_elf_fill_funcdesc(output_bfd, info,
12757 &eh->fdpic_cnts.funcdesc_offset,
12758 dynindx, offset, addr, dynreloc_value, seg);
12759 }
12760 }
12761 *unresolved_reloc_p = FALSE;
12762 return bfd_reloc_ok;
12763
12764 case R_ARM_GOTFUNCDESC:
12765 {
12766 if (h != NULL)
12767 {
12768 Elf_Internal_Rela outrel;
12769
12770 /* Resolve relocation. */
12771 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12772 + sgot->output_offset),
12773 contents + rel->r_offset);
12774 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12775 if(h->dynindx == -1)
12776 {
12777 int dynindx;
12778 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12779 bfd_vma addr;
12780 bfd_vma seg = -1;
12781
12782 /* For static binaries sym_sec can be null. */
12783 if (sym_sec)
12784 {
12785 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12786 addr = dynreloc_value - sym_sec->output_section->vma;
12787 }
12788 else
12789 {
12790 dynindx = 0;
12791 addr = 0;
12792 }
12793
12794 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12795 arm_elf_fill_funcdesc(output_bfd, info,
12796 &eh->fdpic_cnts.funcdesc_offset,
12797 dynindx, offset, addr, dynreloc_value, seg);
12798 }
12799
12800 /* Add a dynamic relocation on GOT entry if not already done. */
12801 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12802 {
12803 if (h->dynindx == -1)
12804 {
12805 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12806 if (h->root.type == bfd_link_hash_undefweak)
12807 bfd_put_32(output_bfd, 0, sgot->contents
12808 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12809 else
12810 bfd_put_32(output_bfd, sgot->output_section->vma
12811 + sgot->output_offset
12812 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12813 sgot->contents
12814 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12815 }
12816 else
12817 {
12818 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12819 }
12820 outrel.r_offset = sgot->output_section->vma
12821 + sgot->output_offset
12822 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12823 outrel.r_addend = 0;
12824 if (h->dynindx == -1 && !bfd_link_pic(info))
12825 if (h->root.type == bfd_link_hash_undefweak)
12826 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12827 else
12828 arm_elf_add_rofixup(output_bfd, globals->srofixup,
12829 outrel.r_offset);
12830 else
12831 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12832 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12833 }
12834 }
12835 else
12836 {
12837 /* Such relocation on static function should not have been
12838 emitted by the compiler. */
12839 abort();
12840 }
12841 }
12842 *unresolved_reloc_p = FALSE;
12843 return bfd_reloc_ok;
12844
12845 case R_ARM_FUNCDESC:
12846 {
12847 if (h == NULL)
12848 {
12849 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12850 Elf_Internal_Rela outrel;
12851 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12852 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12853 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12854 bfd_vma seg = -1;
12855
12856 if (bfd_link_pic(info) && dynindx == 0)
12857 abort();
12858
12859 /* Replace static FUNCDESC relocation with a
12860 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12861 executable. */
12862 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12863 outrel.r_offset = input_section->output_section->vma
12864 + input_section->output_offset + rel->r_offset;
12865 outrel.r_addend = 0;
12866 if (bfd_link_pic(info))
12867 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12868 else
12869 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12870
12871 bfd_put_32 (input_bfd, sgot->output_section->vma
12872 + sgot->output_offset + offset, hit_data);
12873
12874 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12875 arm_elf_fill_funcdesc(output_bfd, info,
12876 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12877 dynindx, offset, addr, dynreloc_value, seg);
12878 }
12879 else
12880 {
12881 if (h->dynindx == -1)
12882 {
12883 int dynindx;
12884 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12885 bfd_vma addr;
12886 bfd_vma seg = -1;
12887 Elf_Internal_Rela outrel;
12888
12889 /* For static binaries sym_sec can be null. */
12890 if (sym_sec)
12891 {
12892 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12893 addr = dynreloc_value - sym_sec->output_section->vma;
12894 }
12895 else
12896 {
12897 dynindx = 0;
12898 addr = 0;
12899 }
12900
12901 if (bfd_link_pic(info) && dynindx == 0)
12902 abort();
12903
12904 /* Replace static FUNCDESC relocation with a
12905 R_ARM_RELATIVE dynamic relocation. */
12906 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12907 outrel.r_offset = input_section->output_section->vma
12908 + input_section->output_offset + rel->r_offset;
12909 outrel.r_addend = 0;
12910 if (bfd_link_pic(info))
12911 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12912 else
12913 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12914
12915 bfd_put_32 (input_bfd, sgot->output_section->vma
12916 + sgot->output_offset + offset, hit_data);
12917
12918 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12919 arm_elf_fill_funcdesc(output_bfd, info,
12920 &eh->fdpic_cnts.funcdesc_offset,
12921 dynindx, offset, addr, dynreloc_value, seg);
12922 }
12923 else
12924 {
12925 Elf_Internal_Rela outrel;
12926
12927 /* Add a dynamic relocation. */
12928 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12929 outrel.r_offset = input_section->output_section->vma
12930 + input_section->output_offset + rel->r_offset;
12931 outrel.r_addend = 0;
12932 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12933 }
12934 }
12935 }
12936 *unresolved_reloc_p = FALSE;
12937 return bfd_reloc_ok;
12938
12939 case R_ARM_THM_BF16:
12940 {
12941 bfd_vma relocation;
12942 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12943 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12944
12945 if (globals->use_rel)
12946 {
12947 bfd_vma immA = (upper_insn & 0x001f);
12948 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12949 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12950 addend = (immA << 12);
12951 addend |= (immB << 2);
12952 addend |= (immC << 1);
12953 addend |= 1;
12954 /* Sign extend. */
12955 signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12956 }
12957
12958 relocation = value + signed_addend;
12959 relocation -= (input_section->output_section->vma
12960 + input_section->output_offset
12961 + rel->r_offset);
12962
12963 /* Put RELOCATION back into the insn. */
12964 {
12965 bfd_vma immA = (relocation & 0x0001f000) >> 12;
12966 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12967 bfd_vma immC = (relocation & 0x00000002) >> 1;
12968
12969 upper_insn = (upper_insn & 0xffe0) | immA;
12970 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12971 }
12972
12973 /* Put the relocated value back in the object file: */
12974 bfd_put_16 (input_bfd, upper_insn, hit_data);
12975 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12976
12977 return bfd_reloc_ok;
12978 }
12979
12980 case R_ARM_THM_BF12:
12981 {
12982 bfd_vma relocation;
12983 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12984 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12985
12986 if (globals->use_rel)
12987 {
12988 bfd_vma immA = (upper_insn & 0x0001);
12989 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
12990 bfd_vma immC = (lower_insn & 0x0800) >> 11;
12991 addend = (immA << 12);
12992 addend |= (immB << 2);
12993 addend |= (immC << 1);
12994 addend |= 1;
12995 /* Sign extend. */
12996 addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12997 signed_addend = addend;
12998 }
12999
13000 relocation = value + signed_addend;
13001 relocation -= (input_section->output_section->vma
13002 + input_section->output_offset
13003 + rel->r_offset);
13004
13005 /* Put RELOCATION back into the insn. */
13006 {
13007 bfd_vma immA = (relocation & 0x00001000) >> 12;
13008 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13009 bfd_vma immC = (relocation & 0x00000002) >> 1;
13010
13011 upper_insn = (upper_insn & 0xfffe) | immA;
13012 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13013 }
13014
13015 /* Put the relocated value back in the object file: */
13016 bfd_put_16 (input_bfd, upper_insn, hit_data);
13017 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13018
13019 return bfd_reloc_ok;
13020 }
13021
13022 case R_ARM_THM_BF18:
13023 {
13024 bfd_vma relocation;
13025 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
13026 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
13027
13028 if (globals->use_rel)
13029 {
13030 bfd_vma immA = (upper_insn & 0x007f);
13031 bfd_vma immB = (lower_insn & 0x07fe) >> 1;
13032 bfd_vma immC = (lower_insn & 0x0800) >> 11;
13033 addend = (immA << 12);
13034 addend |= (immB << 2);
13035 addend |= (immC << 1);
13036 addend |= 1;
13037 /* Sign extend. */
13038 addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
13039 signed_addend = addend;
13040 }
13041
13042 relocation = value + signed_addend;
13043 relocation -= (input_section->output_section->vma
13044 + input_section->output_offset
13045 + rel->r_offset);
13046
13047 /* Put RELOCATION back into the insn. */
13048 {
13049 bfd_vma immA = (relocation & 0x0007f000) >> 12;
13050 bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13051 bfd_vma immC = (relocation & 0x00000002) >> 1;
13052
13053 upper_insn = (upper_insn & 0xff80) | immA;
13054 lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13055 }
13056
13057 /* Put the relocated value back in the object file: */
13058 bfd_put_16 (input_bfd, upper_insn, hit_data);
13059 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13060
13061 return bfd_reloc_ok;
13062 }
13063
13064 default:
13065 return bfd_reloc_notsupported;
13066 }
13067 }
13068
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used on REL
   targets during relocatable (ld -r) links, where addends are stored in
   the section contents rather than in the relocation entries.  */
static void
arm_add_to_rel (bfd *		   abfd,
		bfd_byte *	   address,
		reloc_howto_type * howto,
		bfd_signed_vma	   increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      /* Thumb BL/B.W encode the offset split across two 16-bit
	 instruction halfwords, 11 bits in each.  */
      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the byte offset, adjust it, and convert back to a
	 halfword offset.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma	     contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  /* Sign-extend: src_mask + 1 >> 1 is the sign bit of the
	     field, assuming src_mask is a contiguous low mask.  */
	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored right-shifted (in words); scale
	     up before adding the byte increment.  NOTE(review): the
	     up-scaling uses howto->size, which for these 32-bit relocs
	     appears to equal howto->rightshift — confirm against the
	     howto table.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
13142
/* Nonzero if R_TYPE is any ARM TLS relocation, including the GNU
   TLS-descriptor dialect checked by IS_ARM_TLS_GNU_RELOC below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13164
/* Relocate an ARM ELF section.

   Walks every relocation in INPUT_SECTION, resolving each against the
   local symbol table or the global hash table, handling SEC_MERGE
   addend adjustment for REL targets, TLS relaxation, and relocatable
   links, and finally applying the relocation to CONTENTS via
   elf32_arm_final_link_relocate.  Returns FALSE on a hard error
   (unrecognized reloc, unresolvable reloc, bad SEC_MERGE reloc).  */

static bfd_boolean
elf32_arm_relocate_section (bfd *		   output_bfd,
			    struct bfd_link_info * info,
			    bfd *		   input_bfd,
			    asection *		   input_section,
			    bfd_byte *		   contents,
			    Elf_Internal_Rela *	   relocs,
			    Elf_Internal_Sym *	   local_syms,
			    asection **		   local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map old-ABI reloc numbers onto the current set.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* VTENTRY/VTINHERIT are garbage-collection bookkeeping only.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Relocation against a local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* REL target: the addend lives in the section
		     contents.  Extract it, let the merge machinery
		     remap the symbol+addend, and write the adjusted
		     addend back into the instruction.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* imm16 split as imm4:imm12 in the ARM insn.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      /* Sign-extend from 16 bits.  */
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* imm16 split as imm4:i:imm3:imm8 across the
			 two Thumb halfwords.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only relocs whose addend is stored unshifted
			 in a contiguous low mask can be handled
			 generically here.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Relocation against a global symbol.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Pick a symbol name for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Diagnose a TLS reloc against a non-TLS symbol and vice
	 versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
13510
13511 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13512 adds the edit to the start of the list. (The list must be built in order of
13513 ascending TINDEX: the function's callers are primarily responsible for
13514 maintaining that condition). */
13515
13516 static void
13517 add_unwind_table_edit (arm_unwind_table_edit **head,
13518 arm_unwind_table_edit **tail,
13519 arm_unwind_edit_type type,
13520 asection *linked_section,
13521 unsigned int tindex)
13522 {
13523 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13524 xmalloc (sizeof (arm_unwind_table_edit));
13525
13526 new_edit->type = type;
13527 new_edit->linked_section = linked_section;
13528 new_edit->index = tindex;
13529
13530 if (tindex > 0)
13531 {
13532 new_edit->next = NULL;
13533
13534 if (*tail)
13535 (*tail)->next = new_edit;
13536
13537 (*tail) = new_edit;
13538
13539 if (!*head)
13540 (*head) = new_edit;
13541 }
13542 else
13543 {
13544 new_edit->next = *head;
13545
13546 if (!*tail)
13547 *tail = new_edit;
13548
13549 *head = new_edit;
13550 }
13551 }
13552
13553 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13554
13555 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13556 static void
13557 adjust_exidx_size(asection *exidx_sec, int adjust)
13558 {
13559 asection *out_sec;
13560
13561 if (!exidx_sec->rawsize)
13562 exidx_sec->rawsize = exidx_sec->size;
13563
13564 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13565 out_sec = exidx_sec->output_section;
13566 /* Adjust size of output section. */
13567 bfd_set_section_size (out_sec, out_sec->size +adjust);
13568 }
13569
13570 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13571 static void
13572 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13573 {
13574 struct _arm_elf_section_data *exidx_arm_data;
13575
13576 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13577 add_unwind_table_edit (
13578 &exidx_arm_data->u.exidx.unwind_edit_list,
13579 &exidx_arm_data->u.exidx.unwind_edit_tail,
13580 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13581
13582 exidx_arm_data->additional_reloc_count++;
13583
13584 adjust_exidx_size(exidx_sec, 8);
13585 }
13586
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* Unwind state of the previous entry: -1 none seen yet, 0 cantunwind,
     1 inlined opcodes, 2 table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the table two words (one entry) at a time.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13771
13772 static bfd_boolean
13773 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13774 bfd *ibfd, const char *name)
13775 {
13776 asection *sec, *osec;
13777
13778 sec = bfd_get_linker_section (ibfd, name);
13779 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13780 return TRUE;
13781
13782 osec = sec->output_section;
13783 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13784 return TRUE;
13785
13786 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13787 sec->output_offset, sec->size))
13788 return FALSE;
13789
13790 return TRUE;
13791 }
13792
13793 static bfd_boolean
13794 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13795 {
13796 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13797 asection *sec, *osec;
13798
13799 if (globals == NULL)
13800 return FALSE;
13801
13802 /* Invoke the regular ELF backend linker to do all the work. */
13803 if (!bfd_elf_final_link (abfd, info))
13804 return FALSE;
13805
13806 /* Process stub sections (eg BE8 encoding, ...). */
13807 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13808 unsigned int i;
13809 for (i=0; i<htab->top_id; i++)
13810 {
13811 sec = htab->stub_group[i].stub_sec;
13812 /* Only process it once, in its link_sec slot. */
13813 if (sec && i == htab->stub_group[i].link_sec->id)
13814 {
13815 osec = sec->output_section;
13816 elf32_arm_write_section (abfd, info, sec, sec->contents);
13817 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13818 sec->output_offset, sec->size))
13819 return FALSE;
13820 }
13821 }
13822
13823 /* Write out any glue sections now that we have created all the
13824 stubs. */
13825 if (globals->bfd_of_glue_owner != NULL)
13826 {
13827 if (! elf32_arm_output_glue_section (info, abfd,
13828 globals->bfd_of_glue_owner,
13829 ARM2THUMB_GLUE_SECTION_NAME))
13830 return FALSE;
13831
13832 if (! elf32_arm_output_glue_section (info, abfd,
13833 globals->bfd_of_glue_owner,
13834 THUMB2ARM_GLUE_SECTION_NAME))
13835 return FALSE;
13836
13837 if (! elf32_arm_output_glue_section (info, abfd,
13838 globals->bfd_of_glue_owner,
13839 VFP11_ERRATUM_VENEER_SECTION_NAME))
13840 return FALSE;
13841
13842 if (! elf32_arm_output_glue_section (info, abfd,
13843 globals->bfd_of_glue_owner,
13844 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13845 return FALSE;
13846
13847 if (! elf32_arm_output_glue_section (info, abfd,
13848 globals->bfd_of_glue_owner,
13849 ARM_BX_GLUE_SECTION_NAME))
13850 return FALSE;
13851 }
13852
13853 return TRUE;
13854 }
13855
/* Return a best guess for the machine number based on the attributes.
   Reads Tag_CPU_arch, disambiguating TAG_CPU_ARCH_V5TE variants via
   Tag_CPU_name (and Tag_WMMX_arch for XScale parts).  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	char * name;

	/* v5TE covers several distinct BFD machines (iWMMXt, iWMMXt2,
	   XScale); use the CPU name attribute to tell them apart.  */
	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
	return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
	return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
	return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
	return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
	return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
	return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
	return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
	return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
	return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
	return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
	return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
	return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
	return bfd_mach_arm_8M_MAIN;
    case TAG_CPU_ARCH_V8_1M_MAIN:
	return bfd_mach_arm_8_1M_MAIN;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
13940
13941 /* Set the right machine number. */
13942
13943 static bfd_boolean
13944 elf32_arm_object_p (bfd *abfd)
13945 {
13946 unsigned int mach;
13947
13948 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13949
13950 if (mach == bfd_mach_arm_unknown)
13951 {
13952 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13953 mach = bfd_mach_arm_ep9312;
13954 else
13955 mach = bfd_arm_get_mach_from_attributes (abfd);
13956 }
13957
13958 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13959 return TRUE;
13960 }
13961
/* Function to keep ARM specific flags in the ELF header.  Stores FLAGS
   in ABFD's ELF header unless flags were already initialized to a
   different value, in which case only a diagnostic is emitted for
   interworking mismatches on EABI-unknown objects and the existing
   flags are left untouched.  Always returns TRUE.  */

static bfd_boolean
elf32_arm_set_private_flags (bfd *abfd, flagword flags)
{
  if (elf_flags_init (abfd)
      && elf_elfheader (abfd)->e_flags != flags)
    {
      if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
	{
	  if (flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
	       abfd);
	  else
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB due to outside request"),
	       abfd);
	}
    }
  else
    {
      /* Either flags were never set, or the new flags agree: record
	 them.  */
      elf_elfheader (abfd)->e_flags = flags;
      elf_flags_init (abfd) = TRUE;
    }

  return TRUE;
}
13990
/* Copy backend specific data from one object module to another.
   Propagates IBFD's e_flags into OBFD, reconciling APCS, interworking
   and PIC bits for legacy (EABI-unknown) objects.  Returns FALSE when
   APCS26/APCS32 or float/non-float APCS code is being mixed.  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  /* Only meaningful when both objects are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags  = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  /* Also copy the generic ELF private data.  */
  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
14039
/* Values for Tag_ABI_PCS_R9_use.  NOTE(review): enumerator order appears
   to follow the attribute's raw encoding (0..3) — confirm against the
   ARM build-attributes specification before reordering.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
14066
14067 /* Determine whether an object attribute tag takes an integer, a
14068 string or both. */
14069
14070 static int
14071 elf32_arm_obj_attrs_arg_type (int tag)
14072 {
14073 if (tag == Tag_compatibility)
14074 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14075 else if (tag == Tag_nodefaults)
14076 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14077 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14078 return ATTR_TYPE_FLAG_STR_VAL;
14079 else if (tag < 32)
14080 return ATTR_TYPE_FLAG_INT_VAL;
14081 else
14082 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14083 }
14084
14085 /* The ABI defines that Tag_conformance should be emitted first, and that
14086 Tag_nodefaults should be second (if either is defined). This sets those
14087 two positions, and bumps up the position of all the remaining tags to
14088 compensate. */
14089 static int
14090 elf32_arm_obj_attrs_order (int num)
14091 {
14092 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14093 return Tag_conformance;
14094 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14095 return Tag_nodefaults;
14096 if ((num - 2) < Tag_nodefaults)
14097 return num - 2;
14098 if ((num - 1) < Tag_conformance)
14099 return num - 1;
14100 return num;
14101 }
14102
14103 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14104 static bfd_boolean
14105 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14106 {
14107 if ((tag & 127) < 64)
14108 {
14109 _bfd_error_handler
14110 (_("%pB: unknown mandatory EABI object attribute %d"),
14111 abfd, tag);
14112 bfd_set_error (bfd_error_bad_value);
14113 return FALSE;
14114 }
14115 else
14116 {
14117 _bfd_error_handler
14118 (_("warning: %pB: unknown EABI object attribute %d"),
14119 abfd, tag);
14120 return TRUE;
14121 }
14122 }
14123
14124 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14125 Returns -1 if no architecture could be read. */
14126
14127 static int
14128 get_secondary_compatible_arch (bfd *abfd)
14129 {
14130 obj_attribute *attr =
14131 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14132
14133 /* Note: the tag and its argument below are uleb128 values, though
14134 currently-defined values fit in one byte for each. */
14135 if (attr->s
14136 && attr->s[0] == Tag_CPU_arch
14137 && (attr->s[1] & 128) != 128
14138 && attr->s[2] == 0)
14139 return attr->s[1];
14140
14141 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14142 return -1;
14143 }
14144
14145 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14146 The tag is removed if ARCH is -1. */
14147
14148 static void
14149 set_secondary_compatible_arch (bfd *abfd, int arch)
14150 {
14151 obj_attribute *attr =
14152 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14153
14154 if (arch == -1)
14155 {
14156 attr->s = NULL;
14157 return;
14158 }
14159
14160 /* Note: the tag and its argument below are uleb128 values, though
14161 currently-defined values fit in one byte for each. */
14162 if (!attr->s)
14163 attr->s = (char *) bfd_alloc (abfd, 3);
14164 attr->s[0] = Tag_CPU_arch;
14165 attr->s[1] = arch;
14166 attr->s[2] = '\0';
14167 }
14168
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.  OLDTAG and NEWTAG are the two Tag_CPU_arch values being
   merged; SECONDARY_COMPAT and *SECONDARY_COMPAT_OUT are the matching
   Tag_also_compatible_with architectures (-1 if absent).  Returns the
   merged Tag_CPU_arch value and updates *SECONDARY_COMPAT_OUT, or reports
   an error against IBFD and returns -1 if the architectures conflict.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each table below gives, for one architecture (the higher of the two
     tags), the merge result for every lower tag; the tables are selected
     through COMB, which is indexed by (tagh - T(V6T2)).  An entry of -1
     marks an incompatible combination.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  const int v8_1m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8_1M_MAIN),	/* V7.  */
      T(V8_1M_MAIN),	/* V6_M.  */
      T(V8_1M_MAIN),	/* V6S_M.  */
      T(V8_1M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8_1M_MAIN),	/* V8-M BASELINE.  */
      T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
    };
  /* Pseudo-architecture: V4T code that is also V6-M compatible.  */
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      -1,		/* Unused (18).  */
      -1,		/* Unused (19).  */
      -1,		/* Unused (20).  */
      T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Table of tables, indexed by (higher tag - T(V6T2)).  NULL entries
     correspond to tag values with no defined merge behaviour.  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      NULL,
      NULL,
      NULL,
      v8_1m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14464
/* Query attributes object to see if integer divide instructions may be
   present in an object.  ATTR is the object's known-attributes array.
   Returns TRUE unless Tag_DIV_use, together with the architecture and
   profile, rules the divide instructions out.  */
static bfd_boolean
elf32_arm_attributes_accept_div (const obj_attribute *attr)
{
  int arch = attr[Tag_CPU_arch].i;
  int profile = attr[Tag_CPU_arch_profile].i;

  switch (attr[Tag_DIV_use].i)
    {
    case 0:
      /* Integer divide allowed if instruction contained in
	 architecture: v7 R/M profiles, or anything from v7E-M up.  */
      if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
	return TRUE;
      else if (arch >= TAG_CPU_ARCH_V7E_M)
	return TRUE;
      else
	return FALSE;

    case 1:
      /* Integer divide explicitly prohibited.  */
      return FALSE;

    default:
      /* Unrecognised case - treat as allowing divide everywhere.  */
    case 2:
      /* Integer divide allowed in ARM state.  */
      return TRUE;
    }
}
14495
14496 /* Query attributes object to see if integer divide instructions are
14497 forbidden to be in the object. This is not the inverse of
14498 elf32_arm_attributes_accept_div. */
14499 static bfd_boolean
14500 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14501 {
14502 return attr[Tag_DIV_use].i == 1;
14503 }
14504
14505 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14506 are conflicting attributes. */
14507
14508 static bfd_boolean
14509 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14510 {
14511 bfd *obfd = info->output_bfd;
14512 obj_attribute *in_attr;
14513 obj_attribute *out_attr;
14514 /* Some tags have 0 = don't care, 1 = strong requirement,
14515 2 = weak requirement. */
14516 static const int order_021[3] = {0, 2, 1};
14517 int i;
14518 bfd_boolean result = TRUE;
14519 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14520
14521 /* Skip the linker stubs file. This preserves previous behavior
14522 of accepting unknown attributes in the first input file - but
14523 is that a bug? */
14524 if (ibfd->flags & BFD_LINKER_CREATED)
14525 return TRUE;
14526
14527 /* Skip any input that hasn't attribute section.
14528 This enables to link object files without attribute section with
14529 any others. */
14530 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14531 return TRUE;
14532
14533 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14534 {
14535 /* This is the first object. Copy the attributes. */
14536 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14537
14538 out_attr = elf_known_obj_attributes_proc (obfd);
14539
14540 /* Use the Tag_null value to indicate the attributes have been
14541 initialized. */
14542 out_attr[0].i = 1;
14543
14544 /* We do not output objects with Tag_MPextension_use_legacy - we move
14545 the attribute's value to Tag_MPextension_use. */
14546 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14547 {
14548 if (out_attr[Tag_MPextension_use].i != 0
14549 && out_attr[Tag_MPextension_use_legacy].i
14550 != out_attr[Tag_MPextension_use].i)
14551 {
14552 _bfd_error_handler
14553 (_("Error: %pB has both the current and legacy "
14554 "Tag_MPextension_use attributes"), ibfd);
14555 result = FALSE;
14556 }
14557
14558 out_attr[Tag_MPextension_use] =
14559 out_attr[Tag_MPextension_use_legacy];
14560 out_attr[Tag_MPextension_use_legacy].type = 0;
14561 out_attr[Tag_MPextension_use_legacy].i = 0;
14562 }
14563
14564 return result;
14565 }
14566
14567 in_attr = elf_known_obj_attributes_proc (ibfd);
14568 out_attr = elf_known_obj_attributes_proc (obfd);
14569 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14570 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14571 {
14572 /* Ignore mismatches if the object doesn't use floating point or is
14573 floating point ABI independent. */
14574 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14575 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14576 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14577 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14578 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14579 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14580 {
14581 _bfd_error_handler
14582 (_("error: %pB uses VFP register arguments, %pB does not"),
14583 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14584 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14585 result = FALSE;
14586 }
14587 }
14588
14589 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14590 {
14591 /* Merge this attribute with existing attributes. */
14592 switch (i)
14593 {
14594 case Tag_CPU_raw_name:
14595 case Tag_CPU_name:
14596 /* These are merged after Tag_CPU_arch. */
14597 break;
14598
14599 case Tag_ABI_optimization_goals:
14600 case Tag_ABI_FP_optimization_goals:
14601 /* Use the first value seen. */
14602 break;
14603
14604 case Tag_CPU_arch:
14605 {
14606 int secondary_compat = -1, secondary_compat_out = -1;
14607 unsigned int saved_out_attr = out_attr[i].i;
14608 int arch_attr;
14609 static const char *name_table[] =
14610 {
14611 /* These aren't real CPU names, but we can't guess
14612 that from the architecture version alone. */
14613 "Pre v4",
14614 "ARM v4",
14615 "ARM v4T",
14616 "ARM v5T",
14617 "ARM v5TE",
14618 "ARM v5TEJ",
14619 "ARM v6",
14620 "ARM v6KZ",
14621 "ARM v6T2",
14622 "ARM v6K",
14623 "ARM v7",
14624 "ARM v6-M",
14625 "ARM v6S-M",
14626 "ARM v8",
14627 "",
14628 "ARM v8-M.baseline",
14629 "ARM v8-M.mainline",
14630 };
14631
14632 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14633 secondary_compat = get_secondary_compatible_arch (ibfd);
14634 secondary_compat_out = get_secondary_compatible_arch (obfd);
14635 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14636 &secondary_compat_out,
14637 in_attr[i].i,
14638 secondary_compat);
14639
14640 /* Return with error if failed to merge. */
14641 if (arch_attr == -1)
14642 return FALSE;
14643
14644 out_attr[i].i = arch_attr;
14645
14646 set_secondary_compatible_arch (obfd, secondary_compat_out);
14647
14648 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14649 if (out_attr[i].i == saved_out_attr)
14650 ; /* Leave the names alone. */
14651 else if (out_attr[i].i == in_attr[i].i)
14652 {
14653 /* The output architecture has been changed to match the
14654 input architecture. Use the input names. */
14655 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14656 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14657 : NULL;
14658 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14659 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14660 : NULL;
14661 }
14662 else
14663 {
14664 out_attr[Tag_CPU_name].s = NULL;
14665 out_attr[Tag_CPU_raw_name].s = NULL;
14666 }
14667
14668 /* If we still don't have a value for Tag_CPU_name,
14669 make one up now. Tag_CPU_raw_name remains blank. */
14670 if (out_attr[Tag_CPU_name].s == NULL
14671 && out_attr[i].i < ARRAY_SIZE (name_table))
14672 out_attr[Tag_CPU_name].s =
14673 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14674 }
14675 break;
14676
14677 case Tag_ARM_ISA_use:
14678 case Tag_THUMB_ISA_use:
14679 case Tag_WMMX_arch:
14680 case Tag_Advanced_SIMD_arch:
14681 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14682 case Tag_ABI_FP_rounding:
14683 case Tag_ABI_FP_exceptions:
14684 case Tag_ABI_FP_user_exceptions:
14685 case Tag_ABI_FP_number_model:
14686 case Tag_FP_HP_extension:
14687 case Tag_CPU_unaligned_access:
14688 case Tag_T2EE_use:
14689 case Tag_MPextension_use:
14690 case Tag_MVE_arch:
14691 /* Use the largest value specified. */
14692 if (in_attr[i].i > out_attr[i].i)
14693 out_attr[i].i = in_attr[i].i;
14694 break;
14695
14696 case Tag_ABI_align_preserved:
14697 case Tag_ABI_PCS_RO_data:
14698 /* Use the smallest value specified. */
14699 if (in_attr[i].i < out_attr[i].i)
14700 out_attr[i].i = in_attr[i].i;
14701 break;
14702
14703 case Tag_ABI_align_needed:
14704 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14705 && (in_attr[Tag_ABI_align_preserved].i == 0
14706 || out_attr[Tag_ABI_align_preserved].i == 0))
14707 {
14708 /* This error message should be enabled once all non-conformant
14709 binaries in the toolchain have had the attributes set
14710 properly.
14711 _bfd_error_handler
14712 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14713 obfd, ibfd);
14714 result = FALSE; */
14715 }
14716 /* Fall through. */
14717 case Tag_ABI_FP_denormal:
14718 case Tag_ABI_PCS_GOT_use:
14719 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14720 value if greater than 2 (for future-proofing). */
14721 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14722 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14723 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14724 out_attr[i].i = in_attr[i].i;
14725 break;
14726
14727 case Tag_Virtualization_use:
14728 /* The virtualization tag effectively stores two bits of
14729 information: the intended use of TrustZone (in bit 0), and the
14730 intended use of Virtualization (in bit 1). */
14731 if (out_attr[i].i == 0)
14732 out_attr[i].i = in_attr[i].i;
14733 else if (in_attr[i].i != 0
14734 && in_attr[i].i != out_attr[i].i)
14735 {
14736 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14737 out_attr[i].i = 3;
14738 else
14739 {
14740 _bfd_error_handler
14741 (_("error: %pB: unable to merge virtualization attributes "
14742 "with %pB"),
14743 obfd, ibfd);
14744 result = FALSE;
14745 }
14746 }
14747 break;
14748
14749 case Tag_CPU_arch_profile:
14750 if (out_attr[i].i != in_attr[i].i)
14751 {
14752 /* 0 will merge with anything.
14753 'A' and 'S' merge to 'A'.
14754 'R' and 'S' merge to 'R'.
14755 'M' and 'A|R|S' is an error. */
14756 if (out_attr[i].i == 0
14757 || (out_attr[i].i == 'S'
14758 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14759 out_attr[i].i = in_attr[i].i;
14760 else if (in_attr[i].i == 0
14761 || (in_attr[i].i == 'S'
14762 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14763 ; /* Do nothing. */
14764 else
14765 {
14766 _bfd_error_handler
14767 (_("error: %pB: conflicting architecture profiles %c/%c"),
14768 ibfd,
14769 in_attr[i].i ? in_attr[i].i : '0',
14770 out_attr[i].i ? out_attr[i].i : '0');
14771 result = FALSE;
14772 }
14773 }
14774 break;
14775
14776 case Tag_DSP_extension:
14777 /* No need to change output value if any of:
14778 - pre (<=) ARMv5T input architecture (do not have DSP)
14779 - M input profile not ARMv7E-M and do not have DSP. */
14780 if (in_attr[Tag_CPU_arch].i <= 3
14781 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14782 && in_attr[Tag_CPU_arch].i != 13
14783 && in_attr[i].i == 0))
14784 ; /* Do nothing. */
14785 /* Output value should be 0 if DSP part of architecture, ie.
14786 - post (>=) ARMv5te architecture output
14787 - A, R or S profile output or ARMv7E-M output architecture. */
14788 else if (out_attr[Tag_CPU_arch].i >= 4
14789 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14790 || out_attr[Tag_CPU_arch_profile].i == 'R'
14791 || out_attr[Tag_CPU_arch_profile].i == 'S'
14792 || out_attr[Tag_CPU_arch].i == 13))
14793 out_attr[i].i = 0;
14794 /* Otherwise, DSP instructions are added and not part of output
14795 architecture. */
14796 else
14797 out_attr[i].i = 1;
14798 break;
14799
14800 case Tag_FP_arch:
14801 {
14802 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14803 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14804 when it's 0. It might mean absence of FP hardware if
14805 Tag_FP_arch is zero. */
14806
14807 #define VFP_VERSION_COUNT 9
14808 static const struct
14809 {
14810 int ver;
14811 int regs;
14812 } vfp_versions[VFP_VERSION_COUNT] =
14813 {
14814 {0, 0},
14815 {1, 16},
14816 {2, 16},
14817 {3, 32},
14818 {3, 16},
14819 {4, 32},
14820 {4, 16},
14821 {8, 32},
14822 {8, 16}
14823 };
14824 int ver;
14825 int regs;
14826 int newval;
14827
14828 /* If the output has no requirement about FP hardware,
14829 follow the requirement of the input. */
14830 if (out_attr[i].i == 0)
14831 {
14832 /* This assert is still reasonable, we shouldn't
14833 produce the suspicious build attribute
14834 combination (See below for in_attr). */
14835 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14836 out_attr[i].i = in_attr[i].i;
14837 out_attr[Tag_ABI_HardFP_use].i
14838 = in_attr[Tag_ABI_HardFP_use].i;
14839 break;
14840 }
14841 /* If the input has no requirement about FP hardware, do
14842 nothing. */
14843 else if (in_attr[i].i == 0)
14844 {
14845 /* We used to assert that Tag_ABI_HardFP_use was
14846 zero here, but we should never assert when
14847 consuming an object file that has suspicious
14848 build attributes. The single precision variant
14849 of 'no FP architecture' is still 'no FP
14850 architecture', so we just ignore the tag in this
14851 case. */
14852 break;
14853 }
14854
14855 /* Both the input and the output have nonzero Tag_FP_arch.
14856 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14857
14858 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14859 do nothing. */
14860 if (in_attr[Tag_ABI_HardFP_use].i == 0
14861 && out_attr[Tag_ABI_HardFP_use].i == 0)
14862 ;
14863 /* If the input and the output have different Tag_ABI_HardFP_use,
14864 the combination of them is 0 (implied by Tag_FP_arch). */
14865 else if (in_attr[Tag_ABI_HardFP_use].i
14866 != out_attr[Tag_ABI_HardFP_use].i)
14867 out_attr[Tag_ABI_HardFP_use].i = 0;
14868
14869 /* Now we can handle Tag_FP_arch. */
14870
14871 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14872 pick the biggest. */
14873 if (in_attr[i].i >= VFP_VERSION_COUNT
14874 && in_attr[i].i > out_attr[i].i)
14875 {
14876 out_attr[i] = in_attr[i];
14877 break;
14878 }
14879 /* The output uses the superset of input features
14880 (ISA version) and registers. */
14881 ver = vfp_versions[in_attr[i].i].ver;
14882 if (ver < vfp_versions[out_attr[i].i].ver)
14883 ver = vfp_versions[out_attr[i].i].ver;
14884 regs = vfp_versions[in_attr[i].i].regs;
14885 if (regs < vfp_versions[out_attr[i].i].regs)
14886 regs = vfp_versions[out_attr[i].i].regs;
14887 /* This assumes all possible supersets are also a valid
14888 options. */
14889 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14890 {
14891 if (regs == vfp_versions[newval].regs
14892 && ver == vfp_versions[newval].ver)
14893 break;
14894 }
14895 out_attr[i].i = newval;
14896 }
14897 break;
14898 case Tag_PCS_config:
14899 if (out_attr[i].i == 0)
14900 out_attr[i].i = in_attr[i].i;
14901 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14902 {
14903 /* It's sometimes ok to mix different configs, so this is only
14904 a warning. */
14905 _bfd_error_handler
14906 (_("warning: %pB: conflicting platform configuration"), ibfd);
14907 }
14908 break;
14909 case Tag_ABI_PCS_R9_use:
14910 if (in_attr[i].i != out_attr[i].i
14911 && out_attr[i].i != AEABI_R9_unused
14912 && in_attr[i].i != AEABI_R9_unused)
14913 {
14914 _bfd_error_handler
14915 (_("error: %pB: conflicting use of R9"), ibfd);
14916 result = FALSE;
14917 }
14918 if (out_attr[i].i == AEABI_R9_unused)
14919 out_attr[i].i = in_attr[i].i;
14920 break;
14921 case Tag_ABI_PCS_RW_data:
14922 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14923 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14924 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14925 {
14926 _bfd_error_handler
14927 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14928 ibfd);
14929 result = FALSE;
14930 }
14931 /* Use the smallest value specified. */
14932 if (in_attr[i].i < out_attr[i].i)
14933 out_attr[i].i = in_attr[i].i;
14934 break;
14935 case Tag_ABI_PCS_wchar_t:
14936 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14937 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14938 {
14939 _bfd_error_handler
14940 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14941 ibfd, in_attr[i].i, out_attr[i].i);
14942 }
14943 else if (in_attr[i].i && !out_attr[i].i)
14944 out_attr[i].i = in_attr[i].i;
14945 break;
14946 case Tag_ABI_enum_size:
14947 if (in_attr[i].i != AEABI_enum_unused)
14948 {
14949 if (out_attr[i].i == AEABI_enum_unused
14950 || out_attr[i].i == AEABI_enum_forced_wide)
14951 {
14952 /* The existing object is compatible with anything.
14953 Use whatever requirements the new object has. */
14954 out_attr[i].i = in_attr[i].i;
14955 }
14956 else if (in_attr[i].i != AEABI_enum_forced_wide
14957 && out_attr[i].i != in_attr[i].i
14958 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14959 {
14960 static const char *aeabi_enum_names[] =
14961 { "", "variable-size", "32-bit", "" };
14962 const char *in_name =
14963 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14964 ? aeabi_enum_names[in_attr[i].i]
14965 : "<unknown>";
14966 const char *out_name =
14967 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14968 ? aeabi_enum_names[out_attr[i].i]
14969 : "<unknown>";
14970 _bfd_error_handler
14971 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14972 ibfd, in_name, out_name);
14973 }
14974 }
14975 break;
14976 case Tag_ABI_VFP_args:
14977 /* Aready done. */
14978 break;
14979 case Tag_ABI_WMMX_args:
14980 if (in_attr[i].i != out_attr[i].i)
14981 {
14982 _bfd_error_handler
14983 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14984 ibfd, obfd);
14985 result = FALSE;
14986 }
14987 break;
14988 case Tag_compatibility:
14989 /* Merged in target-independent code. */
14990 break;
14991 case Tag_ABI_HardFP_use:
14992 /* This is handled along with Tag_FP_arch. */
14993 break;
14994 case Tag_ABI_FP_16bit_format:
14995 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14996 {
14997 if (in_attr[i].i != out_attr[i].i)
14998 {
14999 _bfd_error_handler
15000 (_("error: fp16 format mismatch between %pB and %pB"),
15001 ibfd, obfd);
15002 result = FALSE;
15003 }
15004 }
15005 if (in_attr[i].i != 0)
15006 out_attr[i].i = in_attr[i].i;
15007 break;
15008
15009 case Tag_DIV_use:
15010 /* A value of zero on input means that the divide instruction may
15011 be used if available in the base architecture as specified via
15012 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
15013 the user did not want divide instructions. A value of 2
15014 explicitly means that divide instructions were allowed in ARM
15015 and Thumb state. */
15016 if (in_attr[i].i == out_attr[i].i)
15017 /* Do nothing. */ ;
15018 else if (elf32_arm_attributes_forbid_div (in_attr)
15019 && !elf32_arm_attributes_accept_div (out_attr))
15020 out_attr[i].i = 1;
15021 else if (elf32_arm_attributes_forbid_div (out_attr)
15022 && elf32_arm_attributes_accept_div (in_attr))
15023 out_attr[i].i = in_attr[i].i;
15024 else if (in_attr[i].i == 2)
15025 out_attr[i].i = in_attr[i].i;
15026 break;
15027
15028 case Tag_MPextension_use_legacy:
15029 /* We don't output objects with Tag_MPextension_use_legacy - we
15030 move the value to Tag_MPextension_use. */
15031 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15032 {
15033 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15034 {
15035 _bfd_error_handler
15036 (_("%pB has both the current and legacy "
15037 "Tag_MPextension_use attributes"),
15038 ibfd);
15039 result = FALSE;
15040 }
15041 }
15042
15043 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15044 out_attr[Tag_MPextension_use] = in_attr[i];
15045
15046 break;
15047
15048 case Tag_nodefaults:
15049 /* This tag is set if it exists, but the value is unused (and is
15050 typically zero). We don't actually need to do anything here -
15051 the merge happens automatically when the type flags are merged
15052 below. */
15053 break;
15054 case Tag_also_compatible_with:
15055 /* Already done in Tag_CPU_arch. */
15056 break;
15057 case Tag_conformance:
15058 /* Keep the attribute if it matches. Throw it away otherwise.
15059 No attribute means no claim to conform. */
15060 if (!in_attr[i].s || !out_attr[i].s
15061 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15062 out_attr[i].s = NULL;
15063 break;
15064
15065 default:
15066 result
15067 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15068 }
15069
15070 /* If out_attr was copied from in_attr then it won't have a type yet. */
15071 if (in_attr[i].type && !out_attr[i].type)
15072 out_attr[i].type = in_attr[i].type;
15073 }
15074
15075 /* Merge Tag_compatibility attributes and any common GNU ones. */
15076 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15077 return FALSE;
15078
15079 /* Check for any attributes not known on ARM. */
15080 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15081
15082 return result;
15083 }
15084
15085
/* Return TRUE if the two EABI versions IVER and OVER are compatible
   with each other.  */

static bfd_boolean
elf32_arm_versions_compatible (unsigned iver, unsigned over)
{
  /* v4 and v5 are the same spec before and after it was released,
     so allow mixing them.  */
  if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
      || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
    return TRUE;

  /* Otherwise only identical versions are compatible.  */
  return (iver == over);
}
15099
15100 /* Merge backend specific data from an object file to the output
15101 object file when linking. */
15102
15103 static bfd_boolean
15104 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15105
/* Display the flags field.

   ABFD is the bfd being described; PTR is really a FILE * to print to.
   Decodes the ELF header e_flags according to the object's EABI
   version and prints a human-readable description of each known flag,
   then reports whether any unrecognised bits remain.  Always returns
   TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* The meaning of the low flag bits depends on which EABI version the
     object declares in the top bits of e_flags.  Each case below clears
     the bits it has decoded so that any leftovers can be flagged as
     unrecognised at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* v4 shares the BE8/LE8 flags with v5; jump to the common code.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* These two bits are valid regardless of EABI version.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
15250
15251 static int
15252 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15253 {
15254 switch (ELF_ST_TYPE (elf_sym->st_info))
15255 {
15256 case STT_ARM_TFUNC:
15257 return ELF_ST_TYPE (elf_sym->st_info);
15258
15259 case STT_ARM_16BIT:
15260 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15261 This allows us to distinguish between data used by Thumb instructions
15262 and non-data (which is probably code) inside Thumb regions of an
15263 executable. */
15264 if (type != STT_OBJECT && type != STT_TLS)
15265 return ELF_ST_TYPE (elf_sym->st_info);
15266 break;
15267
15268 default:
15269 break;
15270 }
15271
15272 return type;
15273 }
15274
15275 static asection *
15276 elf32_arm_gc_mark_hook (asection *sec,
15277 struct bfd_link_info *info,
15278 Elf_Internal_Rela *rel,
15279 struct elf_link_hash_entry *h,
15280 Elf_Internal_Sym *sym)
15281 {
15282 if (h != NULL)
15283 switch (ELF32_R_TYPE (rel->r_info))
15284 {
15285 case R_ARM_GNU_VTINHERIT:
15286 case R_ARM_GNU_VTENTRY:
15287 return NULL;
15288 }
15289
15290 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15291 }
15292
/* Look through the relocs for a section during the first phase.

   Scans RELOCS of section SEC in input bfd ABFD and records, in the
   link hash table for INFO, everything that later passes will need:
   GOT and PLT reference counts, TLS access types, counts of FDPIC
   function-descriptor references, and lists of relocations that may
   have to be copied into the output as dynamic relocs.  Also creates
   the dynamic/GOT/ifunc sections on first need.  Returns FALSE on
   error.  */

static bfd_boolean
elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
			asection *sec, const Elf_Internal_Rela *relocs)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  const Elf_Internal_Rela *rel;
  const Elf_Internal_Rela *rel_end;
  bfd *dynobj;
  asection *sreloc;
  struct elf32_arm_link_hash_table *htab;
  bfd_boolean call_reloc_p;
  bfd_boolean may_become_dynamic_p;
  bfd_boolean may_need_local_target_p;
  unsigned long nsyms;

  /* Nothing to record when producing a relocatable (ld -r) output.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  BFD_ASSERT (is_arm_elf (abfd));

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  sreloc = NULL;

  /* Create dynamic sections for relocatable executables so that we can
     copy relocations.  */
  if (htab->root.is_relocatable_executable
      && ! htab->root.dynamic_sections_created)
    {
      if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
	return FALSE;
    }

  if (htab->root.dynobj == NULL)
    htab->root.dynobj = abfd;
  if (!create_ifunc_sections (info))
    return FALSE;

  dynobj = htab->root.dynobj;

  symtab_hdr = & elf_symtab_hdr (abfd);
  sym_hashes = elf_sym_hashes (abfd);
  nsyms = NUM_SHDR_ENTRIES (symtab_hdr);

  rel_end = relocs + sec->reloc_count;
  for (rel = relocs; rel < rel_end; rel++)
    {
      Elf_Internal_Sym *isym;
      struct elf_link_hash_entry *h;
      struct elf32_arm_link_hash_entry *eh;
      unsigned int r_symndx;
      int r_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = arm_real_reloc_type (htab, r_type);

      if (r_symndx >= nsyms
	  /* PR 9934: It is possible to have relocations that do not
	     refer to symbols, thus it is also possible to have an
	     object file containing relocations but no symbol table.  */
	  && (r_symndx > STN_UNDEF || nsyms > 0))
	{
	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
			      r_symndx);
	  return FALSE;
	}

      /* Resolve the symbol the relocation refers to: a local ELF
	 symbol (ISYM) or a global hash table entry (H), following any
	 indirect/warning links for the latter.  */
      h = NULL;
      isym = NULL;
      if (nsyms > 0)
	{
	  if (r_symndx < symtab_hdr->sh_info)
	    {
	      /* A local symbol.  */
	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
					    abfd, r_symndx);
	      if (isym == NULL)
		return FALSE;
	    }
	  else
	    {
	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
	      while (h->root.type == bfd_link_hash_indirect
		     || h->root.type == bfd_link_hash_warning)
		h = (struct elf_link_hash_entry *) h->root.u.i.link;
	    }
	}

      eh = (struct elf32_arm_link_hash_entry *) h;

      /* Flags accumulated by the switch below and acted on after it:
	 CALL_RELOC_P - this reloc is a branch/call that may need a PLT;
	 MAY_BECOME_DYNAMIC_P - may have to be copied to the output as a
	 dynamic reloc; MAY_NEED_LOCAL_TARGET_P - the target address is
	 used directly, so a local PLT/copy-reloc may be needed.  */
      call_reloc_p = FALSE;
      may_become_dynamic_p = FALSE;
      may_need_local_target_p = FALSE;

      /* Could be done earlier, if h were already available.  */
      r_type = elf32_arm_tls_transition (info, r_type, h);
      switch (r_type)
	{
	case R_ARM_GOTOFFFUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.gotofffuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOTFUNCDESC:
	  {
	    if (h == NULL)
	      {
		/* Such a relocation is not supposed to be generated
		   by gcc on a static function.  */
		/* Anyway if needed it could be handled.  */
		abort();
	      }
	    else
	      {
		eh->fdpic_cnts.gotfuncdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_FUNCDESC:
	  {
	    if (h == NULL)
	      {
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
	      }
	    else
	      {
		eh->fdpic_cnts.funcdesc_cnt++;
	      }
	  }
	  break;

	case R_ARM_GOT32:
	case R_ARM_GOT_PREL:
	case R_ARM_TLS_GD32:
	case R_ARM_TLS_GD32_FDPIC:
	case R_ARM_TLS_IE32:
	case R_ARM_TLS_IE32_FDPIC:
	case R_ARM_TLS_GOTDESC:
	case R_ARM_TLS_DESCSEQ:
	case R_ARM_THM_TLS_DESCSEQ:
	case R_ARM_TLS_CALL:
	case R_ARM_THM_TLS_CALL:
	  /* This symbol requires a global offset table entry.  */
	  {
	    int tls_type, old_tls_type;

	    switch (r_type)
	      {
	      case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
	      case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;

	      case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
	      case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;

	      case R_ARM_TLS_GOTDESC:
	      case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
	      case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
		tls_type = GOT_TLS_GDESC; break;

	      default: tls_type = GOT_NORMAL; break;
	      }

	    if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
	      info->flags |= DF_STATIC_TLS;

	    /* Bump the (local or global) GOT reference count and fetch
	       the TLS access type recorded so far for this symbol.  */
	    if (h != NULL)
	      {
		h->got.refcount++;
		old_tls_type = elf32_arm_hash_entry (h)->tls_type;
	      }
	    else
	      {
		/* This is a global offset table entry for a local symbol.  */
		if (!elf32_arm_allocate_local_sym_info (abfd))
		  return FALSE;
		elf_local_got_refcounts (abfd)[r_symndx] += 1;
		old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
	      }

	    /* If a variable is accessed with both tls methods, two
	       slots may be created.  */
	    if (GOT_TLS_GD_ANY_P (old_tls_type)
		&& GOT_TLS_GD_ANY_P (tls_type))
	      tls_type |= old_tls_type;

	    /* We will already have issued an error message if there
	       is a TLS/non-TLS mismatch, based on the symbol
	       type.  So just combine any TLS types needed.  */
	    if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
		&& tls_type != GOT_NORMAL)
	      tls_type |= old_tls_type;

	    /* If the symbol is accessed in both IE and GDESC
	       method, we're able to relax.  Turn off the GDESC flag,
	       without messing up with any other kind of tls types
	       that may be involved.  */
	    if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
	      tls_type &= ~GOT_TLS_GDESC;

	    if (old_tls_type != tls_type)
	      {
		if (h != NULL)
		  elf32_arm_hash_entry (h)->tls_type = tls_type;
		else
		  elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
	      }
	  }
	  /* Fall through.  */

	case R_ARM_TLS_LDM32:
	case R_ARM_TLS_LDM32_FDPIC:
	  if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
	    htab->tls_ldm_got.refcount++;
	  /* Fall through.  */

	case R_ARM_GOTOFF32:
	case R_ARM_GOTPC:
	  /* All GOT-relative relocs need the GOT section to exist.  */
	  if (htab->root.sgot == NULL
	      && !create_got_section (htab->root.dynobj, info))
	    return FALSE;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	case R_ARM_PREL31:
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
	  call_reloc_p = TRUE;
	  may_need_local_target_p = TRUE;
	  break;

	case R_ARM_ABS12:
	  /* VxWorks uses dynamic R_ARM_ABS12 relocations for
	     ldr __GOTT_INDEX__ offsets.  */
	  if (!htab->vxworks_p)
	    {
	      may_need_local_target_p = TRUE;
	      break;
	    }
	  else goto jump_over;

	  /* Fall through.  */

	case R_ARM_MOVW_ABS_NC:
	case R_ARM_MOVT_ABS:
	case R_ARM_THM_MOVW_ABS_NC:
	case R_ARM_THM_MOVT_ABS:
	  /* MOVW/MOVT absolute relocs cannot be made PIC: the address
	     is split across two instructions.  */
	  if (bfd_link_pic (info))
	    {
	      _bfd_error_handler
		(_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
		 abfd, elf32_arm_howto_table_1[r_type].name,
		 (h) ? h->root.root.string : "a local symbol");
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }

	  /* Fall through.  */
	case R_ARM_ABS32:
	case R_ARM_ABS32_NOI:
	jump_over:
	  if (h != NULL && bfd_link_executable (info))
	    {
	      h->pointer_equality_needed = 1;
	    }
	  /* Fall through.  */
	case R_ARM_REL32:
	case R_ARM_REL32_NOI:
	case R_ARM_MOVW_PREL_NC:
	case R_ARM_MOVT_PREL:
	case R_ARM_THM_MOVW_PREL_NC:
	case R_ARM_THM_MOVT_PREL:

	  /* Should the interworking branches be listed here?  */
	  if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
	       || htab->fdpic_p)
	      && (sec->flags & SEC_ALLOC) != 0)
	    {
	      if (h == NULL
		  && elf32_arm_howto_from_type (r_type)->pc_relative)
		{
		  /* In shared libraries and relocatable executables,
		     we treat local relative references as calls;
		     see the related SYMBOL_CALLS_LOCAL code in
		     allocate_dynrelocs.  */
		  call_reloc_p = TRUE;
		  may_need_local_target_p = TRUE;
		}
	      else
		/* We are creating a shared library or relocatable
		   executable, and this is a reloc against a global symbol,
		   or a non-PC-relative reloc against a local symbol.
		   We may need to copy the reloc into the output.  */
		may_become_dynamic_p = TRUE;
	    }
	  else
	    may_need_local_target_p = TRUE;
	  break;

	/* This relocation describes the C++ object vtable hierarchy.
	   Reconstruct it for later use during GC.  */
	case R_ARM_GNU_VTINHERIT:
	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;

	/* This relocation describes which C++ vtable entries are actually
	   used.  Record for later use during GC.  */
	case R_ARM_GNU_VTENTRY:
	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
	    return FALSE;
	  break;
	}

      if (h != NULL)
	{
	  if (call_reloc_p)
	    /* We may need a .plt entry if the function this reloc
	       refers to is in a different object, regardless of the
	       symbol's type.  We can't tell for sure yet, because
	       something later might force the symbol local.  */
	    h->needs_plt = 1;
	  else if (may_need_local_target_p)
	    /* If this reloc is in a read-only section, we might
	       need a copy reloc.  We can't check reliably at this
	       stage whether the section is read-only, as input
	       sections have not yet been mapped to output sections.
	       Tentatively set the flag for now, and correct in
	       adjust_dynamic_symbol.  */
	    h->non_got_ref = 1;
	}

      if (may_need_local_target_p
	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
	{
	  union gotplt_union *root_plt;
	  struct arm_plt_info *arm_plt;
	  struct arm_local_iplt_info *local_iplt;

	  /* Use the hash entry's PLT data for globals, or the per-bfd
	     iplt record for local ifunc symbols.  */
	  if (h != NULL)
	    {
	      root_plt = &h->plt;
	      arm_plt = &eh->plt;
	    }
	  else
	    {
	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
	      if (local_iplt == NULL)
		return FALSE;
	      root_plt = &local_iplt->root;
	      arm_plt = &local_iplt->arm;
	    }

	  /* If the symbol is a function that doesn't bind locally,
	     this relocation will need a PLT entry.  */
	  if (root_plt->refcount != -1)
	    root_plt->refcount += 1;

	  if (!call_reloc_p)
	    arm_plt->noncall_refcount++;

	  /* It's too early to use htab->use_blx here, so we have to
	     record possible blx references separately from
	     relocs that definitely need a thumb stub.  */

	  if (r_type == R_ARM_THM_CALL)
	    arm_plt->maybe_thumb_refcount += 1;

	  if (r_type == R_ARM_THM_JUMP24
	      || r_type == R_ARM_THM_JUMP19)
	    arm_plt->thumb_refcount += 1;
	}

      if (may_become_dynamic_p)
	{
	  struct elf_dyn_relocs *p, **head;

	  /* Create a reloc section in dynobj.  */
	  if (sreloc == NULL)
	    {
	      sreloc = _bfd_elf_make_dynamic_reloc_section
		(sec, dynobj, 2, abfd, ! htab->use_rel);

	      if (sreloc == NULL)
		return FALSE;

	      /* BPABI objects never have dynamic relocations mapped.  */
	      if (htab->symbian_p)
		{
		  flagword flags;

		  flags = bfd_section_flags (sreloc);
		  flags &= ~(SEC_LOAD | SEC_ALLOC);
		  bfd_set_section_flags (sreloc, flags);
		}
	    }

	  /* If this is a global symbol, count the number of
	     relocations we need for this symbol.  */
	  if (h != NULL)
	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
	  else
	    {
	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
	      if (head == NULL)
		return FALSE;
	    }

	  /* Per-section reloc counts are kept at the head of the list;
	     start a new record if SEC is not the current head.  */
	  p = *head;
	  if (p == NULL || p->sec != sec)
	    {
	      bfd_size_type amt = sizeof *p;

	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
	      if (p == NULL)
		return FALSE;
	      p->next = *head;
	      *head = p;
	      p->sec = sec;
	      p->count = 0;
	      p->pc_count = 0;
	    }

	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
	    p->pc_count += 1;
	  p->count += 1;
	  /* NOTE(review): this error path prints a raw untranslated
	     message to stderr in addition to _bfd_error_handler, and
	     then abort()s the whole process from library code; it
	     should arguably set bfd_error and return FALSE instead.  */
	  if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
	    /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
	       that will become rofixup.  */
	    /* This is due to the fact that we suppose all will become rofixup.  */
	    fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
	    _bfd_error_handler
	      (_("FDPIC does not yet support %s relocation"
		 " to become dynamic for executable"),
	       elf32_arm_howto_table_1[r_type].name);
	    abort();
	  }
	}
    }

  return TRUE;
}
15760
/* Rewrite the relocations of output section O (which must be an
   SHT_ARM_EXIDX exception-index section) so that they match the
   unwind-table edits recorded earlier for its input sections:
   relocations for deleted EXIDX entries are dropped, offsets are
   re-biased, and a relocation is appended for any CANTUNWIND entry
   inserted at the end.  RELDATA describes O's relocation section;
   its contents, count and size are updated in place.  For any other
   section this is a no-op.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Pick REL or RELA swappers depending on the entry size.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Read every surviving relocation into IRELA_HEAD (one extra slot
     for a possible CANTUNWIND terminator), then write them back over
     the external records.
     NOTE(review): the bfd_zmalloc result is not checked for NULL
     before use.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Linker-generated relocs are kept unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = i->output_offset;

	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* EXIDX entries are 8 bytes each.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts edits at or before this entry; each one
		     shifted the entry down by 8 bytes.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the reloc unless its own entry was deleted.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy all relocs.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Swap the surviving relocations back out over the old contents.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15921
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).

   INFO is the link, GC_MARK_HOOK the hook used to mark dependent
   sections.  Returns FALSE only on marking failure.  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
  bfd_boolean debug_sec_need_to_be_marked = FALSE;
  asection *isec;

  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Decide whether the output targets an M-profile v8-M (or later)
     core, i.e. whether CMSE secure entry symbols may exist.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark each unmarked EXIDX section whose linked text section
	     (via sh_link) has been marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (CONST_STRNEQ (cmse_hash->root.root.root.string,
				    CMSE_PREFIX))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		      /* The debug sections related to these secure entry
			 functions are marked on enabling below flag.  */
		      debug_sec_need_to_be_marked = TRUE;
		    }
		}

	      if (debug_sec_need_to_be_marked)
		{
		  /* Looping over all the sections of the object file containing
		     Armv8-M secure entry functions and marking all the debug
		     sections.  */
		  for (isec = sub->sections; isec != NULL; isec = isec->next)
		    {
		      /* Mark any debug section that is not already marked.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			isec->gc_mark = 1 ;
		    }
		  debug_sec_need_to_be_marked = FALSE;
		}
	    }
	}
      /* CMSE symbol marking only needs doing once.  */
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
16033
16034 /* Treat mapping symbols as special target symbols. */
16035
16036 static bfd_boolean
16037 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16038 {
16039 return bfd_is_arm_special_symbol_name (sym->name,
16040 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16041 }
16042
16043 /* This is a version of _bfd_elf_find_function() from dwarf2.c except that
16044 ARM mapping symbols are ignored when looking for function names
16045 and STT_ARM_TFUNC is considered to a function type. */
16046
16047 static bfd_boolean
16048 arm_elf_find_function (bfd * abfd,
16049 asymbol ** symbols,
16050 asection * section,
16051 bfd_vma offset,
16052 const char ** filename_ptr,
16053 const char ** functionname_ptr)
16054 {
16055 const char * filename = NULL;
16056 asymbol * func = NULL;
16057 bfd_vma low_func = 0;
16058 asymbol ** p;
16059
16060 if (symbols == NULL)
16061 return FALSE;
16062
16063 if (bfd_get_flavour (abfd) != bfd_target_elf_flavour)
16064 return FALSE;
16065
16066 for (p = symbols; *p != NULL; p++)
16067 {
16068 elf_symbol_type *q;
16069
16070 q = (elf_symbol_type *) *p;
16071
16072 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
16073 {
16074 default:
16075 break;
16076 case STT_FILE:
16077 filename = bfd_asymbol_name (&q->symbol);
16078 break;
16079 case STT_FUNC:
16080 case STT_ARM_TFUNC:
16081 case STT_NOTYPE:
16082 /* Skip mapping symbols. */
16083 if ((q->symbol.flags & BSF_LOCAL)
16084 && bfd_is_arm_special_symbol_name (q->symbol.name,
16085 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16086 continue;
16087 /* Fall through. */
16088 if (bfd_asymbol_section (&q->symbol) == section
16089 && q->symbol.value >= low_func
16090 && q->symbol.value <= offset)
16091 {
16092 func = (asymbol *) q;
16093 low_func = q->symbol.value;
16094 }
16095 break;
16096 }
16097 }
16098
16099 if (func == NULL)
16100 return FALSE;
16101
16102 if (filename_ptr)
16103 *filename_ptr = filename;
16104 if (functionname_ptr)
16105 *functionname_ptr = bfd_asymbol_name (func);
16106
16107 return TRUE;
16108 }
16109
16110
/* Find the nearest line to a particular section and offset, for error
   reporting.  This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.

   Tries DWARF2 first, then stabs, then falls back to a plain symbol
   table scan; on the fallback path the line number is reported as 0.
   Returns TRUE if any source location information was found.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may have found the line but not the function name;
	 fill that in from the symbol table, skipping mapping symbols.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: nearest preceding function symbol, no line number.  */
  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
16163
16164 static bfd_boolean
16165 elf32_arm_find_inliner_info (bfd * abfd,
16166 const char ** filename_ptr,
16167 const char ** functionname_ptr,
16168 unsigned int * line_ptr)
16169 {
16170 bfd_boolean found;
16171 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16172 functionname_ptr, line_ptr,
16173 & elf_tdata (abfd)->dwarf2_find_line_info);
16174 return found;
16175 }
16176
16177 /* Find dynamic relocs for H that apply to read-only sections. */
16178
16179 static asection *
16180 readonly_dynrelocs (struct elf_link_hash_entry *h)
16181 {
16182 struct elf_dyn_relocs *p;
16183
16184 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
16185 {
16186 asection *s = p->sec->output_section;
16187
16188 if (s != NULL && (s->flags & SEC_READONLY) != 0)
16189 return p->sec;
16190 }
16191 return NULL;
16192 }
16193
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  Returns FALSE only if the ARM hash table is missing.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.
	     Clear all the PLT bookkeeping (ARM and Thumb refcounts)
	     so later passes see a consistent "no PLT" state.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Functions never need copy relocs or .dynbss space; done.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs; Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  /* Read-only definitions get copied into the relro data area
     (sdynrelro/sreldynrelro); writable ones into .dynbss.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  /* Only reserve a reloc slot when copy relocs are permitted and the
     symbol actually occupies allocated space.  */
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16328
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called once per global symbol H via
   elf_link_hash_traverse; INF is the struct bfd_link_info for the
   link.  Returns FALSE on failure to record a dynamic symbol or if
   the ARM hash table is missing.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* First: size the PLT entry, if any.  IFUNC symbols can need a PLT
     even when no dynamic sections were created.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turn means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  /* A PLT turned out not to be needed after all.  */
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      /* No PLT references at all.  */
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  /* Second: size the GOT entry (or entries, for TLS) if any.  */
  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		     consecutive GOT slots.  If the symbol is both GD
		     and GDESC, got.offset may have been
		     overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
		   slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index, or 0 when the symbol
	     resolves locally and no dynamic reference is needed.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  /* Now reserve the dynamic relocations the GOT slots need.  */
	  if (tls_type != GOT_NORMAL
	      && (bfd_link_dll (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (((indx != -1) || htab->fdpic_p)
		   && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		       || h->root.type != bfd_link_hash_undefweak))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	    /* Reserve room for rofixup for FDPIC executable.  */
	    /* TLS relocs do not need space since they are completely
	       resolved.  */
	    htab->srofixup->size += 4;
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* FDPIC support: size GOT space and relocs/rofixups for the three
     kinds of function-descriptor reference counted in check_relocs.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)
	abort();

      /* We only allocate one function descriptor with its associated
	 relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;
	  s->size += 8;
	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic(info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated
	     relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
      s->size += 4;
      if (h->dynindx == -1 && !bfd_link_pic(info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated
	     relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic(info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* Will need one dynamic reloc per reference.  Will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (eh->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  /* Unlink entries whose count drops to zero.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  /* VxWorks .tls_vars relocs are handled by the loader, so
	     drop them here.  */
	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (eh->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    eh->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      eh->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space for everything left on the list.  */
  for (p = eh->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic(info))
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
16807
16808 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16809 read-only sections. */
16810
16811 static bfd_boolean
16812 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16813 {
16814 asection *sec;
16815
16816 if (h->root.type == bfd_link_hash_indirect)
16817 return TRUE;
16818
16819 sec = readonly_dynrelocs (h);
16820 if (sec != NULL)
16821 {
16822 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16823
16824 info->flags |= DF_TEXTREL;
16825 info->callbacks->minfo
16826 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16827 sec->owner, h->root.root.string, sec);
16828
16829 /* Not an error, just cut short the traversal. */
16830 return FALSE;
16831 }
16832
16833 return TRUE;
16834 }
16835
16836 void
16837 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16838 int byteswap_code)
16839 {
16840 struct elf32_arm_link_hash_table *globals;
16841
16842 globals = elf32_arm_hash_table (info);
16843 if (globals == NULL)
16844 return;
16845
16846 globals->byteswap_code = byteswap_code;
16847 }
16848
16849 /* Set the sizes of the dynamic sections. */
16850
16851 static bfd_boolean
16852 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16853 struct bfd_link_info * info)
16854 {
16855 bfd * dynobj;
16856 asection * s;
16857 bfd_boolean plt;
16858 bfd_boolean relocs;
16859 bfd *ibfd;
16860 struct elf32_arm_link_hash_table *htab;
16861
16862 htab = elf32_arm_hash_table (info);
16863 if (htab == NULL)
16864 return FALSE;
16865
16866 dynobj = elf_hash_table (info)->dynobj;
16867 BFD_ASSERT (dynobj != NULL);
16868 check_use_blx (htab);
16869
16870 if (elf_hash_table (info)->dynamic_sections_created)
16871 {
16872 /* Set the contents of the .interp section to the interpreter. */
16873 if (bfd_link_executable (info) && !info->nointerp)
16874 {
16875 s = bfd_get_linker_section (dynobj, ".interp");
16876 BFD_ASSERT (s != NULL);
16877 s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16878 s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16879 }
16880 }
16881
16882 /* Set up .got offsets for local syms, and space for local dynamic
16883 relocs. */
16884 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16885 {
16886 bfd_signed_vma *local_got;
16887 bfd_signed_vma *end_local_got;
16888 struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16889 char *local_tls_type;
16890 bfd_vma *local_tlsdesc_gotent;
16891 bfd_size_type locsymcount;
16892 Elf_Internal_Shdr *symtab_hdr;
16893 asection *srel;
16894 bfd_boolean is_vxworks = htab->vxworks_p;
16895 unsigned int symndx;
16896 struct fdpic_local *local_fdpic_cnts;
16897
16898 if (! is_arm_elf (ibfd))
16899 continue;
16900
16901 for (s = ibfd->sections; s != NULL; s = s->next)
16902 {
16903 struct elf_dyn_relocs *p;
16904
16905 for (p = (struct elf_dyn_relocs *)
16906 elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16907 {
16908 if (!bfd_is_abs_section (p->sec)
16909 && bfd_is_abs_section (p->sec->output_section))
16910 {
16911 /* Input section has been discarded, either because
16912 it is a copy of a linkonce section or due to
16913 linker script /DISCARD/, so we'll be discarding
16914 the relocs too. */
16915 }
16916 else if (is_vxworks
16917 && strcmp (p->sec->output_section->name,
16918 ".tls_vars") == 0)
16919 {
16920 /* Relocations in vxworks .tls_vars sections are
16921 handled specially by the loader. */
16922 }
16923 else if (p->count != 0)
16924 {
16925 srel = elf_section_data (p->sec)->sreloc;
16926 if (htab->fdpic_p && !bfd_link_pic(info))
16927 htab->srofixup->size += 4 * p->count;
16928 else
16929 elf32_arm_allocate_dynrelocs (info, srel, p->count);
16930 if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16931 info->flags |= DF_TEXTREL;
16932 }
16933 }
16934 }
16935
16936 local_got = elf_local_got_refcounts (ibfd);
16937 if (!local_got)
16938 continue;
16939
16940 symtab_hdr = & elf_symtab_hdr (ibfd);
16941 locsymcount = symtab_hdr->sh_info;
16942 end_local_got = local_got + locsymcount;
16943 local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16944 local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16945 local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16946 local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16947 symndx = 0;
16948 s = htab->root.sgot;
16949 srel = htab->root.srelgot;
16950 for (; local_got < end_local_got;
16951 ++local_got, ++local_iplt_ptr, ++local_tls_type,
16952 ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16953 {
16954 *local_tlsdesc_gotent = (bfd_vma) -1;
16955 local_iplt = *local_iplt_ptr;
16956
16957 /* FDPIC support. */
16958 if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16959 {
16960 if (local_fdpic_cnts->funcdesc_offset == -1)
16961 {
16962 local_fdpic_cnts->funcdesc_offset = s->size;
16963 s->size += 8;
16964
16965 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16966 if (bfd_link_pic(info))
16967 elf32_arm_allocate_dynrelocs (info, srel, 1);
16968 else
16969 htab->srofixup->size += 8;
16970 }
16971 }
16972
16973 if (local_fdpic_cnts->funcdesc_cnt > 0)
16974 {
16975 if (local_fdpic_cnts->funcdesc_offset == -1)
16976 {
16977 local_fdpic_cnts->funcdesc_offset = s->size;
16978 s->size += 8;
16979
16980 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16981 if (bfd_link_pic(info))
16982 elf32_arm_allocate_dynrelocs (info, srel, 1);
16983 else
16984 htab->srofixup->size += 8;
16985 }
16986
16987 /* We will add n R_ARM_RELATIVE relocations or n rofixups. */
16988 if (bfd_link_pic(info))
16989 elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16990 else
16991 htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16992 }
16993
16994 if (local_iplt != NULL)
16995 {
16996 struct elf_dyn_relocs *p;
16997
16998 if (local_iplt->root.refcount > 0)
16999 {
17000 elf32_arm_allocate_plt_entry (info, TRUE,
17001 &local_iplt->root,
17002 &local_iplt->arm);
17003 if (local_iplt->arm.noncall_refcount == 0)
17004 /* All references to the PLT are calls, so all
17005 non-call references can resolve directly to the
17006 run-time target. This means that the .got entry
17007 would be the same as the .igot.plt entry, so there's
17008 no point creating both. */
17009 *local_got = 0;
17010 }
17011 else
17012 {
17013 BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
17014 local_iplt->root.offset = (bfd_vma) -1;
17015 }
17016
17017 for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
17018 {
17019 asection *psrel;
17020
17021 psrel = elf_section_data (p->sec)->sreloc;
17022 if (local_iplt->arm.noncall_refcount == 0)
17023 elf32_arm_allocate_irelocs (info, psrel, p->count);
17024 else
17025 elf32_arm_allocate_dynrelocs (info, psrel, p->count);
17026 }
17027 }
17028 if (*local_got > 0)
17029 {
17030 Elf_Internal_Sym *isym;
17031
17032 *local_got = s->size;
17033 if (*local_tls_type & GOT_TLS_GD)
17034 /* TLS_GD relocs need an 8-byte structure in the GOT. */
17035 s->size += 8;
17036 if (*local_tls_type & GOT_TLS_GDESC)
17037 {
17038 *local_tlsdesc_gotent = htab->root.sgotplt->size
17039 - elf32_arm_compute_jump_table_size (htab);
17040 htab->root.sgotplt->size += 8;
17041 *local_got = (bfd_vma) -2;
17042 /* plt.got_offset needs to know there's a TLS_DESC
17043 reloc in the middle of .got.plt. */
17044 htab->num_tls_desc++;
17045 }
17046 if (*local_tls_type & GOT_TLS_IE)
17047 s->size += 4;
17048
17049 if (*local_tls_type & GOT_NORMAL)
17050 {
17051 /* If the symbol is both GD and GDESC, *local_got
17052 may have been overwritten. */
17053 *local_got = s->size;
17054 s->size += 4;
17055 }
17056
17057 isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
17058 if (isym == NULL)
17059 return FALSE;
17060
17061 /* If all references to an STT_GNU_IFUNC PLT are calls,
17062 then all non-call references, including this GOT entry,
17063 resolve directly to the run-time target. */
17064 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
17065 && (local_iplt == NULL
17066 || local_iplt->arm.noncall_refcount == 0))
17067 elf32_arm_allocate_irelocs (info, srel, 1);
17068 else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
17069 {
17070 if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
17071 elf32_arm_allocate_dynrelocs (info, srel, 1);
17072 else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
17073 htab->srofixup->size += 4;
17074
17075 if ((bfd_link_pic (info) || htab->fdpic_p)
17076 && *local_tls_type & GOT_TLS_GDESC)
17077 {
17078 elf32_arm_allocate_dynrelocs (info,
17079 htab->root.srelplt, 1);
17080 htab->tls_trampoline = -1;
17081 }
17082 }
17083 }
17084 else
17085 *local_got = (bfd_vma) -1;
17086 }
17087 }
17088
17089 if (htab->tls_ldm_got.refcount > 0)
17090 {
17091 /* Allocate two GOT entries and one dynamic relocation (if necessary)
17092 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
17093 htab->tls_ldm_got.offset = htab->root.sgot->size;
17094 htab->root.sgot->size += 8;
17095 if (bfd_link_pic (info))
17096 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
17097 }
17098 else
17099 htab->tls_ldm_got.offset = -1;
17100
17101 /* At the very end of the .rofixup section is a pointer to the GOT,
17102 reserve space for it. */
17103 if (htab->fdpic_p && htab->srofixup != NULL)
17104 htab->srofixup->size += 4;
17105
17106 /* Allocate global sym .plt and .got entries, and space for global
17107 sym dynamic relocs. */
17108 elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
17109
17110 /* Here we rummage through the found bfds to collect glue information. */
17111 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
17112 {
17113 if (! is_arm_elf (ibfd))
17114 continue;
17115
17116 /* Initialise mapping tables for code/data. */
17117 bfd_elf32_arm_init_maps (ibfd);
17118
17119 if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17120 || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17121 || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17122 _bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17123 }
17124
17125 /* Allocate space for the glue sections now that we've sized them. */
17126 bfd_elf32_arm_allocate_interworking_sections (info);
17127
17128 /* For every jump slot reserved in the sgotplt, reloc_count is
17129 incremented. However, when we reserve space for TLS descriptors,
17130 it's not incremented, so in order to compute the space reserved
17131 for them, it suffices to multiply the reloc count by the jump
17132 slot size. */
17133 if (htab->root.srelplt)
17134 htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
17135
17136 if (htab->tls_trampoline)
17137 {
17138 if (htab->root.splt->size == 0)
17139 htab->root.splt->size += htab->plt_header_size;
17140
17141 htab->tls_trampoline = htab->root.splt->size;
17142 htab->root.splt->size += htab->plt_entry_size;
17143
17144 /* If we're not using lazy TLS relocations, don't generate the
17145 PLT and GOT entries they require. */
17146 if (!(info->flags & DF_BIND_NOW))
17147 {
17148 htab->dt_tlsdesc_got = htab->root.sgot->size;
17149 htab->root.sgot->size += 4;
17150
17151 htab->dt_tlsdesc_plt = htab->root.splt->size;
17152 htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17153 }
17154 }
17155
17156 /* The check_relocs and adjust_dynamic_symbol entry points have
17157 determined the sizes of the various dynamic sections. Allocate
17158 memory for them. */
17159 plt = FALSE;
17160 relocs = FALSE;
17161 for (s = dynobj->sections; s != NULL; s = s->next)
17162 {
17163 const char * name;
17164
17165 if ((s->flags & SEC_LINKER_CREATED) == 0)
17166 continue;
17167
17168 /* It's OK to base decisions on the section name, because none
17169 of the dynobj section names depend upon the input files. */
17170 name = bfd_section_name (s);
17171
17172 if (s == htab->root.splt)
17173 {
17174 /* Remember whether there is a PLT. */
17175 plt = s->size != 0;
17176 }
17177 else if (CONST_STRNEQ (name, ".rel"))
17178 {
17179 if (s->size != 0)
17180 {
17181 /* Remember whether there are any reloc sections other
17182 than .rel(a).plt and .rela.plt.unloaded. */
17183 if (s != htab->root.srelplt && s != htab->srelplt2)
17184 relocs = TRUE;
17185
17186 /* We use the reloc_count field as a counter if we need
17187 to copy relocs into the output file. */
17188 s->reloc_count = 0;
17189 }
17190 }
17191 else if (s != htab->root.sgot
17192 && s != htab->root.sgotplt
17193 && s != htab->root.iplt
17194 && s != htab->root.igotplt
17195 && s != htab->root.sdynbss
17196 && s != htab->root.sdynrelro
17197 && s != htab->srofixup)
17198 {
17199 /* It's not one of our sections, so don't allocate space. */
17200 continue;
17201 }
17202
17203 if (s->size == 0)
17204 {
17205 /* If we don't need this section, strip it from the
17206 output file. This is mostly to handle .rel(a).bss and
17207 .rel(a).plt. We must create both sections in
17208 create_dynamic_sections, because they must be created
17209 before the linker maps input sections to output
17210 sections. The linker does that before
17211 adjust_dynamic_symbol is called, and it is that
17212 function which decides whether anything needs to go
17213 into these sections. */
17214 s->flags |= SEC_EXCLUDE;
17215 continue;
17216 }
17217
17218 if ((s->flags & SEC_HAS_CONTENTS) == 0)
17219 continue;
17220
17221 /* Allocate memory for the section contents. */
17222 s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17223 if (s->contents == NULL)
17224 return FALSE;
17225 }
17226
17227 if (elf_hash_table (info)->dynamic_sections_created)
17228 {
17229 /* Add some entries to the .dynamic section. We fill in the
17230 values later, in elf32_arm_finish_dynamic_sections, but we
17231 must add the entries now so that we get the correct size for
17232 the .dynamic section. The DT_DEBUG entry is filled in by the
17233 dynamic linker and used by the debugger. */
17234 #define add_dynamic_entry(TAG, VAL) \
17235 _bfd_elf_add_dynamic_entry (info, TAG, VAL)
17236
17237 if (bfd_link_executable (info))
17238 {
17239 if (!add_dynamic_entry (DT_DEBUG, 0))
17240 return FALSE;
17241 }
17242
17243 if (plt)
17244 {
17245 if ( !add_dynamic_entry (DT_PLTGOT, 0)
17246 || !add_dynamic_entry (DT_PLTRELSZ, 0)
17247 || !add_dynamic_entry (DT_PLTREL,
17248 htab->use_rel ? DT_REL : DT_RELA)
17249 || !add_dynamic_entry (DT_JMPREL, 0))
17250 return FALSE;
17251
17252 if (htab->dt_tlsdesc_plt
17253 && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
17254 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
17255 return FALSE;
17256 }
17257
17258 if (relocs)
17259 {
17260 if (htab->use_rel)
17261 {
17262 if (!add_dynamic_entry (DT_REL, 0)
17263 || !add_dynamic_entry (DT_RELSZ, 0)
17264 || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
17265 return FALSE;
17266 }
17267 else
17268 {
17269 if (!add_dynamic_entry (DT_RELA, 0)
17270 || !add_dynamic_entry (DT_RELASZ, 0)
17271 || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
17272 return FALSE;
17273 }
17274 }
17275
17276 /* If any dynamic relocs apply to a read-only section,
17277 then we need a DT_TEXTREL entry. */
17278 if ((info->flags & DF_TEXTREL) == 0)
17279 elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
17280
17281 if ((info->flags & DF_TEXTREL) != 0)
17282 {
17283 if (!add_dynamic_entry (DT_TEXTREL, 0))
17284 return FALSE;
17285 }
17286 if (htab->vxworks_p
17287 && !elf_vxworks_add_dynamic_entries (output_bfd, info))
17288 return FALSE;
17289 }
17290 #undef add_dynamic_entry
17291
17292 return TRUE;
17293 }
17294
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for a relocatable link.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Look up (creating if necessary) the _TLS_MODULE_BASE_ hash
	 entry that relocation processing may have referenced.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ as a local symbol at offset 0 of
	     the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* NOTE: the type is set on the looked-up entry first; then
	     TLSBASE is deliberately repointed at the entry returned by
	     _bfd_generic_link_add_one_symbol before the remaining
	     attributes are applied.  Keep this order.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC, establish the __stacksize symbol / stack segment size.  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17346
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* Fill in the symbol's PLT entry, if it has one.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Copy relocs against relro data go to .rel(a).dyn.relro;
	 everything else goes to .rel(a).bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17436
17437 static void
17438 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17439 void *contents,
17440 const unsigned long *template, unsigned count)
17441 {
17442 unsigned ix;
17443
17444 for (ix = 0; ix != count; ix++)
17445 {
17446 unsigned long insn = template[ix];
17447
17448 /* Emit mov pc,rx if bx is not permitted. */
17449 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17450 insn = (insn & 0xf000000f) | 0x01a0f000;
17451 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17452 }
17453 }
17454
17455 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17456 other variants, NaCl needs this entry in a static executable's
17457 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17458 zero. For .iplt really only the last bundle is useful, and .iplt
17459 could have a shorter first entry, with each individual PLT entry's
17460 relative branch calculated differently so it targets the last
17461 bundle instead of the instruction before it (labelled .Lplt_tail
17462 above). But it's simpler to keep the size and layout of PLT0
17463 consistent with the dynamic case, at the cost of some dead code at
17464 the start of .iplt and the one dead store to the stack at the start
17465 of .Lplt_tail. */
17466 static void
17467 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17468 asection *plt, bfd_vma got_displacement)
17469 {
17470 unsigned int i;
17471
17472 put_arm_insn (htab, output_bfd,
17473 elf32_arm_nacl_plt0_entry[0]
17474 | arm_movw_immediate (got_displacement),
17475 plt->contents + 0);
17476 put_arm_insn (htab, output_bfd,
17477 elf32_arm_nacl_plt0_entry[1]
17478 | arm_movt_immediate (got_displacement),
17479 plt->contents + 4);
17480
17481 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17482 put_arm_insn (htab, output_bfd,
17483 elf32_arm_nacl_plt0_entry[i],
17484 plt->contents + (i * 4));
17485 }
17486
/* Finish up the dynamic sections: patch the .dynamic entries whose
   values are only known now, emit the PLT header and TLS trampolines,
   initialise the first GOT words, and finalise FDPIC rofixups.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every .dynamic entry, filling in values that were left
	 as placeholders when the entries were first added.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need fixing up for the BPABI (Symbian),
	       where they must hold file offsets rather than VMAs.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Section 0 is the null section header; start at 1.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  /* For DT_REL(A), take the lowest file offset.
			     d_val starts at 0, so "d_val - 1" relies on
			     unsigned wrap-around to make the first
			     matching section always win.  */
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    /* NaCl PLT0 computes the GOT pointer itself; the bias 8/16
	       matches the NaCl PLT0 template's pc-relative layout.  */
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the lazy TLS descriptor trampoline into the PLT.  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  /* The two words after the 6 trampoline instructions hold
	     pc-relative offsets; template slots [6] and [7] appear to
	     hold the corresponding pc-bias constants — see the
	     dl_tlsdesc_lazy_trampoline definition to confirm.  */
	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  /* Pad the 3-instruction trampoline to a full 4-word entry.  */
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has two unloaded relocs: rewrite the first
	     against _GLOBAL_OFFSET_TABLE_ and the second against
	     _PROCEDURE_LINKAGE_TABLE_.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (0 when static);
	     GOT[1] and GOT[2] are reserved for the dynamic linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17855
17856 static bfd_boolean
17857 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17858 {
17859 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17860 struct elf32_arm_link_hash_table *globals;
17861 struct elf_segment_map *m;
17862
17863 if (!_bfd_elf_init_file_header (abfd, link_info))
17864 return FALSE;
17865
17866 i_ehdrp = elf_elfheader (abfd);
17867
17868 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17869 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17870 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17871
17872 if (link_info)
17873 {
17874 globals = elf32_arm_hash_table (link_info);
17875 if (globals != NULL && globals->byteswap_code)
17876 i_ehdrp->e_flags |= EF_ARM_BE8;
17877
17878 if (globals->fdpic_p)
17879 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17880 }
17881
17882 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17883 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17884 {
17885 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17886 if (abi == AEABI_VFP_args_vfp)
17887 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17888 else
17889 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17890 }
17891
17892 /* Scan segment to set p_flags attribute if it contains only sections with
17893 SHF_ARM_PURECODE flag. */
17894 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17895 {
17896 unsigned int j;
17897
17898 if (m->count == 0)
17899 continue;
17900 for (j = 0; j < m->count; j++)
17901 {
17902 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17903 break;
17904 }
17905 if (j == m->count)
17906 {
17907 m->p_flags = PF_X;
17908 m->p_flags_valid = 1;
17909 }
17910 }
17911 return TRUE;
17912 }
17913
17914 static enum elf_reloc_type_class
17915 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17916 const asection *rel_sec ATTRIBUTE_UNUSED,
17917 const Elf_Internal_Rela *rela)
17918 {
17919 switch ((int) ELF32_R_TYPE (rela->r_info))
17920 {
17921 case R_ARM_RELATIVE:
17922 return reloc_class_relative;
17923 case R_ARM_JUMP_SLOT:
17924 return reloc_class_plt;
17925 case R_ARM_COPY:
17926 return reloc_class_copy;
17927 case R_ARM_IRELATIVE:
17928 return reloc_class_ifunc;
17929 default:
17930 return reloc_class_normal;
17931 }
17932 }
17933
/* Refresh the ARM note section's contents just before the file is
   written out.  */

static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17939
/* Backend final-write hook: update the ARM notes, then run the
   generic ELF final write processing.  */

static bfd_boolean
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}
17946
17947 /* Return TRUE if this is an unwinding table entry. */
17948
17949 static bfd_boolean
17950 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17951 {
17952 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17953 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17954 }
17955
17956
17957 /* Set the type and flags for an ARM section. We do this by
17958 the section name, which is a hack, but ought to work. */
17959
17960 static bfd_boolean
17961 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17962 {
17963 const char * name;
17964
17965 name = bfd_section_name (sec);
17966
17967 if (is_arm_elf_unwind_section_name (abfd, name))
17968 {
17969 hdr->sh_type = SHT_ARM_EXIDX;
17970 hdr->sh_flags |= SHF_LINK_ORDER;
17971 }
17972
17973 if (sec->flags & SEC_ELF_PURECODE)
17974 hdr->sh_flags |= SHF_ARM_PURECODE;
17975
17976 return TRUE;
17977 }
17978
17979 /* Handle an ARM specific section when reading an object file. This is
17980 called when bfd_section_from_shdr finds a section with an unknown
17981 type. */
17982
17983 static bfd_boolean
17984 elf32_arm_section_from_shdr (bfd *abfd,
17985 Elf_Internal_Shdr * hdr,
17986 const char *name,
17987 int shindex)
17988 {
17989 /* There ought to be a place to keep ELF backend specific flags, but
17990 at the moment there isn't one. We just keep track of the
17991 sections by their name, instead. Fortunately, the ABI gives
17992 names for all the ARM specific sections, so we will probably get
17993 away with this. */
17994 switch (hdr->sh_type)
17995 {
17996 case SHT_ARM_EXIDX:
17997 case SHT_ARM_PREEMPTMAP:
17998 case SHT_ARM_ATTRIBUTES:
17999 break;
18000
18001 default:
18002 return FALSE;
18003 }
18004
18005 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
18006 return FALSE;
18007
18008 return TRUE;
18009 }
18010
18011 static _arm_elf_section_data *
18012 get_arm_elf_section_data (asection * sec)
18013 {
18014 if (sec && sec->owner && is_arm_elf (sec->owner))
18015 return elf32_arm_section_data (sec);
18016 else
18017 return NULL;
18018 }
18019
/* Context passed to the callbacks that emit linker-generated symbols
   (mapping symbols and stub symbols).  */
typedef struct
{
  void *flaginfo;		/* Opaque data passed through to FUNC.  */
  struct bfd_link_info *info;	/* Overall link information.  */
  asection *sec;		/* Section currently being processed.  */
  int sec_shndx;		/* ELF index of SEC's output section.  */
  /* Callback that actually writes one symbol; returns 1 on success.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
18029
/* Which ARM mapping symbol to emit: $a (ARM code), $t (Thumb code)
   or $d (data).  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
18036
18037
18038 /* Output a single mapping symbol. */
18039
18040 static bfd_boolean
18041 elf32_arm_output_map_sym (output_arch_syminfo *osi,
18042 enum map_symbol_type type,
18043 bfd_vma offset)
18044 {
18045 static const char *names[3] = {"$a", "$t", "$d"};
18046 Elf_Internal_Sym sym;
18047
18048 sym.st_value = osi->sec->output_section->vma
18049 + osi->sec->output_offset
18050 + offset;
18051 sym.st_size = 0;
18052 sym.st_other = 0;
18053 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
18054 sym.st_shndx = osi->sec_shndx;
18055 sym.st_target_internal = 0;
18056 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
18057 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
18058 }
18059
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The offsets used below (addr + 4, + 8, + 12, ...) correspond to the
   code/data layout of each target's PLT entry template defined
   elsewhere in this file — confirm against those templates before
   changing any of them.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select the containing section; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit of the offset (presumably a Thumb/stub
     marker — see how root_plt->offset is set elsewhere).  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      /* NaCl PLT entries are pure ARM code.  */
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* The optional Thumb-entry thunk sits 4 bytes before the entry.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The larger FDPIC PLT entry has a second code region.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
18167
18168 /* Output mapping symbols for PLT entries associated with H. */
18169
18170 static bfd_boolean
18171 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18172 {
18173 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18174 struct elf32_arm_link_hash_entry *eh;
18175
18176 if (h->root.type == bfd_link_hash_indirect)
18177 return TRUE;
18178
18179 if (h->root.type == bfd_link_hash_warning)
18180 /* When warning symbols are created, they **replace** the "real"
18181 entry in the hash table, thus we never get to see the real
18182 symbol in a hash traversal. So look at it now. */
18183 h = (struct elf_link_hash_entry *) h->root.u.i.link;
18184
18185 eh = (struct elf32_arm_link_hash_entry *) h;
18186 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18187 &h->plt, &eh->plt);
18188 }
18189
18190 /* Bind a veneered symbol to its veneer identified by its hash entry
18191 STUB_ENTRY. The veneered location thus loose its symbol. */
18192
18193 static void
18194 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18195 {
18196 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18197
18198 BFD_ASSERT (hash);
18199 hash->root.root.u.def.section = stub_entry->stub_sec;
18200 hash->root.root.u.def.value = stub_entry->stub_offset;
18201 hash->root.size = stub_entry->stub_size;
18202 }
18203
18204 /* Output a single local symbol for a generated stub. */
18205
18206 static bfd_boolean
18207 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18208 bfd_vma offset, bfd_vma size)
18209 {
18210 Elf_Internal_Sym sym;
18211
18212 sym.st_value = osi->sec->output_section->vma
18213 + osi->sec->output_offset
18214 + offset;
18215 sym.st_size = size;
18216 sym.st_other = 0;
18217 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18218 sym.st_shndx = osi->sec_shndx;
18219 sym.st_target_internal = 0;
18220 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18221 }
18222
18223 static bfd_boolean
18224 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
18225 void * in_arg)
18226 {
18227 struct elf32_arm_stub_hash_entry *stub_entry;
18228 asection *stub_sec;
18229 bfd_vma addr;
18230 char *stub_name;
18231 output_arch_syminfo *osi;
18232 const insn_sequence *template_sequence;
18233 enum stub_insn_type prev_type;
18234 int size;
18235 int i;
18236 enum map_symbol_type sym_type;
18237
18238 /* Massage our args to the form they really have. */
18239 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18240 osi = (output_arch_syminfo *) in_arg;
18241
18242 stub_sec = stub_entry->stub_sec;
18243
18244 /* Ensure this stub is attached to the current section being
18245 processed. */
18246 if (stub_sec != osi->sec)
18247 return TRUE;
18248
18249 addr = (bfd_vma) stub_entry->stub_offset;
18250 template_sequence = stub_entry->stub_template;
18251
18252 if (arm_stub_sym_claimed (stub_entry->stub_type))
18253 arm_stub_claim_sym (stub_entry);
18254 else
18255 {
18256 stub_name = stub_entry->output_name;
18257 switch (template_sequence[0].type)
18258 {
18259 case ARM_TYPE:
18260 if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18261 stub_entry->stub_size))
18262 return FALSE;
18263 break;
18264 case THUMB16_TYPE:
18265 case THUMB32_TYPE:
18266 if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18267 stub_entry->stub_size))
18268 return FALSE;
18269 break;
18270 default:
18271 BFD_FAIL ();
18272 return 0;
18273 }
18274 }
18275
18276 prev_type = DATA_TYPE;
18277 size = 0;
18278 for (i = 0; i < stub_entry->stub_template_size; i++)
18279 {
18280 switch (template_sequence[i].type)
18281 {
18282 case ARM_TYPE:
18283 sym_type = ARM_MAP_ARM;
18284 break;
18285
18286 case THUMB16_TYPE:
18287 case THUMB32_TYPE:
18288 sym_type = ARM_MAP_THUMB;
18289 break;
18290
18291 case DATA_TYPE:
18292 sym_type = ARM_MAP_DATA;
18293 break;
18294
18295 default:
18296 BFD_FAIL ();
18297 return FALSE;
18298 }
18299
18300 if (template_sequence[i].type != prev_type)
18301 {
18302 prev_type = template_sequence[i].type;
18303 if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18304 return FALSE;
18305 }
18306
18307 switch (template_sequence[i].type)
18308 {
18309 case ARM_TYPE:
18310 case THUMB32_TYPE:
18311 size += 4;
18312 break;
18313
18314 case THUMB16_TYPE:
18315 size += 2;
18316 break;
18317
18318 case DATA_TYPE:
18319 size += 4;
18320 break;
18321
18322 default:
18323 BFD_FAIL ();
18324 return FALSE;
18325 }
18326 }
18327
18328 return TRUE;
18329 }
18330
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   OUTPUT_BFD is the final link output; FUNC is the callback used to
   emit each synthesized local symbol (with FLAGINFO passed through as
   its opaque first argument).  Returns FALSE on failure.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  /* OSI carries the emit callback plus the current section context and
     is updated as each candidate section is visited below.  */
  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Skip linker-created bfds and those with no symbols at all.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    /* Candidate sections: mapped into an allocated/code output
	       section, have real (non-linker-created) contents, carry
	       ARM section data with no mapping symbols of their own,
	       are non-empty and not excluded.  */
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  Each glue entry is SIZE bytes of Arm code whose
     last word is a data literal, hence the trailing $d.  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  Each entry starts with 4 bytes of Thumb code
     followed by Arm code.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  The whole section is Arm code, so a single $a
     at its start suffices.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  /* arm_map_one_stub filters on osi.sec, so traversing the
	     whole table per section only emits this section's stubs.  */
	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  /* NaCl PLT header is pure Arm code.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab) && !htab->fdpic_p)
	{
	  /* Thumb-only PLT header: code, a data literal at 12, then
	     code again at 16.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      /* Map the individual PLT entries: first those for global
	 symbols, then the local iplt entries of every input bfd.  */
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18565
18566 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18567 the import library. All SYMCOUNT symbols of ABFD can be examined
18568 from their pointers in SYMS. Pointers of symbols to keep should be
18569 stored continuously at the beginning of that array.
18570
18571 Returns the number of symbols to keep. */
18572
18573 static unsigned int
18574 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18575 struct bfd_link_info *info,
18576 asymbol **syms, long symcount)
18577 {
18578 size_t maxnamelen;
18579 char *cmse_name;
18580 long src_count, dst_count = 0;
18581 struct elf32_arm_link_hash_table *htab;
18582
18583 htab = elf32_arm_hash_table (info);
18584 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18585 symcount = 0;
18586
18587 maxnamelen = 128;
18588 cmse_name = (char *) bfd_malloc (maxnamelen);
18589 for (src_count = 0; src_count < symcount; src_count++)
18590 {
18591 struct elf32_arm_link_hash_entry *cmse_hash;
18592 asymbol *sym;
18593 flagword flags;
18594 char *name;
18595 size_t namelen;
18596
18597 sym = syms[src_count];
18598 flags = sym->flags;
18599 name = (char *) bfd_asymbol_name (sym);
18600
18601 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18602 continue;
18603 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18604 continue;
18605
18606 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18607 if (namelen > maxnamelen)
18608 {
18609 cmse_name = (char *)
18610 bfd_realloc (cmse_name, namelen);
18611 maxnamelen = namelen;
18612 }
18613 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18614 cmse_hash = (struct elf32_arm_link_hash_entry *)
18615 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18616
18617 if (!cmse_hash
18618 || (cmse_hash->root.root.type != bfd_link_hash_defined
18619 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18620 || cmse_hash->root.type != STT_FUNC)
18621 continue;
18622
18623 syms[dst_count++] = sym;
18624 }
18625 free (cmse_name);
18626
18627 syms[dst_count] = NULL;
18628
18629 return dst_count;
18630 }
18631
18632 /* Filter symbols of ABFD to include in the import library. All
18633 SYMCOUNT symbols of ABFD can be examined from their pointers in
18634 SYMS. Pointers of symbols to keep should be stored continuously at
18635 the beginning of that array.
18636
18637 Returns the number of symbols to keep. */
18638
18639 static unsigned int
18640 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18641 struct bfd_link_info *info,
18642 asymbol **syms, long symcount)
18643 {
18644 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18645
18646 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18647 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18648 library to be a relocatable object file. */
18649 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18650 if (globals->cmse_implib)
18651 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18652 else
18653 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18654 }
18655
18656 /* Allocate target specific section data. */
18657
18658 static bfd_boolean
18659 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18660 {
18661 if (!sec->used_by_bfd)
18662 {
18663 _arm_elf_section_data *sdata;
18664 bfd_size_type amt = sizeof (*sdata);
18665
18666 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18667 if (sdata == NULL)
18668 return FALSE;
18669 sec->used_by_bfd = sdata;
18670 }
18671
18672 return _bfd_elf_new_section_hook (abfd, sec);
18673 }
18674
18675
18676 /* Used to order a list of mapping symbols by address. */
18677
18678 static int
18679 elf32_arm_compare_mapping (const void * a, const void * b)
18680 {
18681 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18682 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18683
18684 if (amap->vma > bmap->vma)
18685 return 1;
18686 else if (amap->vma < bmap->vma)
18687 return -1;
18688 else if (amap->type > bmap->type)
18689 /* Ensure results do not depend on the host qsort for objects with
18690 multiple mapping symbols at the same address by sorting on type
18691 after vma. */
18692 return 1;
18693 else if (amap->type < bmap->type)
18694 return -1;
18695 else
18696 return 0;
18697 }
18698
18699 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18700
18701 static unsigned long
18702 offset_prel31 (unsigned long addr, bfd_vma offset)
18703 {
18704 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18705 }
18706
18707 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18708 relocations. */
18709
18710 static void
18711 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18712 {
18713 unsigned long first_word = bfd_get_32 (output_bfd, from);
18714 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18715
18716 /* High bit of first word is supposed to be zero. */
18717 if ((first_word & 0x80000000ul) == 0)
18718 first_word = offset_prel31 (first_word, offset);
18719
18720 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18721 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18722 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18723 second_word = offset_prel31 (second_word, offset);
18724
18725 bfd_put_32 (output_bfd, first_word, to);
18726 bfd_put_32 (output_bfd, second_word, to + 4);
18727 }
18728
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being patched.  */
  asection *writing_section;
  /* Buffer holding WRITING_SECTION's contents, into which the branches
     to the Cortex-A8 erratum stubs are written.  */
  bfd_byte *contents;
};
18736

/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse;
   IN_ARG points at a struct a8_branch_to_stub_data identifying the
   section being written and its contents buffer.  Returns FALSE on a
   hard error (stub misplaced or out of range).  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections and stubs that are not Cortex-A8
     erratum veneers.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch target is word-aligned.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the Thumb-2 wide branch encoding matching the veneer kind;
     all three share the jump24 offset-packing code below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;		/* B.W (encoding T4).  */
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;		/* BLX (encoding T2).  */
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;	/* BL (encoding T1).  */

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	/* Pack the 25-bit offset into imm11, imm10, J1, J2 and S as per
	   the Thumb-2 wide branch encodings.  */
	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two halfwords of the branch over the veneered
     instruction.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18845
18846 /* Beginning of stm32l4xx work-around. */
18847
18848 /* Functions encoding instructions necessary for the emission of the
18849 fix-stm32l4xx-629360.
18850 Encoding is extracted from the
18851 ARM (C) Architecture Reference Manual
18852 ARMv7-A and ARMv7-R edition
18853 ARM DDI 0406C.b (ID072512). */
18854
18855 static inline bfd_vma
18856 create_instruction_branch_absolute (int branch_offset)
18857 {
18858 /* A8.8.18 B (A8-334)
18859 B target_address (Encoding T4). */
18860 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18861 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18862 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18863
18864 int s = ((branch_offset & 0x1000000) >> 24);
18865 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18866 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18867
18868 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18869 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18870
18871 bfd_vma patched_inst = 0xf0009000
18872 | s << 26 /* S. */
18873 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18874 | j1 << 13 /* J1. */
18875 | j2 << 11 /* J2. */
18876 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18877
18878 return patched_inst;
18879 }
18880
18881 static inline bfd_vma
18882 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18883 {
18884 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18885 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18886 bfd_vma patched_inst = 0xe8900000
18887 | (/*W=*/wback << 21)
18888 | (base_reg << 16)
18889 | (reg_mask & 0x0000ffff);
18890
18891 return patched_inst;
18892 }
18893
18894 static inline bfd_vma
18895 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18896 {
18897 /* A8.8.60 LDMDB/LDMEA (A8-402)
18898 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18899 bfd_vma patched_inst = 0xe9100000
18900 | (/*W=*/wback << 21)
18901 | (base_reg << 16)
18902 | (reg_mask & 0x0000ffff);
18903
18904 return patched_inst;
18905 }
18906
18907 static inline bfd_vma
18908 create_instruction_mov (int target_reg, int source_reg)
18909 {
18910 /* A8.8.103 MOV (register) (A8-486)
18911 MOV Rd, Rm (Encoding T1). */
18912 bfd_vma patched_inst = 0x4600
18913 | (target_reg & 0x7)
18914 | ((target_reg & 0x8) >> 3) << 7
18915 | (source_reg << 3);
18916
18917 return patched_inst;
18918 }
18919
18920 static inline bfd_vma
18921 create_instruction_sub (int target_reg, int source_reg, int value)
18922 {
18923 /* A8.8.221 SUB (immediate) (A8-708)
18924 SUB Rd, Rn, #value (Encoding T3). */
18925 bfd_vma patched_inst = 0xf1a00000
18926 | (target_reg << 8)
18927 | (source_reg << 16)
18928 | (/*S=*/0 << 20)
18929 | ((value & 0x800) >> 11) << 26
18930 | ((value & 0x700) >> 8) << 12
18931 | (value & 0x0ff);
18932
18933 return patched_inst;
18934 }
18935
18936 static inline bfd_vma
18937 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18938 int first_reg)
18939 {
18940 /* A8.8.332 VLDM (A8-922)
18941 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18942 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18943 | (/*W=*/wback << 21)
18944 | (base_reg << 16)
18945 | (num_words & 0x000000ff)
18946 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18947 | (first_reg & 0x00000001) << 22;
18948
18949 return patched_inst;
18950 }
18951
18952 static inline bfd_vma
18953 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18954 int first_reg)
18955 {
18956 /* A8.8.332 VLDM (A8-922)
18957 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18958 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18959 | (base_reg << 16)
18960 | (num_words & 0x000000ff)
18961 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18962 | (first_reg & 0x00000001) << 22;
18963
18964 return patched_inst;
18965 }
18966
18967 static inline bfd_vma
18968 create_instruction_udf_w (int value)
18969 {
18970 /* A8.8.247 UDF (A8-758)
18971 Undefined (Encoding T2). */
18972 bfd_vma patched_inst = 0xf7f0a000
18973 | (value & 0x00000fff)
18974 | (value & 0x000f0000) << 16;
18975
18976 return patched_inst;
18977 }
18978
18979 static inline bfd_vma
18980 create_instruction_udf (int value)
18981 {
18982 /* A8.8.247 UDF (A8-758)
18983 Undefined (Encoding T1). */
18984 bfd_vma patched_inst = 0xde00
18985 | (value & 0xff);
18986
18987 return patched_inst;
18988 }
18989
18990 /* Functions writing an instruction in memory, returning the next
18991 memory position to write to. */
18992
18993 static inline bfd_byte *
18994 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18995 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18996 {
18997 put_thumb2_insn (htab, output_bfd, insn, pt);
18998 return pt + 4;
18999 }
19000
19001 static inline bfd_byte *
19002 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
19003 bfd * output_bfd, bfd_byte *pt, insn32 insn)
19004 {
19005 put_thumb_insn (htab, output_bfd, insn, pt);
19006 return pt + 2;
19007 }
19008
19009 /* Function filling up a region in memory with T1 and T2 UDFs taking
19010 care of alignment. */
19011
19012 static bfd_byte *
19013 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
19014 bfd * output_bfd,
19015 const bfd_byte * const base_stub_contents,
19016 bfd_byte * const from_stub_contents,
19017 const bfd_byte * const end_stub_contents)
19018 {
19019 bfd_byte *current_stub_contents = from_stub_contents;
19020
19021 /* Fill the remaining of the stub with deterministic contents : UDF
19022 instructions.
19023 Check if realignment is needed on modulo 4 frontier using T1, to
19024 further use T2. */
19025 if ((current_stub_contents < end_stub_contents)
19026 && !((current_stub_contents - base_stub_contents) % 2)
19027 && ((current_stub_contents - base_stub_contents) % 4))
19028 current_stub_contents =
19029 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19030 create_instruction_udf (0));
19031
19032 for (; current_stub_contents < end_stub_contents;)
19033 current_stub_contents =
19034 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19035 create_instruction_udf_w (0));
19036
19037 return current_stub_contents;
19038 }
19039
/* Functions writing the stream of instructions equivalent to the
   derived sequence for ldmia, ldmdb, vldm respectively.  */

/* Emit, at BASE_STUB_CONTENTS, a veneer replacing the LDMIA
   INITIAL_INSN located at INITIAL_INSN_ADDR, working around the
   stm32l4xx erratum by splitting wide register lists into two smaller
   LDMs.  The veneer ends with a branch back to the instruction after
   the original one (unless the LDM itself loads PC), then is padded
   with UDFs up to STM32L4XX_ERRATUM_LDM_VENEER_SIZE.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original T2 LDMIA encoding.  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Encoding constraints of the original instruction, relied on
     below:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With writeback the base register advances by itself, so the
	 two partial LDMs can both use Rn! directly.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      /* Without writeback, Rn must survive; if Rn would be clobbered
	 before the second LDM, first copy it into a register that the
	 second LDM itself reloads.  */
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
19186
/* Emit into BASE_STUB_CONTENTS a veneer replacing INITIAL_INSN, a Thumb-2
   LDMDB affected by the STM32L4XX erratum.  Wide loads (more than 8
   registers) are split into two smaller load-multiple instructions of at
   most 7 registers each, plus glue code to keep the base register
   consistent.  Unless PC is in the register list (in which case the last
   load itself returns), the veneer ends with a branch back to the
   instruction following INITIAL_INSN_ADDR.  The unused tail of the stub
   is filled deterministically by stm32l4xx_fill_stub_udf.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original LDMDB encoding (T1).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the architectural constraints of the input encoding:
     - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  Each case below covers one combination
     of (wback, restore_pc, restore_rn); the combination wback+restore_rn
     is architecturally undefined and asserted against at the end.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  Point Ri at the lowest address
	 of the block so that ascending loads can be used below.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loads PC, so no return
	 branch is emitted.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loads PC; no return branch.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  The last load restores Rn.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  Loads PC; no return branch.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
	"undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19432
/* Emit into BASE_STUB_CONTENTS a veneer replacing INITIAL_INSN, a Thumb-2
   VLDM affected by the STM32L4XX erratum.  Transfers of more than 8 words
   are split into several VLDMs of at most 8 words each, with a final SUB
   to compensate the base register when the original had no writeback.
   The veneer ends with a branch back to the instruction following
   INITIAL_INSN_ADDR, and its unused tail is filled deterministically by
   stm32l4xx_fill_stub_udf.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field: number of words transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Classify the addressing mode from the encoding bits.  */
      bfd_boolean is_dp = /* DP encoding. */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT (  (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm	rx, {...}
	 -> vldm	rx!, {8_words_or_less} for each needed 8_word
	 -> sub	rx, rx, #size (list)

	 vldm	rx!, {...}
	 -> vldm	rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd	rx!, {...}
	 -> vldmb	rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Every chunk but the last transfers exactly 8 words;
		 the last one carries the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19553
19554 static void
19555 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19556 bfd * output_bfd,
19557 const insn32 wrong_insn,
19558 const bfd_byte *const wrong_insn_addr,
19559 bfd_byte *const stub_contents)
19560 {
19561 if (is_thumb2_ldmia (wrong_insn))
19562 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19563 wrong_insn, wrong_insn_addr,
19564 stub_contents);
19565 else if (is_thumb2_ldmdb (wrong_insn))
19566 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19567 wrong_insn, wrong_insn_addr,
19568 stub_contents);
19569 else if (is_thumb2_vldm (wrong_insn))
19570 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19571 wrong_insn, wrong_insn_addr,
19572 stub_contents);
19573 }
19574
19575 /* End of stm32l4xx work-around. */
19576
19577
19578 /* Do code byteswapping. Return FALSE afterwards so that the section is
19579 written out as normal. */
19580
19581 static bfd_boolean
19582 elf32_arm_write_section (bfd *output_bfd,
19583 struct bfd_link_info *link_info,
19584 asection *sec,
19585 bfd_byte *contents)
19586 {
19587 unsigned int mapcount, errcount;
19588 _arm_elf_section_data *arm_data;
19589 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19590 elf32_arm_section_map *map;
19591 elf32_vfp11_erratum_list *errnode;
19592 elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19593 bfd_vma ptr;
19594 bfd_vma end;
19595 bfd_vma offset = sec->output_section->vma + sec->output_offset;
19596 bfd_byte tmp;
19597 unsigned int i;
19598
19599 if (globals == NULL)
19600 return FALSE;
19601
19602 /* If this section has not been allocated an _arm_elf_section_data
19603 structure then we cannot record anything. */
19604 arm_data = get_arm_elf_section_data (sec);
19605 if (arm_data == NULL)
19606 return FALSE;
19607
19608 mapcount = arm_data->mapcount;
19609 map = arm_data->map;
19610 errcount = arm_data->erratumcount;
19611
19612 if (errcount != 0)
19613 {
19614 unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19615
19616 for (errnode = arm_data->erratumlist; errnode != 0;
19617 errnode = errnode->next)
19618 {
19619 bfd_vma target = errnode->vma - offset;
19620
19621 switch (errnode->type)
19622 {
19623 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19624 {
19625 bfd_vma branch_to_veneer;
19626 /* Original condition code of instruction, plus bit mask for
19627 ARM B instruction. */
19628 unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19629 | 0x0a000000;
19630
19631 /* The instruction is before the label. */
19632 target -= 4;
19633
19634 /* Above offset included in -4 below. */
19635 branch_to_veneer = errnode->u.b.veneer->vma
19636 - errnode->vma - 4;
19637
19638 if ((signed) branch_to_veneer < -(1 << 25)
19639 || (signed) branch_to_veneer >= (1 << 25))
19640 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19641 "range"), output_bfd);
19642
19643 insn |= (branch_to_veneer >> 2) & 0xffffff;
19644 contents[endianflip ^ target] = insn & 0xff;
19645 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19646 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19647 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19648 }
19649 break;
19650
19651 case VFP11_ERRATUM_ARM_VENEER:
19652 {
19653 bfd_vma branch_from_veneer;
19654 unsigned int insn;
19655
19656 /* Take size of veneer into account. */
19657 branch_from_veneer = errnode->u.v.branch->vma
19658 - errnode->vma - 12;
19659
19660 if ((signed) branch_from_veneer < -(1 << 25)
19661 || (signed) branch_from_veneer >= (1 << 25))
19662 _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19663 "range"), output_bfd);
19664
19665 /* Original instruction. */
19666 insn = errnode->u.v.branch->u.b.vfp_insn;
19667 contents[endianflip ^ target] = insn & 0xff;
19668 contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19669 contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19670 contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19671
19672 /* Branch back to insn after original insn. */
19673 insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19674 contents[endianflip ^ (target + 4)] = insn & 0xff;
19675 contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19676 contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19677 contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19678 }
19679 break;
19680
19681 default:
19682 abort ();
19683 }
19684 }
19685 }
19686
19687 if (arm_data->stm32l4xx_erratumcount != 0)
19688 {
19689 for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19690 stm32l4xx_errnode != 0;
19691 stm32l4xx_errnode = stm32l4xx_errnode->next)
19692 {
19693 bfd_vma target = stm32l4xx_errnode->vma - offset;
19694
19695 switch (stm32l4xx_errnode->type)
19696 {
19697 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19698 {
19699 unsigned int insn;
19700 bfd_vma branch_to_veneer =
19701 stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19702
19703 if ((signed) branch_to_veneer < -(1 << 24)
19704 || (signed) branch_to_veneer >= (1 << 24))
19705 {
19706 bfd_vma out_of_range =
19707 ((signed) branch_to_veneer < -(1 << 24)) ?
19708 - branch_to_veneer - (1 << 24) :
19709 ((signed) branch_to_veneer >= (1 << 24)) ?
19710 branch_to_veneer - (1 << 24) : 0;
19711
19712 _bfd_error_handler
19713 (_("%pB(%#" PRIx64 "): error: "
19714 "cannot create STM32L4XX veneer; "
19715 "jump out of range by %" PRId64 " bytes; "
19716 "cannot encode branch instruction"),
19717 output_bfd,
19718 (uint64_t) (stm32l4xx_errnode->vma - 4),
19719 (int64_t) out_of_range);
19720 continue;
19721 }
19722
19723 insn = create_instruction_branch_absolute
19724 (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19725
19726 /* The instruction is before the label. */
19727 target -= 4;
19728
19729 put_thumb2_insn (globals, output_bfd,
19730 (bfd_vma) insn, contents + target);
19731 }
19732 break;
19733
19734 case STM32L4XX_ERRATUM_VENEER:
19735 {
19736 bfd_byte * veneer;
19737 bfd_byte * veneer_r;
19738 unsigned int insn;
19739
19740 veneer = contents + target;
19741 veneer_r = veneer
19742 + stm32l4xx_errnode->u.b.veneer->vma
19743 - stm32l4xx_errnode->vma - 4;
19744
19745 if ((signed) (veneer_r - veneer -
19746 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19747 STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19748 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19749 STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19750 || (signed) (veneer_r - veneer) >= (1 << 24))
19751 {
19752 _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19753 "veneer"), output_bfd);
19754 continue;
19755 }
19756
19757 /* Original instruction. */
19758 insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19759
19760 stm32l4xx_create_replacing_stub
19761 (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19762 }
19763 break;
19764
19765 default:
19766 abort ();
19767 }
19768 }
19769 }
19770
19771 if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19772 {
19773 arm_unwind_table_edit *edit_node
19774 = arm_data->u.exidx.unwind_edit_list;
19775 /* Now, sec->size is the size of the section we will write. The original
19776 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19777 markers) was sec->rawsize. (This isn't the case if we perform no
19778 edits, then rawsize will be zero and we should use size). */
19779 bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19780 unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19781 unsigned int in_index, out_index;
19782 bfd_vma add_to_offsets = 0;
19783
19784 for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19785 {
19786 if (edit_node)
19787 {
19788 unsigned int edit_index = edit_node->index;
19789
19790 if (in_index < edit_index && in_index * 8 < input_size)
19791 {
19792 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19793 contents + in_index * 8, add_to_offsets);
19794 out_index++;
19795 in_index++;
19796 }
19797 else if (in_index == edit_index
19798 || (in_index * 8 >= input_size
19799 && edit_index == UINT_MAX))
19800 {
19801 switch (edit_node->type)
19802 {
19803 case DELETE_EXIDX_ENTRY:
19804 in_index++;
19805 add_to_offsets += 8;
19806 break;
19807
19808 case INSERT_EXIDX_CANTUNWIND_AT_END:
19809 {
19810 asection *text_sec = edit_node->linked_section;
19811 bfd_vma text_offset = text_sec->output_section->vma
19812 + text_sec->output_offset
19813 + text_sec->size;
19814 bfd_vma exidx_offset = offset + out_index * 8;
19815 unsigned long prel31_offset;
19816
19817 /* Note: this is meant to be equivalent to an
19818 R_ARM_PREL31 relocation. These synthetic
19819 EXIDX_CANTUNWIND markers are not relocated by the
19820 usual BFD method. */
19821 prel31_offset = (text_offset - exidx_offset)
19822 & 0x7ffffffful;
19823 if (bfd_link_relocatable (link_info))
19824 {
19825 /* Here relocation for new EXIDX_CANTUNWIND is
19826 created, so there is no need to
19827 adjust offset by hand. */
19828 prel31_offset = text_sec->output_offset
19829 + text_sec->size;
19830 }
19831
19832 /* First address we can't unwind. */
19833 bfd_put_32 (output_bfd, prel31_offset,
19834 &edited_contents[out_index * 8]);
19835
19836 /* Code for EXIDX_CANTUNWIND. */
19837 bfd_put_32 (output_bfd, 0x1,
19838 &edited_contents[out_index * 8 + 4]);
19839
19840 out_index++;
19841 add_to_offsets -= 8;
19842 }
19843 break;
19844 }
19845
19846 edit_node = edit_node->next;
19847 }
19848 }
19849 else
19850 {
19851 /* No more edits, copy remaining entries verbatim. */
19852 copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19853 contents + in_index * 8, add_to_offsets);
19854 out_index++;
19855 in_index++;
19856 }
19857 }
19858
19859 if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19860 bfd_set_section_contents (output_bfd, sec->output_section,
19861 edited_contents,
19862 (file_ptr) sec->output_offset, sec->size);
19863
19864 return TRUE;
19865 }
19866
19867 /* Fix code to point to Cortex-A8 erratum stubs. */
19868 if (globals->fix_cortex_a8)
19869 {
19870 struct a8_branch_to_stub_data data;
19871
19872 data.writing_section = sec;
19873 data.contents = contents;
19874
19875 bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19876 & data);
19877 }
19878
19879 if (mapcount == 0)
19880 return FALSE;
19881
19882 if (globals->byteswap_code)
19883 {
19884 qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19885
19886 ptr = map[0].vma;
19887 for (i = 0; i < mapcount; i++)
19888 {
19889 if (i == mapcount - 1)
19890 end = sec->size;
19891 else
19892 end = map[i + 1].vma;
19893
19894 switch (map[i].type)
19895 {
19896 case 'a':
19897 /* Byte swap code words. */
19898 while (ptr + 3 < end)
19899 {
19900 tmp = contents[ptr];
19901 contents[ptr] = contents[ptr + 3];
19902 contents[ptr + 3] = tmp;
19903 tmp = contents[ptr + 1];
19904 contents[ptr + 1] = contents[ptr + 2];
19905 contents[ptr + 2] = tmp;
19906 ptr += 4;
19907 }
19908 break;
19909
19910 case 't':
19911 /* Byte swap code halfwords. */
19912 while (ptr + 1 < end)
19913 {
19914 tmp = contents[ptr];
19915 contents[ptr] = contents[ptr + 1];
19916 contents[ptr + 1] = tmp;
19917 ptr += 2;
19918 }
19919 break;
19920
19921 case 'd':
19922 /* Leave data alone. */
19923 break;
19924 }
19925 ptr = end;
19926 }
19927 }
19928
19929 free (map);
19930 arm_data->mapcount = -1;
19931 arm_data->mapsize = 0;
19932 arm_data->map = NULL;
19933
19934 return FALSE;
19935 }
19936
19937 /* Mangle thumb function symbols as we read them in. */
19938
19939 static bfd_boolean
19940 elf32_arm_swap_symbol_in (bfd * abfd,
19941 const void *psrc,
19942 const void *pshn,
19943 Elf_Internal_Sym *dst)
19944 {
19945 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19946 return FALSE;
19947 dst->st_target_internal = 0;
19948
19949 /* New EABI objects mark thumb function symbols by setting the low bit of
19950 the address. */
19951 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19952 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19953 {
19954 if (dst->st_value & 1)
19955 {
19956 dst->st_value &= ~(bfd_vma) 1;
19957 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19958 ST_BRANCH_TO_THUMB);
19959 }
19960 else
19961 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19962 }
19963 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19964 {
19965 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19966 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19967 }
19968 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19969 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19970 else
19971 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19972
19973 return TRUE;
19974 }
19975
19976
19977 /* Mangle thumb function symbols as we write them out. */
19978
19979 static void
19980 elf32_arm_swap_symbol_out (bfd *abfd,
19981 const Elf_Internal_Sym *src,
19982 void *cdst,
19983 void *shndx)
19984 {
19985 Elf_Internal_Sym newsym;
19986
19987 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19988 of the address set, as per the new EABI. We do this unconditionally
19989 because objcopy does not set the elf header flags until after
19990 it writes out the symbol table. */
19991 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19992 {
19993 newsym = *src;
19994 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19995 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19996 if (newsym.st_shndx != SHN_UNDEF)
19997 {
19998 /* Do this only for defined symbols. At link type, the static
19999 linker will simulate the work of dynamic linker of resolving
20000 symbols and will carry over the thumbness of found symbols to
20001 the output symbol table. It's not clear how it happens, but
20002 the thumbness of undefined symbols can well be different at
20003 runtime, and writing '1' for them will be confusing for users
20004 and possibly for dynamic linker itself.
20005 */
20006 newsym.st_value |= 1;
20007 }
20008
20009 src = &newsym;
20010 }
20011 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
20012 }
20013
20014 /* Add the PT_ARM_EXIDX program header. */
20015
20016 static bfd_boolean
20017 elf32_arm_modify_segment_map (bfd *abfd,
20018 struct bfd_link_info *info ATTRIBUTE_UNUSED)
20019 {
20020 struct elf_segment_map *m;
20021 asection *sec;
20022
20023 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
20024 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
20025 {
20026 /* If there is already a PT_ARM_EXIDX header, then we do not
20027 want to add another one. This situation arises when running
20028 "strip"; the input binary already has the header. */
20029 m = elf_seg_map (abfd);
20030 while (m && m->p_type != PT_ARM_EXIDX)
20031 m = m->next;
20032 if (!m)
20033 {
20034 m = (struct elf_segment_map *)
20035 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
20036 if (m == NULL)
20037 return FALSE;
20038 m->p_type = PT_ARM_EXIDX;
20039 m->count = 1;
20040 m->sections[0] = sec;
20041
20042 m->next = elf_seg_map (abfd);
20043 elf_seg_map (abfd) = m;
20044 }
20045 }
20046
20047 return TRUE;
20048 }
20049
20050 /* We may add a PT_ARM_EXIDX program header. */
20051
20052 static int
20053 elf32_arm_additional_program_headers (bfd *abfd,
20054 struct bfd_link_info *info ATTRIBUTE_UNUSED)
20055 {
20056 asection *sec;
20057
20058 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
20059 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
20060 return 1;
20061 else
20062 return 0;
20063 }
20064
20065 /* Hook called by the linker routine which adds symbols from an object
20066 file. */
20067
20068 static bfd_boolean
20069 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
20070 Elf_Internal_Sym *sym, const char **namep,
20071 flagword *flagsp, asection **secp, bfd_vma *valp)
20072 {
20073 if (elf32_arm_hash_table (info) == NULL)
20074 return FALSE;
20075
20076 if (elf32_arm_hash_table (info)->vxworks_p
20077 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
20078 flagsp, secp, valp))
20079 return FALSE;
20080
20081 return TRUE;
20082 }
20083
/* We use this to override swap_symbol_in and swap_symbol_out.
   Field order follows struct elf_size_info (elf-bfd.h); the comments
   below name each positional initializer — verify against the struct
   declaration if fields are added there.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* hash table entry size.  */
  1,		/* internal relocs per external reloc.  */
  32, 2,	/* arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: see above.  */
  elf32_arm_swap_symbol_out,	/* Overridden: see above.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
20114
20115 static bfd_vma
20116 read_code32 (const bfd *abfd, const bfd_byte *addr)
20117 {
20118 /* V7 BE8 code is always little endian. */
20119 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20120 return bfd_getl32 (addr);
20121
20122 return bfd_get_32 (abfd, addr);
20123 }
20124
20125 static bfd_vma
20126 read_code16 (const bfd *abfd, const bfd_byte *addr)
20127 {
20128 /* V7 BE8 code is always little endian. */
20129 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20130 return bfd_getl16 (addr);
20131
20132 return bfd_get_16 (abfd, addr);
20133 }
20134
20135 /* Return size of plt0 entry starting at ADDR
20136 or (bfd_vma) -1 if size can not be determined. */
20137
20138 static bfd_vma
20139 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
20140 {
20141 bfd_vma first_word;
20142 bfd_vma plt0_size;
20143
20144 first_word = read_code32 (abfd, addr);
20145
20146 if (first_word == elf32_arm_plt0_entry[0])
20147 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20148 else if (first_word == elf32_thumb2_plt0_entry[0])
20149 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20150 else
20151 /* We don't yet handle this PLT format. */
20152 return (bfd_vma) -1;
20153
20154 return plt0_size;
20155 }
20156
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.
   NOTE: the final `else` below pairs with whichever `if`/`else if`
   chain was selected by FOUR_WORD_PLT — keep the preprocessor
   structure intact when editing.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
20196
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */

/* Synthesize "<symbol>@plt" symbols for the entries of ABFD's .plt,
   using the dynamic relocations in .rel.plt to identify the target of
   each entry.  On success, *RET points to one bfd_malloc'd block that
   holds both the asymbols and their name strings, and the number of
   synthesized symbols is returned; 0 means nothing to do, -1 error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
			       long symcount ATTRIBUTE_UNUSED,
			       asymbol **syms ATTRIBUTE_UNUSED,
			       long dynsymcount,
			       asymbol **dynsyms,
			       asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  /* Only executables and shared objects have a PLT worth describing.  */
  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really is a reloc section tied to the
     dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  /* Read (and cache) the PLT contents so we can size its entries.  */
  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total allocation — one asymbol per reloc
     plus room for every "<name>[+0xADDEND]@plt" string.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip over the PLT header; bail out if its format is unknown.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Second pass: fill in the symbols.  The string area starts right
     after the asymbol array inside the same allocation.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeros in the formatted addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20313
20314 static bfd_boolean
20315 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20316 {
20317 if (hdr->sh_flags & SHF_ARM_PURECODE)
20318 *flags |= SEC_ELF_PURECODE;
20319 return TRUE;
20320 }
20321
20322 static flagword
20323 elf32_arm_lookup_section_flags (char *flag_name)
20324 {
20325 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20326 return SHF_ARM_PURECODE;
20327
20328 return SEC_NO_FLAGS;
20329 }
20330
20331 static unsigned int
20332 elf32_arm_count_additional_relocs (asection *sec)
20333 {
20334 struct _arm_elf_section_data *arm_data;
20335 arm_data = get_arm_elf_section_data (sec);
20336
20337 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20338 }
20339
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header matching the output section
	       of the input's linked text section; its index is sh_link.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	/* i == 0 here means no plausible text section was found; fall
	   through and report failure.  */
	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20430
20431 /* Returns TRUE if NAME is an ARM mapping symbol.
20432 Traditionally the symbols $a, $d and $t have been used.
20433 The ARM ELF standard also defines $x (for A64 code). It also allows a
20434 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20435 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20436 not support them here. $t.x indicates the start of ThumbEE instructions. */
20437
20438 static bfd_boolean
20439 is_arm_mapping_symbol (const char * name)
20440 {
20441 return name != NULL /* Paranoia. */
20442 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20443 the mapping symbols could have acquired a prefix.
20444 We do not support this here, since such symbols no
20445 longer conform to the ARM ELF ABI. */
20446 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20447 && (name[2] == 0 || name[2] == '.');
20448 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20449 any characters that follow the period are legal characters for the body
20450 of a symbol's name. For now we just assume that this is the case. */
20451 }
20452
20453 /* Make sure that mapping symbols in object files are not removed via the
20454 "strip --strip-unneeded" tool. These symbols are needed in order to
20455 correctly generate interworking veneers, and for byte swapping code
20456 regions. Once an object file has been linked, it is safe to remove the
20457 symbols as they will no longer be needed. */
20458
20459 static void
20460 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20461 {
20462 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20463 && sym->section != bfd_abs_section_ptr
20464 && is_arm_mapping_symbol (sym->name))
20465 sym->flags |= BSF_KEEP;
20466 }
20467
20468 #undef elf_backend_copy_special_section_fields
20469 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20470
20471 #define ELF_ARCH bfd_arch_arm
20472 #define ELF_TARGET_ID ARM_ELF_DATA
20473 #define ELF_MACHINE_CODE EM_ARM
20474 #ifdef __QNXTARGET__
20475 #define ELF_MAXPAGESIZE 0x1000
20476 #else
20477 #define ELF_MAXPAGESIZE 0x10000
20478 #endif
20479 #define ELF_MINPAGESIZE 0x1000
20480 #define ELF_COMMONPAGESIZE 0x1000
20481
20482 #define bfd_elf32_mkobject elf32_arm_mkobject
20483
20484 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20485 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20486 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20487 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20488 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20489 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20490 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20491 #define bfd_elf32_find_nearest_line elf32_arm_find_nearest_line
20492 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20493 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20494 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20495 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20496 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20497
20498 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20499 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20500 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20501 #define elf_backend_check_relocs elf32_arm_check_relocs
20502 #define elf_backend_update_relocs elf32_arm_update_relocs
20503 #define elf_backend_relocate_section elf32_arm_relocate_section
20504 #define elf_backend_write_section elf32_arm_write_section
20505 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20506 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20507 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20508 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20509 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20510 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20511 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20512 #define elf_backend_init_file_header elf32_arm_init_file_header
20513 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20514 #define elf_backend_object_p elf32_arm_object_p
20515 #define elf_backend_fake_sections elf32_arm_fake_sections
20516 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20517 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20518 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20519 #define elf_backend_size_info elf32_arm_size_info
20520 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20521 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20522 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20523 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20524 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20525 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20526 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20527 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20528
20529 #define elf_backend_can_refcount 1
20530 #define elf_backend_can_gc_sections 1
20531 #define elf_backend_plt_readonly 1
20532 #define elf_backend_want_got_plt 1
20533 #define elf_backend_want_plt_sym 0
20534 #define elf_backend_want_dynrelro 1
20535 #define elf_backend_may_use_rel_p 1
20536 #define elf_backend_may_use_rela_p 0
20537 #define elf_backend_default_use_rela_p 0
20538 #define elf_backend_dtrel_excludes_plt 1
20539
20540 #define elf_backend_got_header_size 12
20541 #define elf_backend_extern_protected_data 1
20542
20543 #undef elf_backend_obj_attrs_vendor
20544 #define elf_backend_obj_attrs_vendor "aeabi"
20545 #undef elf_backend_obj_attrs_section
20546 #define elf_backend_obj_attrs_section ".ARM.attributes"
20547 #undef elf_backend_obj_attrs_arg_type
20548 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20549 #undef elf_backend_obj_attrs_section_type
20550 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20551 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20552 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20553
20554 #undef elf_backend_section_flags
20555 #define elf_backend_section_flags elf32_arm_section_flags
20556 #undef elf_backend_lookup_section_flags_hook
20557 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20558
20559 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20560
20561 #include "elf32-target.h"
20562
20563 /* Native Client targets. */
20564
20565 #undef TARGET_LITTLE_SYM
20566 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20567 #undef TARGET_LITTLE_NAME
20568 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20569 #undef TARGET_BIG_SYM
20570 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20571 #undef TARGET_BIG_NAME
20572 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20573
20574 /* Like elf32_arm_link_hash_table_create -- but overrides
20575 appropriately for NaCl. */
20576
20577 static struct bfd_link_hash_table *
20578 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20579 {
20580 struct bfd_link_hash_table *ret;
20581
20582 ret = elf32_arm_link_hash_table_create (abfd);
20583 if (ret)
20584 {
20585 struct elf32_arm_link_hash_table *htab
20586 = (struct elf32_arm_link_hash_table *) ret;
20587
20588 htab->nacl_p = 1;
20589
20590 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20591 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20592 }
20593 return ret;
20594 }
20595
20596 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20597 really need to use elf32_arm_modify_segment_map. But we do it
20598 anyway just to reduce gratuitous differences with the stock ARM backend. */
20599
20600 static bfd_boolean
20601 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20602 {
20603 return (elf32_arm_modify_segment_map (abfd, info)
20604 && nacl_modify_segment_map (abfd, info));
20605 }
20606
/* NaCl final-write hook: run the generic ARM post-processing first,
   then the NaCl-specific processing, whose status is returned.  */

static bfd_boolean
elf32_arm_nacl_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return nacl_final_write_processing (abfd);
}
20613
20614 static bfd_vma
20615 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20616 const arelent *rel ATTRIBUTE_UNUSED)
20617 {
20618 return plt->vma
20619 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20620 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20621 }
20622
20623 #undef elf32_bed
20624 #define elf32_bed elf32_arm_nacl_bed
20625 #undef bfd_elf32_bfd_link_hash_table_create
20626 #define bfd_elf32_bfd_link_hash_table_create \
20627 elf32_arm_nacl_link_hash_table_create
20628 #undef elf_backend_plt_alignment
20629 #define elf_backend_plt_alignment 4
20630 #undef elf_backend_modify_segment_map
20631 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20632 #undef elf_backend_modify_headers
20633 #define elf_backend_modify_headers nacl_modify_headers
20634 #undef elf_backend_final_write_processing
20635 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20636 #undef bfd_elf32_get_synthetic_symtab
20637 #undef elf_backend_plt_sym_val
20638 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20639 #undef elf_backend_copy_special_section_fields
20640
20641 #undef ELF_MINPAGESIZE
20642 #undef ELF_COMMONPAGESIZE
20643
20644
20645 #include "elf32-target.h"
20646
20647 /* Reset to defaults. */
20648 #undef elf_backend_plt_alignment
20649 #undef elf_backend_modify_segment_map
20650 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20651 #undef elf_backend_modify_headers
20652 #undef elf_backend_final_write_processing
20653 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20654 #undef ELF_MINPAGESIZE
20655 #define ELF_MINPAGESIZE 0x1000
20656 #undef ELF_COMMONPAGESIZE
20657 #define ELF_COMMONPAGESIZE 0x1000
20658
20659
20660 /* FDPIC Targets. */
20661
20662 #undef TARGET_LITTLE_SYM
20663 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20664 #undef TARGET_LITTLE_NAME
20665 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20666 #undef TARGET_BIG_SYM
20667 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20668 #undef TARGET_BIG_NAME
20669 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20670 #undef elf_match_priority
20671 #define elf_match_priority 128
20672 #undef ELF_OSABI
20673 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20674
20675 /* Like elf32_arm_link_hash_table_create -- but overrides
20676 appropriately for FDPIC. */
20677
20678 static struct bfd_link_hash_table *
20679 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20680 {
20681 struct bfd_link_hash_table *ret;
20682
20683 ret = elf32_arm_link_hash_table_create (abfd);
20684 if (ret)
20685 {
20686 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20687
20688 htab->fdpic_p = 1;
20689 }
20690 return ret;
20691 }
20692
20693 /* We need dynamic symbols for every section, since segments can
20694 relocate independently. */
20695 static bfd_boolean
20696 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20697 struct bfd_link_info *info
20698 ATTRIBUTE_UNUSED,
20699 asection *p ATTRIBUTE_UNUSED)
20700 {
20701 switch (elf_section_data (p)->this_hdr.sh_type)
20702 {
20703 case SHT_PROGBITS:
20704 case SHT_NOBITS:
20705 /* If sh_type is yet undecided, assume it could be
20706 SHT_PROGBITS/SHT_NOBITS. */
20707 case SHT_NULL:
20708 return FALSE;
20709
20710 /* There shouldn't be section relative relocations
20711 against any other section. */
20712 default:
20713 return TRUE;
20714 }
20715 }
20716
20717 #undef elf32_bed
20718 #define elf32_bed elf32_arm_fdpic_bed
20719
20720 #undef bfd_elf32_bfd_link_hash_table_create
20721 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20722
20723 #undef elf_backend_omit_section_dynsym
20724 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20725
20726 #include "elf32-target.h"
20727
20728 #undef elf_match_priority
20729 #undef ELF_OSABI
20730 #undef elf_backend_omit_section_dynsym
20731
20732 /* VxWorks Targets. */
20733
20734 #undef TARGET_LITTLE_SYM
20735 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20736 #undef TARGET_LITTLE_NAME
20737 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20738 #undef TARGET_BIG_SYM
20739 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20740 #undef TARGET_BIG_NAME
20741 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20742
20743 /* Like elf32_arm_link_hash_table_create -- but overrides
20744 appropriately for VxWorks. */
20745
20746 static struct bfd_link_hash_table *
20747 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20748 {
20749 struct bfd_link_hash_table *ret;
20750
20751 ret = elf32_arm_link_hash_table_create (abfd);
20752 if (ret)
20753 {
20754 struct elf32_arm_link_hash_table *htab
20755 = (struct elf32_arm_link_hash_table *) ret;
20756 htab->use_rel = 0;
20757 htab->vxworks_p = 1;
20758 }
20759 return ret;
20760 }
20761
/* VxWorks final-write hook: run the generic ARM post-processing first,
   then the VxWorks-specific processing, whose status is returned.  */

static bfd_boolean
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}
20768
20769 #undef elf32_bed
20770 #define elf32_bed elf32_arm_vxworks_bed
20771
20772 #undef bfd_elf32_bfd_link_hash_table_create
20773 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20774 #undef elf_backend_final_write_processing
20775 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20776 #undef elf_backend_emit_relocs
20777 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20778
20779 #undef elf_backend_may_use_rel_p
20780 #define elf_backend_may_use_rel_p 0
20781 #undef elf_backend_may_use_rela_p
20782 #define elf_backend_may_use_rela_p 1
20783 #undef elf_backend_default_use_rela_p
20784 #define elf_backend_default_use_rela_p 1
20785 #undef elf_backend_want_plt_sym
20786 #define elf_backend_want_plt_sym 1
20787 #undef ELF_MAXPAGESIZE
20788 #define ELF_MAXPAGESIZE 0x1000
20789
20790 #include "elf32-target.h"
20791
20792
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE on a hard error (endian or
   EABI mismatch and the like); flag mismatches under EABI "unknown"
   accumulate into the return value instead of stopping early.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  /* First merge into a virgin output: just adopt the input's flags.  */
  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
21015
21016
21017 /* Symbian OS Targets. */
21018
21019 #undef TARGET_LITTLE_SYM
21020 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
21021 #undef TARGET_LITTLE_NAME
21022 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
21023 #undef TARGET_BIG_SYM
21024 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
21025 #undef TARGET_BIG_NAME
21026 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
21027
21028 /* Like elf32_arm_link_hash_table_create -- but overrides
21029 appropriately for Symbian OS. */
21030
21031 static struct bfd_link_hash_table *
21032 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
21033 {
21034 struct bfd_link_hash_table *ret;
21035
21036 ret = elf32_arm_link_hash_table_create (abfd);
21037 if (ret)
21038 {
21039 struct elf32_arm_link_hash_table *htab
21040 = (struct elf32_arm_link_hash_table *)ret;
21041 /* There is no PLT header for Symbian OS. */
21042 htab->plt_header_size = 0;
21043 /* The PLT entries are each one instruction and one word. */
21044 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
21045 htab->symbian_p = 1;
21046 /* Symbian uses armv5t or above, so use_blx is always true. */
21047 htab->use_blx = 1;
21048 htab->root.is_relocatable_executable = 1;
21049 }
21050 return ret;
21051 }
21052
/* Special-section table for the Symbian (BPABI) target; must end with
   an all-zero sentinel entry.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
21073
/* elf_backend_begin_write_processing hook: strip D_PAGED from ABFD,
   then run the generic ARM begin-write processing.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
21089
21090 static bfd_boolean
21091 elf32_arm_symbian_modify_segment_map (bfd *abfd,
21092 struct bfd_link_info *info)
21093 {
21094 struct elf_segment_map *m;
21095 asection *dynsec;
21096
21097 /* BPABI shared libraries and executables should have a PT_DYNAMIC
21098 segment. However, because the .dynamic section is not marked
21099 with SEC_LOAD, the generic ELF code will not create such a
21100 segment. */
21101 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
21102 if (dynsec)
21103 {
21104 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
21105 if (m->p_type == PT_DYNAMIC)
21106 break;
21107
21108 if (m == NULL)
21109 {
21110 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
21111 m->next = elf_seg_map (abfd);
21112 elf_seg_map (abfd) = m;
21113 }
21114 }
21115
21116 /* Also call the generic arm routine. */
21117 return elf32_arm_modify_segment_map (abfd, info);
21118 }
21119
21120 /* Return address for Ith PLT stub in section PLT, for relocation REL
21121 or (bfd_vma) -1 if it should not be included. */
21122
21123 static bfd_vma
21124 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
21125 const arelent *rel ATTRIBUTE_UNUSED)
21126 {
21127 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
21128 }
21129
#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them. */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

/* Fall back to the generic behavior; the relocation-emitting override
   used by other ARM targets is not wanted here.  */
#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header. */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section. */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

/* Symbian output uses REL relocations only, never RELA.  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
/* Do not emit PLT-related symbols into the dynamic symbol table.  */
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt 0
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

/* Instantiate the Symbian target vectors using the settings above.  */
#include "elf32-target.h"