/* Source provenance: binutils-gdb, bfd/elf32-arm.c
   (mirrored via git.ipfire.org gitweb; commit "Move struc-symbol.h to
   symbols.c").  */
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2018 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32
/* Return the name of the relocation section associated with section
   name NAME, e.g. ".text" -> ".rel.text" or ".rela.text" depending on
   whether HTAB->use_rel is set (REL vs. RELA style relocations).
   NAME must be a string literal: the expansion relies on compile-time
   string-literal concatenation.
   NOTE(review): HTAB is the link hash *table* (it carries the use_rel
   flag), not an elf32_arm_link_hash_entry as the old comment said --
   confirm against the struct definition later in this file.  */
#define RELOC_SECTION(HTAB, NAME) \
  ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37
/* Return the size in bytes of one external relocation entry:
   Elf32_External_Rel when HTAB->use_rel is set, Elf32_External_Rela
   otherwise.
   NOTE(review): HTAB is the link hash *table*, not a hash entry as
   previously commented -- use_rel is a table field.  */
#define RELOC_SIZE(HTAB) \
  ((HTAB)->use_rel \
   ? sizeof (Elf32_External_Rel) \
   : sizeof (Elf32_External_Rela))
44
/* Return the function used to convert an external relocation into
   internal form: the Rel variant when HTAB->use_rel is set, the Rela
   variant otherwise.  Both alternatives must have identical function
   types for the conditional expression to be valid.
   NOTE(review): HTAB is the link hash *table*, not a hash entry as
   previously commented.  */
#define SWAP_RELOC_IN(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_in \
   : bfd_elf32_swap_reloca_in)
51
/* Return the function used to convert an internal relocation back to
   external form: the Rel variant when HTAB->use_rel is set, the Rela
   variant otherwise.  Mirror of SWAP_RELOC_IN.
   NOTE(review): HTAB is the link hash *table*, not a hash entry as
   previously commented.  */
#define SWAP_RELOC_OUT(HTAB) \
  ((HTAB)->use_rel \
   ? bfd_elf32_swap_reloc_out \
   : bfd_elf32_swap_reloca_out)
58
/* The generic (RELA) info_to_howto hook is unused for this target;
   relocation decoding goes through the REL-style hook below.  */
#define elf_info_to_howto		NULL
#define elf_info_to_howto_rel		elf32_arm_info_to_howto

/* ABI version values for this backend.
   NOTE(review): presumably emitted into the ELF header's version /
   EI_OSABI fields -- verify at the use sites, which are outside this
   chunk.  */
#define ARM_ELF_ABI_VERSION		0
#define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
64
/* The Adjusted Place, as defined by AAELF (ELF for the ARM
   Architecture): the place X with its bottom two bits cleared, i.e.
   aligned down to a 4-byte (word) boundary.  The explicit 32-bit mask
   also discards any bits above bit 31 of a wider operand.  */
#define Pa(X)                  ((X) & 0xfffffffc)
67
/* Forward declaration; the definition appears later in this file.
   Writes SEC's CONTENTS to OUTPUT_BFD under LINK_INFO, returning
   TRUE on success.
   NOTE(review): presumably applies ARM-specific edits to the section
   contents before output -- see the definition for the actual
   contract.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
72
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75 in that slot. */
76
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79 /* No relocation. */
80 HOWTO (R_ARM_NONE, /* type */
81 0, /* rightshift */
82 3, /* size (0 = byte, 1 = short, 2 = long) */
83 0, /* bitsize */
84 FALSE, /* pc_relative */
85 0, /* bitpos */
86 complain_overflow_dont,/* complain_on_overflow */
87 bfd_elf_generic_reloc, /* special_function */
88 "R_ARM_NONE", /* name */
89 FALSE, /* partial_inplace */
90 0, /* src_mask */
91 0, /* dst_mask */
92 FALSE), /* pcrel_offset */
93
94 HOWTO (R_ARM_PC24, /* type */
95 2, /* rightshift */
96 2, /* size (0 = byte, 1 = short, 2 = long) */
97 24, /* bitsize */
98 TRUE, /* pc_relative */
99 0, /* bitpos */
100 complain_overflow_signed,/* complain_on_overflow */
101 bfd_elf_generic_reloc, /* special_function */
102 "R_ARM_PC24", /* name */
103 FALSE, /* partial_inplace */
104 0x00ffffff, /* src_mask */
105 0x00ffffff, /* dst_mask */
106 TRUE), /* pcrel_offset */
107
108 /* 32 bit absolute */
109 HOWTO (R_ARM_ABS32, /* type */
110 0, /* rightshift */
111 2, /* size (0 = byte, 1 = short, 2 = long) */
112 32, /* bitsize */
113 FALSE, /* pc_relative */
114 0, /* bitpos */
115 complain_overflow_bitfield,/* complain_on_overflow */
116 bfd_elf_generic_reloc, /* special_function */
117 "R_ARM_ABS32", /* name */
118 FALSE, /* partial_inplace */
119 0xffffffff, /* src_mask */
120 0xffffffff, /* dst_mask */
121 FALSE), /* pcrel_offset */
122
123 /* standard 32bit pc-relative reloc */
124 HOWTO (R_ARM_REL32, /* type */
125 0, /* rightshift */
126 2, /* size (0 = byte, 1 = short, 2 = long) */
127 32, /* bitsize */
128 TRUE, /* pc_relative */
129 0, /* bitpos */
130 complain_overflow_bitfield,/* complain_on_overflow */
131 bfd_elf_generic_reloc, /* special_function */
132 "R_ARM_REL32", /* name */
133 FALSE, /* partial_inplace */
134 0xffffffff, /* src_mask */
135 0xffffffff, /* dst_mask */
136 TRUE), /* pcrel_offset */
137
138 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139 HOWTO (R_ARM_LDR_PC_G0, /* type */
140 0, /* rightshift */
141 0, /* size (0 = byte, 1 = short, 2 = long) */
142 32, /* bitsize */
143 TRUE, /* pc_relative */
144 0, /* bitpos */
145 complain_overflow_dont,/* complain_on_overflow */
146 bfd_elf_generic_reloc, /* special_function */
147 "R_ARM_LDR_PC_G0", /* name */
148 FALSE, /* partial_inplace */
149 0xffffffff, /* src_mask */
150 0xffffffff, /* dst_mask */
151 TRUE), /* pcrel_offset */
152
153 /* 16 bit absolute */
154 HOWTO (R_ARM_ABS16, /* type */
155 0, /* rightshift */
156 1, /* size (0 = byte, 1 = short, 2 = long) */
157 16, /* bitsize */
158 FALSE, /* pc_relative */
159 0, /* bitpos */
160 complain_overflow_bitfield,/* complain_on_overflow */
161 bfd_elf_generic_reloc, /* special_function */
162 "R_ARM_ABS16", /* name */
163 FALSE, /* partial_inplace */
164 0x0000ffff, /* src_mask */
165 0x0000ffff, /* dst_mask */
166 FALSE), /* pcrel_offset */
167
168 /* 12 bit absolute */
169 HOWTO (R_ARM_ABS12, /* type */
170 0, /* rightshift */
171 2, /* size (0 = byte, 1 = short, 2 = long) */
172 12, /* bitsize */
173 FALSE, /* pc_relative */
174 0, /* bitpos */
175 complain_overflow_bitfield,/* complain_on_overflow */
176 bfd_elf_generic_reloc, /* special_function */
177 "R_ARM_ABS12", /* name */
178 FALSE, /* partial_inplace */
179 0x00000fff, /* src_mask */
180 0x00000fff, /* dst_mask */
181 FALSE), /* pcrel_offset */
182
183 HOWTO (R_ARM_THM_ABS5, /* type */
184 6, /* rightshift */
185 1, /* size (0 = byte, 1 = short, 2 = long) */
186 5, /* bitsize */
187 FALSE, /* pc_relative */
188 0, /* bitpos */
189 complain_overflow_bitfield,/* complain_on_overflow */
190 bfd_elf_generic_reloc, /* special_function */
191 "R_ARM_THM_ABS5", /* name */
192 FALSE, /* partial_inplace */
193 0x000007e0, /* src_mask */
194 0x000007e0, /* dst_mask */
195 FALSE), /* pcrel_offset */
196
197 /* 8 bit absolute */
198 HOWTO (R_ARM_ABS8, /* type */
199 0, /* rightshift */
200 0, /* size (0 = byte, 1 = short, 2 = long) */
201 8, /* bitsize */
202 FALSE, /* pc_relative */
203 0, /* bitpos */
204 complain_overflow_bitfield,/* complain_on_overflow */
205 bfd_elf_generic_reloc, /* special_function */
206 "R_ARM_ABS8", /* name */
207 FALSE, /* partial_inplace */
208 0x000000ff, /* src_mask */
209 0x000000ff, /* dst_mask */
210 FALSE), /* pcrel_offset */
211
212 HOWTO (R_ARM_SBREL32, /* type */
213 0, /* rightshift */
214 2, /* size (0 = byte, 1 = short, 2 = long) */
215 32, /* bitsize */
216 FALSE, /* pc_relative */
217 0, /* bitpos */
218 complain_overflow_dont,/* complain_on_overflow */
219 bfd_elf_generic_reloc, /* special_function */
220 "R_ARM_SBREL32", /* name */
221 FALSE, /* partial_inplace */
222 0xffffffff, /* src_mask */
223 0xffffffff, /* dst_mask */
224 FALSE), /* pcrel_offset */
225
226 HOWTO (R_ARM_THM_CALL, /* type */
227 1, /* rightshift */
228 2, /* size (0 = byte, 1 = short, 2 = long) */
229 24, /* bitsize */
230 TRUE, /* pc_relative */
231 0, /* bitpos */
232 complain_overflow_signed,/* complain_on_overflow */
233 bfd_elf_generic_reloc, /* special_function */
234 "R_ARM_THM_CALL", /* name */
235 FALSE, /* partial_inplace */
236 0x07ff2fff, /* src_mask */
237 0x07ff2fff, /* dst_mask */
238 TRUE), /* pcrel_offset */
239
240 HOWTO (R_ARM_THM_PC8, /* type */
241 1, /* rightshift */
242 1, /* size (0 = byte, 1 = short, 2 = long) */
243 8, /* bitsize */
244 TRUE, /* pc_relative */
245 0, /* bitpos */
246 complain_overflow_signed,/* complain_on_overflow */
247 bfd_elf_generic_reloc, /* special_function */
248 "R_ARM_THM_PC8", /* name */
249 FALSE, /* partial_inplace */
250 0x000000ff, /* src_mask */
251 0x000000ff, /* dst_mask */
252 TRUE), /* pcrel_offset */
253
254 HOWTO (R_ARM_BREL_ADJ, /* type */
255 1, /* rightshift */
256 1, /* size (0 = byte, 1 = short, 2 = long) */
257 32, /* bitsize */
258 FALSE, /* pc_relative */
259 0, /* bitpos */
260 complain_overflow_signed,/* complain_on_overflow */
261 bfd_elf_generic_reloc, /* special_function */
262 "R_ARM_BREL_ADJ", /* name */
263 FALSE, /* partial_inplace */
264 0xffffffff, /* src_mask */
265 0xffffffff, /* dst_mask */
266 FALSE), /* pcrel_offset */
267
268 HOWTO (R_ARM_TLS_DESC, /* type */
269 0, /* rightshift */
270 2, /* size (0 = byte, 1 = short, 2 = long) */
271 32, /* bitsize */
272 FALSE, /* pc_relative */
273 0, /* bitpos */
274 complain_overflow_bitfield,/* complain_on_overflow */
275 bfd_elf_generic_reloc, /* special_function */
276 "R_ARM_TLS_DESC", /* name */
277 FALSE, /* partial_inplace */
278 0xffffffff, /* src_mask */
279 0xffffffff, /* dst_mask */
280 FALSE), /* pcrel_offset */
281
282 HOWTO (R_ARM_THM_SWI8, /* type */
283 0, /* rightshift */
284 0, /* size (0 = byte, 1 = short, 2 = long) */
285 0, /* bitsize */
286 FALSE, /* pc_relative */
287 0, /* bitpos */
288 complain_overflow_signed,/* complain_on_overflow */
289 bfd_elf_generic_reloc, /* special_function */
290 "R_ARM_SWI8", /* name */
291 FALSE, /* partial_inplace */
292 0x00000000, /* src_mask */
293 0x00000000, /* dst_mask */
294 FALSE), /* pcrel_offset */
295
296 /* BLX instruction for the ARM. */
297 HOWTO (R_ARM_XPC25, /* type */
298 2, /* rightshift */
299 2, /* size (0 = byte, 1 = short, 2 = long) */
300 24, /* bitsize */
301 TRUE, /* pc_relative */
302 0, /* bitpos */
303 complain_overflow_signed,/* complain_on_overflow */
304 bfd_elf_generic_reloc, /* special_function */
305 "R_ARM_XPC25", /* name */
306 FALSE, /* partial_inplace */
307 0x00ffffff, /* src_mask */
308 0x00ffffff, /* dst_mask */
309 TRUE), /* pcrel_offset */
310
311 /* BLX instruction for the Thumb. */
312 HOWTO (R_ARM_THM_XPC22, /* type */
313 2, /* rightshift */
314 2, /* size (0 = byte, 1 = short, 2 = long) */
315 24, /* bitsize */
316 TRUE, /* pc_relative */
317 0, /* bitpos */
318 complain_overflow_signed,/* complain_on_overflow */
319 bfd_elf_generic_reloc, /* special_function */
320 "R_ARM_THM_XPC22", /* name */
321 FALSE, /* partial_inplace */
322 0x07ff2fff, /* src_mask */
323 0x07ff2fff, /* dst_mask */
324 TRUE), /* pcrel_offset */
325
326 /* Dynamic TLS relocations. */
327
328 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
329 0, /* rightshift */
330 2, /* size (0 = byte, 1 = short, 2 = long) */
331 32, /* bitsize */
332 FALSE, /* pc_relative */
333 0, /* bitpos */
334 complain_overflow_bitfield,/* complain_on_overflow */
335 bfd_elf_generic_reloc, /* special_function */
336 "R_ARM_TLS_DTPMOD32", /* name */
337 TRUE, /* partial_inplace */
338 0xffffffff, /* src_mask */
339 0xffffffff, /* dst_mask */
340 FALSE), /* pcrel_offset */
341
342 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
343 0, /* rightshift */
344 2, /* size (0 = byte, 1 = short, 2 = long) */
345 32, /* bitsize */
346 FALSE, /* pc_relative */
347 0, /* bitpos */
348 complain_overflow_bitfield,/* complain_on_overflow */
349 bfd_elf_generic_reloc, /* special_function */
350 "R_ARM_TLS_DTPOFF32", /* name */
351 TRUE, /* partial_inplace */
352 0xffffffff, /* src_mask */
353 0xffffffff, /* dst_mask */
354 FALSE), /* pcrel_offset */
355
356 HOWTO (R_ARM_TLS_TPOFF32, /* type */
357 0, /* rightshift */
358 2, /* size (0 = byte, 1 = short, 2 = long) */
359 32, /* bitsize */
360 FALSE, /* pc_relative */
361 0, /* bitpos */
362 complain_overflow_bitfield,/* complain_on_overflow */
363 bfd_elf_generic_reloc, /* special_function */
364 "R_ARM_TLS_TPOFF32", /* name */
365 TRUE, /* partial_inplace */
366 0xffffffff, /* src_mask */
367 0xffffffff, /* dst_mask */
368 FALSE), /* pcrel_offset */
369
370 /* Relocs used in ARM Linux */
371
372 HOWTO (R_ARM_COPY, /* type */
373 0, /* rightshift */
374 2, /* size (0 = byte, 1 = short, 2 = long) */
375 32, /* bitsize */
376 FALSE, /* pc_relative */
377 0, /* bitpos */
378 complain_overflow_bitfield,/* complain_on_overflow */
379 bfd_elf_generic_reloc, /* special_function */
380 "R_ARM_COPY", /* name */
381 TRUE, /* partial_inplace */
382 0xffffffff, /* src_mask */
383 0xffffffff, /* dst_mask */
384 FALSE), /* pcrel_offset */
385
386 HOWTO (R_ARM_GLOB_DAT, /* type */
387 0, /* rightshift */
388 2, /* size (0 = byte, 1 = short, 2 = long) */
389 32, /* bitsize */
390 FALSE, /* pc_relative */
391 0, /* bitpos */
392 complain_overflow_bitfield,/* complain_on_overflow */
393 bfd_elf_generic_reloc, /* special_function */
394 "R_ARM_GLOB_DAT", /* name */
395 TRUE, /* partial_inplace */
396 0xffffffff, /* src_mask */
397 0xffffffff, /* dst_mask */
398 FALSE), /* pcrel_offset */
399
400 HOWTO (R_ARM_JUMP_SLOT, /* type */
401 0, /* rightshift */
402 2, /* size (0 = byte, 1 = short, 2 = long) */
403 32, /* bitsize */
404 FALSE, /* pc_relative */
405 0, /* bitpos */
406 complain_overflow_bitfield,/* complain_on_overflow */
407 bfd_elf_generic_reloc, /* special_function */
408 "R_ARM_JUMP_SLOT", /* name */
409 TRUE, /* partial_inplace */
410 0xffffffff, /* src_mask */
411 0xffffffff, /* dst_mask */
412 FALSE), /* pcrel_offset */
413
414 HOWTO (R_ARM_RELATIVE, /* type */
415 0, /* rightshift */
416 2, /* size (0 = byte, 1 = short, 2 = long) */
417 32, /* bitsize */
418 FALSE, /* pc_relative */
419 0, /* bitpos */
420 complain_overflow_bitfield,/* complain_on_overflow */
421 bfd_elf_generic_reloc, /* special_function */
422 "R_ARM_RELATIVE", /* name */
423 TRUE, /* partial_inplace */
424 0xffffffff, /* src_mask */
425 0xffffffff, /* dst_mask */
426 FALSE), /* pcrel_offset */
427
428 HOWTO (R_ARM_GOTOFF32, /* type */
429 0, /* rightshift */
430 2, /* size (0 = byte, 1 = short, 2 = long) */
431 32, /* bitsize */
432 FALSE, /* pc_relative */
433 0, /* bitpos */
434 complain_overflow_bitfield,/* complain_on_overflow */
435 bfd_elf_generic_reloc, /* special_function */
436 "R_ARM_GOTOFF32", /* name */
437 TRUE, /* partial_inplace */
438 0xffffffff, /* src_mask */
439 0xffffffff, /* dst_mask */
440 FALSE), /* pcrel_offset */
441
442 HOWTO (R_ARM_GOTPC, /* type */
443 0, /* rightshift */
444 2, /* size (0 = byte, 1 = short, 2 = long) */
445 32, /* bitsize */
446 TRUE, /* pc_relative */
447 0, /* bitpos */
448 complain_overflow_bitfield,/* complain_on_overflow */
449 bfd_elf_generic_reloc, /* special_function */
450 "R_ARM_GOTPC", /* name */
451 TRUE, /* partial_inplace */
452 0xffffffff, /* src_mask */
453 0xffffffff, /* dst_mask */
454 TRUE), /* pcrel_offset */
455
456 HOWTO (R_ARM_GOT32, /* type */
457 0, /* rightshift */
458 2, /* size (0 = byte, 1 = short, 2 = long) */
459 32, /* bitsize */
460 FALSE, /* pc_relative */
461 0, /* bitpos */
462 complain_overflow_bitfield,/* complain_on_overflow */
463 bfd_elf_generic_reloc, /* special_function */
464 "R_ARM_GOT32", /* name */
465 TRUE, /* partial_inplace */
466 0xffffffff, /* src_mask */
467 0xffffffff, /* dst_mask */
468 FALSE), /* pcrel_offset */
469
470 HOWTO (R_ARM_PLT32, /* type */
471 2, /* rightshift */
472 2, /* size (0 = byte, 1 = short, 2 = long) */
473 24, /* bitsize */
474 TRUE, /* pc_relative */
475 0, /* bitpos */
476 complain_overflow_bitfield,/* complain_on_overflow */
477 bfd_elf_generic_reloc, /* special_function */
478 "R_ARM_PLT32", /* name */
479 FALSE, /* partial_inplace */
480 0x00ffffff, /* src_mask */
481 0x00ffffff, /* dst_mask */
482 TRUE), /* pcrel_offset */
483
484 HOWTO (R_ARM_CALL, /* type */
485 2, /* rightshift */
486 2, /* size (0 = byte, 1 = short, 2 = long) */
487 24, /* bitsize */
488 TRUE, /* pc_relative */
489 0, /* bitpos */
490 complain_overflow_signed,/* complain_on_overflow */
491 bfd_elf_generic_reloc, /* special_function */
492 "R_ARM_CALL", /* name */
493 FALSE, /* partial_inplace */
494 0x00ffffff, /* src_mask */
495 0x00ffffff, /* dst_mask */
496 TRUE), /* pcrel_offset */
497
498 HOWTO (R_ARM_JUMP24, /* type */
499 2, /* rightshift */
500 2, /* size (0 = byte, 1 = short, 2 = long) */
501 24, /* bitsize */
502 TRUE, /* pc_relative */
503 0, /* bitpos */
504 complain_overflow_signed,/* complain_on_overflow */
505 bfd_elf_generic_reloc, /* special_function */
506 "R_ARM_JUMP24", /* name */
507 FALSE, /* partial_inplace */
508 0x00ffffff, /* src_mask */
509 0x00ffffff, /* dst_mask */
510 TRUE), /* pcrel_offset */
511
512 HOWTO (R_ARM_THM_JUMP24, /* type */
513 1, /* rightshift */
514 2, /* size (0 = byte, 1 = short, 2 = long) */
515 24, /* bitsize */
516 TRUE, /* pc_relative */
517 0, /* bitpos */
518 complain_overflow_signed,/* complain_on_overflow */
519 bfd_elf_generic_reloc, /* special_function */
520 "R_ARM_THM_JUMP24", /* name */
521 FALSE, /* partial_inplace */
522 0x07ff2fff, /* src_mask */
523 0x07ff2fff, /* dst_mask */
524 TRUE), /* pcrel_offset */
525
526 HOWTO (R_ARM_BASE_ABS, /* type */
527 0, /* rightshift */
528 2, /* size (0 = byte, 1 = short, 2 = long) */
529 32, /* bitsize */
530 FALSE, /* pc_relative */
531 0, /* bitpos */
532 complain_overflow_dont,/* complain_on_overflow */
533 bfd_elf_generic_reloc, /* special_function */
534 "R_ARM_BASE_ABS", /* name */
535 FALSE, /* partial_inplace */
536 0xffffffff, /* src_mask */
537 0xffffffff, /* dst_mask */
538 FALSE), /* pcrel_offset */
539
540 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
541 0, /* rightshift */
542 2, /* size (0 = byte, 1 = short, 2 = long) */
543 12, /* bitsize */
544 TRUE, /* pc_relative */
545 0, /* bitpos */
546 complain_overflow_dont,/* complain_on_overflow */
547 bfd_elf_generic_reloc, /* special_function */
548 "R_ARM_ALU_PCREL_7_0", /* name */
549 FALSE, /* partial_inplace */
550 0x00000fff, /* src_mask */
551 0x00000fff, /* dst_mask */
552 TRUE), /* pcrel_offset */
553
554 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
555 0, /* rightshift */
556 2, /* size (0 = byte, 1 = short, 2 = long) */
557 12, /* bitsize */
558 TRUE, /* pc_relative */
559 8, /* bitpos */
560 complain_overflow_dont,/* complain_on_overflow */
561 bfd_elf_generic_reloc, /* special_function */
562 "R_ARM_ALU_PCREL_15_8",/* name */
563 FALSE, /* partial_inplace */
564 0x00000fff, /* src_mask */
565 0x00000fff, /* dst_mask */
566 TRUE), /* pcrel_offset */
567
568 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
569 0, /* rightshift */
570 2, /* size (0 = byte, 1 = short, 2 = long) */
571 12, /* bitsize */
572 TRUE, /* pc_relative */
573 16, /* bitpos */
574 complain_overflow_dont,/* complain_on_overflow */
575 bfd_elf_generic_reloc, /* special_function */
576 "R_ARM_ALU_PCREL_23_15",/* name */
577 FALSE, /* partial_inplace */
578 0x00000fff, /* src_mask */
579 0x00000fff, /* dst_mask */
580 TRUE), /* pcrel_offset */
581
582 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
583 0, /* rightshift */
584 2, /* size (0 = byte, 1 = short, 2 = long) */
585 12, /* bitsize */
586 FALSE, /* pc_relative */
587 0, /* bitpos */
588 complain_overflow_dont,/* complain_on_overflow */
589 bfd_elf_generic_reloc, /* special_function */
590 "R_ARM_LDR_SBREL_11_0",/* name */
591 FALSE, /* partial_inplace */
592 0x00000fff, /* src_mask */
593 0x00000fff, /* dst_mask */
594 FALSE), /* pcrel_offset */
595
596 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
597 0, /* rightshift */
598 2, /* size (0 = byte, 1 = short, 2 = long) */
599 8, /* bitsize */
600 FALSE, /* pc_relative */
601 12, /* bitpos */
602 complain_overflow_dont,/* complain_on_overflow */
603 bfd_elf_generic_reloc, /* special_function */
604 "R_ARM_ALU_SBREL_19_12",/* name */
605 FALSE, /* partial_inplace */
606 0x000ff000, /* src_mask */
607 0x000ff000, /* dst_mask */
608 FALSE), /* pcrel_offset */
609
610 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
611 0, /* rightshift */
612 2, /* size (0 = byte, 1 = short, 2 = long) */
613 8, /* bitsize */
614 FALSE, /* pc_relative */
615 20, /* bitpos */
616 complain_overflow_dont,/* complain_on_overflow */
617 bfd_elf_generic_reloc, /* special_function */
618 "R_ARM_ALU_SBREL_27_20",/* name */
619 FALSE, /* partial_inplace */
620 0x0ff00000, /* src_mask */
621 0x0ff00000, /* dst_mask */
622 FALSE), /* pcrel_offset */
623
624 HOWTO (R_ARM_TARGET1, /* type */
625 0, /* rightshift */
626 2, /* size (0 = byte, 1 = short, 2 = long) */
627 32, /* bitsize */
628 FALSE, /* pc_relative */
629 0, /* bitpos */
630 complain_overflow_dont,/* complain_on_overflow */
631 bfd_elf_generic_reloc, /* special_function */
632 "R_ARM_TARGET1", /* name */
633 FALSE, /* partial_inplace */
634 0xffffffff, /* src_mask */
635 0xffffffff, /* dst_mask */
636 FALSE), /* pcrel_offset */
637
638 HOWTO (R_ARM_ROSEGREL32, /* type */
639 0, /* rightshift */
640 2, /* size (0 = byte, 1 = short, 2 = long) */
641 32, /* bitsize */
642 FALSE, /* pc_relative */
643 0, /* bitpos */
644 complain_overflow_dont,/* complain_on_overflow */
645 bfd_elf_generic_reloc, /* special_function */
646 "R_ARM_ROSEGREL32", /* name */
647 FALSE, /* partial_inplace */
648 0xffffffff, /* src_mask */
649 0xffffffff, /* dst_mask */
650 FALSE), /* pcrel_offset */
651
652 HOWTO (R_ARM_V4BX, /* type */
653 0, /* rightshift */
654 2, /* size (0 = byte, 1 = short, 2 = long) */
655 32, /* bitsize */
656 FALSE, /* pc_relative */
657 0, /* bitpos */
658 complain_overflow_dont,/* complain_on_overflow */
659 bfd_elf_generic_reloc, /* special_function */
660 "R_ARM_V4BX", /* name */
661 FALSE, /* partial_inplace */
662 0xffffffff, /* src_mask */
663 0xffffffff, /* dst_mask */
664 FALSE), /* pcrel_offset */
665
666 HOWTO (R_ARM_TARGET2, /* type */
667 0, /* rightshift */
668 2, /* size (0 = byte, 1 = short, 2 = long) */
669 32, /* bitsize */
670 FALSE, /* pc_relative */
671 0, /* bitpos */
672 complain_overflow_signed,/* complain_on_overflow */
673 bfd_elf_generic_reloc, /* special_function */
674 "R_ARM_TARGET2", /* name */
675 FALSE, /* partial_inplace */
676 0xffffffff, /* src_mask */
677 0xffffffff, /* dst_mask */
678 TRUE), /* pcrel_offset */
679
680 HOWTO (R_ARM_PREL31, /* type */
681 0, /* rightshift */
682 2, /* size (0 = byte, 1 = short, 2 = long) */
683 31, /* bitsize */
684 TRUE, /* pc_relative */
685 0, /* bitpos */
686 complain_overflow_signed,/* complain_on_overflow */
687 bfd_elf_generic_reloc, /* special_function */
688 "R_ARM_PREL31", /* name */
689 FALSE, /* partial_inplace */
690 0x7fffffff, /* src_mask */
691 0x7fffffff, /* dst_mask */
692 TRUE), /* pcrel_offset */
693
694 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
695 0, /* rightshift */
696 2, /* size (0 = byte, 1 = short, 2 = long) */
697 16, /* bitsize */
698 FALSE, /* pc_relative */
699 0, /* bitpos */
700 complain_overflow_dont,/* complain_on_overflow */
701 bfd_elf_generic_reloc, /* special_function */
702 "R_ARM_MOVW_ABS_NC", /* name */
703 FALSE, /* partial_inplace */
704 0x000f0fff, /* src_mask */
705 0x000f0fff, /* dst_mask */
706 FALSE), /* pcrel_offset */
707
708 HOWTO (R_ARM_MOVT_ABS, /* type */
709 0, /* rightshift */
710 2, /* size (0 = byte, 1 = short, 2 = long) */
711 16, /* bitsize */
712 FALSE, /* pc_relative */
713 0, /* bitpos */
714 complain_overflow_bitfield,/* complain_on_overflow */
715 bfd_elf_generic_reloc, /* special_function */
716 "R_ARM_MOVT_ABS", /* name */
717 FALSE, /* partial_inplace */
718 0x000f0fff, /* src_mask */
719 0x000f0fff, /* dst_mask */
720 FALSE), /* pcrel_offset */
721
722 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
723 0, /* rightshift */
724 2, /* size (0 = byte, 1 = short, 2 = long) */
725 16, /* bitsize */
726 TRUE, /* pc_relative */
727 0, /* bitpos */
728 complain_overflow_dont,/* complain_on_overflow */
729 bfd_elf_generic_reloc, /* special_function */
730 "R_ARM_MOVW_PREL_NC", /* name */
731 FALSE, /* partial_inplace */
732 0x000f0fff, /* src_mask */
733 0x000f0fff, /* dst_mask */
734 TRUE), /* pcrel_offset */
735
736 HOWTO (R_ARM_MOVT_PREL, /* type */
737 0, /* rightshift */
738 2, /* size (0 = byte, 1 = short, 2 = long) */
739 16, /* bitsize */
740 TRUE, /* pc_relative */
741 0, /* bitpos */
742 complain_overflow_bitfield,/* complain_on_overflow */
743 bfd_elf_generic_reloc, /* special_function */
744 "R_ARM_MOVT_PREL", /* name */
745 FALSE, /* partial_inplace */
746 0x000f0fff, /* src_mask */
747 0x000f0fff, /* dst_mask */
748 TRUE), /* pcrel_offset */
749
750 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
751 0, /* rightshift */
752 2, /* size (0 = byte, 1 = short, 2 = long) */
753 16, /* bitsize */
754 FALSE, /* pc_relative */
755 0, /* bitpos */
756 complain_overflow_dont,/* complain_on_overflow */
757 bfd_elf_generic_reloc, /* special_function */
758 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 FALSE, /* partial_inplace */
760 0x040f70ff, /* src_mask */
761 0x040f70ff, /* dst_mask */
762 FALSE), /* pcrel_offset */
763
764 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
765 0, /* rightshift */
766 2, /* size (0 = byte, 1 = short, 2 = long) */
767 16, /* bitsize */
768 FALSE, /* pc_relative */
769 0, /* bitpos */
770 complain_overflow_bitfield,/* complain_on_overflow */
771 bfd_elf_generic_reloc, /* special_function */
772 "R_ARM_THM_MOVT_ABS", /* name */
773 FALSE, /* partial_inplace */
774 0x040f70ff, /* src_mask */
775 0x040f70ff, /* dst_mask */
776 FALSE), /* pcrel_offset */
777
778 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 0, /* rightshift */
780 2, /* size (0 = byte, 1 = short, 2 = long) */
781 16, /* bitsize */
782 TRUE, /* pc_relative */
783 0, /* bitpos */
784 complain_overflow_dont,/* complain_on_overflow */
785 bfd_elf_generic_reloc, /* special_function */
786 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 FALSE, /* partial_inplace */
788 0x040f70ff, /* src_mask */
789 0x040f70ff, /* dst_mask */
790 TRUE), /* pcrel_offset */
791
792 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
793 0, /* rightshift */
794 2, /* size (0 = byte, 1 = short, 2 = long) */
795 16, /* bitsize */
796 TRUE, /* pc_relative */
797 0, /* bitpos */
798 complain_overflow_bitfield,/* complain_on_overflow */
799 bfd_elf_generic_reloc, /* special_function */
800 "R_ARM_THM_MOVT_PREL", /* name */
801 FALSE, /* partial_inplace */
802 0x040f70ff, /* src_mask */
803 0x040f70ff, /* dst_mask */
804 TRUE), /* pcrel_offset */
805
806 HOWTO (R_ARM_THM_JUMP19, /* type */
807 1, /* rightshift */
808 2, /* size (0 = byte, 1 = short, 2 = long) */
809 19, /* bitsize */
810 TRUE, /* pc_relative */
811 0, /* bitpos */
812 complain_overflow_signed,/* complain_on_overflow */
813 bfd_elf_generic_reloc, /* special_function */
814 "R_ARM_THM_JUMP19", /* name */
815 FALSE, /* partial_inplace */
816 0x043f2fff, /* src_mask */
817 0x043f2fff, /* dst_mask */
818 TRUE), /* pcrel_offset */
819
820 HOWTO (R_ARM_THM_JUMP6, /* type */
821 1, /* rightshift */
822 1, /* size (0 = byte, 1 = short, 2 = long) */
823 6, /* bitsize */
824 TRUE, /* pc_relative */
825 0, /* bitpos */
826 complain_overflow_unsigned,/* complain_on_overflow */
827 bfd_elf_generic_reloc, /* special_function */
828 "R_ARM_THM_JUMP6", /* name */
829 FALSE, /* partial_inplace */
830 0x02f8, /* src_mask */
831 0x02f8, /* dst_mask */
832 TRUE), /* pcrel_offset */
833
834 /* These are declared as 13-bit signed relocations because we can
835 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836 versa. */
837 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 0, /* rightshift */
839 2, /* size (0 = byte, 1 = short, 2 = long) */
840 13, /* bitsize */
841 TRUE, /* pc_relative */
842 0, /* bitpos */
843 complain_overflow_dont,/* complain_on_overflow */
844 bfd_elf_generic_reloc, /* special_function */
845 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 FALSE, /* partial_inplace */
847 0xffffffff, /* src_mask */
848 0xffffffff, /* dst_mask */
849 TRUE), /* pcrel_offset */
850
851 HOWTO (R_ARM_THM_PC12, /* type */
852 0, /* rightshift */
853 2, /* size (0 = byte, 1 = short, 2 = long) */
854 13, /* bitsize */
855 TRUE, /* pc_relative */
856 0, /* bitpos */
857 complain_overflow_dont,/* complain_on_overflow */
858 bfd_elf_generic_reloc, /* special_function */
859 "R_ARM_THM_PC12", /* name */
860 FALSE, /* partial_inplace */
861 0xffffffff, /* src_mask */
862 0xffffffff, /* dst_mask */
863 TRUE), /* pcrel_offset */
864
865 HOWTO (R_ARM_ABS32_NOI, /* type */
866 0, /* rightshift */
867 2, /* size (0 = byte, 1 = short, 2 = long) */
868 32, /* bitsize */
869 FALSE, /* pc_relative */
870 0, /* bitpos */
871 complain_overflow_dont,/* complain_on_overflow */
872 bfd_elf_generic_reloc, /* special_function */
873 "R_ARM_ABS32_NOI", /* name */
874 FALSE, /* partial_inplace */
875 0xffffffff, /* src_mask */
876 0xffffffff, /* dst_mask */
877 FALSE), /* pcrel_offset */
878
879 HOWTO (R_ARM_REL32_NOI, /* type */
880 0, /* rightshift */
881 2, /* size (0 = byte, 1 = short, 2 = long) */
882 32, /* bitsize */
883 TRUE, /* pc_relative */
884 0, /* bitpos */
885 complain_overflow_dont,/* complain_on_overflow */
886 bfd_elf_generic_reloc, /* special_function */
887 "R_ARM_REL32_NOI", /* name */
888 FALSE, /* partial_inplace */
889 0xffffffff, /* src_mask */
890 0xffffffff, /* dst_mask */
891 FALSE), /* pcrel_offset */
892
893 /* Group relocations. */
894
895 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
896 0, /* rightshift */
897 2, /* size (0 = byte, 1 = short, 2 = long) */
898 32, /* bitsize */
899 TRUE, /* pc_relative */
900 0, /* bitpos */
901 complain_overflow_dont,/* complain_on_overflow */
902 bfd_elf_generic_reloc, /* special_function */
903 "R_ARM_ALU_PC_G0_NC", /* name */
904 FALSE, /* partial_inplace */
905 0xffffffff, /* src_mask */
906 0xffffffff, /* dst_mask */
907 TRUE), /* pcrel_offset */
908
909 HOWTO (R_ARM_ALU_PC_G0, /* type */
910 0, /* rightshift */
911 2, /* size (0 = byte, 1 = short, 2 = long) */
912 32, /* bitsize */
913 TRUE, /* pc_relative */
914 0, /* bitpos */
915 complain_overflow_dont,/* complain_on_overflow */
916 bfd_elf_generic_reloc, /* special_function */
917 "R_ARM_ALU_PC_G0", /* name */
918 FALSE, /* partial_inplace */
919 0xffffffff, /* src_mask */
920 0xffffffff, /* dst_mask */
921 TRUE), /* pcrel_offset */
922
923 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
924 0, /* rightshift */
925 2, /* size (0 = byte, 1 = short, 2 = long) */
926 32, /* bitsize */
927 TRUE, /* pc_relative */
928 0, /* bitpos */
929 complain_overflow_dont,/* complain_on_overflow */
930 bfd_elf_generic_reloc, /* special_function */
931 "R_ARM_ALU_PC_G1_NC", /* name */
932 FALSE, /* partial_inplace */
933 0xffffffff, /* src_mask */
934 0xffffffff, /* dst_mask */
935 TRUE), /* pcrel_offset */
936
937 HOWTO (R_ARM_ALU_PC_G1, /* type */
938 0, /* rightshift */
939 2, /* size (0 = byte, 1 = short, 2 = long) */
940 32, /* bitsize */
941 TRUE, /* pc_relative */
942 0, /* bitpos */
943 complain_overflow_dont,/* complain_on_overflow */
944 bfd_elf_generic_reloc, /* special_function */
945 "R_ARM_ALU_PC_G1", /* name */
946 FALSE, /* partial_inplace */
947 0xffffffff, /* src_mask */
948 0xffffffff, /* dst_mask */
949 TRUE), /* pcrel_offset */
950
951 HOWTO (R_ARM_ALU_PC_G2, /* type */
952 0, /* rightshift */
953 2, /* size (0 = byte, 1 = short, 2 = long) */
954 32, /* bitsize */
955 TRUE, /* pc_relative */
956 0, /* bitpos */
957 complain_overflow_dont,/* complain_on_overflow */
958 bfd_elf_generic_reloc, /* special_function */
959 "R_ARM_ALU_PC_G2", /* name */
960 FALSE, /* partial_inplace */
961 0xffffffff, /* src_mask */
962 0xffffffff, /* dst_mask */
963 TRUE), /* pcrel_offset */
964
965 HOWTO (R_ARM_LDR_PC_G1, /* type */
966 0, /* rightshift */
967 2, /* size (0 = byte, 1 = short, 2 = long) */
968 32, /* bitsize */
969 TRUE, /* pc_relative */
970 0, /* bitpos */
971 complain_overflow_dont,/* complain_on_overflow */
972 bfd_elf_generic_reloc, /* special_function */
973 "R_ARM_LDR_PC_G1", /* name */
974 FALSE, /* partial_inplace */
975 0xffffffff, /* src_mask */
976 0xffffffff, /* dst_mask */
977 TRUE), /* pcrel_offset */
978
979 HOWTO (R_ARM_LDR_PC_G2, /* type */
980 0, /* rightshift */
981 2, /* size (0 = byte, 1 = short, 2 = long) */
982 32, /* bitsize */
983 TRUE, /* pc_relative */
984 0, /* bitpos */
985 complain_overflow_dont,/* complain_on_overflow */
986 bfd_elf_generic_reloc, /* special_function */
987 "R_ARM_LDR_PC_G2", /* name */
988 FALSE, /* partial_inplace */
989 0xffffffff, /* src_mask */
990 0xffffffff, /* dst_mask */
991 TRUE), /* pcrel_offset */
992
993 HOWTO (R_ARM_LDRS_PC_G0, /* type */
994 0, /* rightshift */
995 2, /* size (0 = byte, 1 = short, 2 = long) */
996 32, /* bitsize */
997 TRUE, /* pc_relative */
998 0, /* bitpos */
999 complain_overflow_dont,/* complain_on_overflow */
1000 bfd_elf_generic_reloc, /* special_function */
1001 "R_ARM_LDRS_PC_G0", /* name */
1002 FALSE, /* partial_inplace */
1003 0xffffffff, /* src_mask */
1004 0xffffffff, /* dst_mask */
1005 TRUE), /* pcrel_offset */
1006
1007 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1008 0, /* rightshift */
1009 2, /* size (0 = byte, 1 = short, 2 = long) */
1010 32, /* bitsize */
1011 TRUE, /* pc_relative */
1012 0, /* bitpos */
1013 complain_overflow_dont,/* complain_on_overflow */
1014 bfd_elf_generic_reloc, /* special_function */
1015 "R_ARM_LDRS_PC_G1", /* name */
1016 FALSE, /* partial_inplace */
1017 0xffffffff, /* src_mask */
1018 0xffffffff, /* dst_mask */
1019 TRUE), /* pcrel_offset */
1020
1021 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1022 0, /* rightshift */
1023 2, /* size (0 = byte, 1 = short, 2 = long) */
1024 32, /* bitsize */
1025 TRUE, /* pc_relative */
1026 0, /* bitpos */
1027 complain_overflow_dont,/* complain_on_overflow */
1028 bfd_elf_generic_reloc, /* special_function */
1029 "R_ARM_LDRS_PC_G2", /* name */
1030 FALSE, /* partial_inplace */
1031 0xffffffff, /* src_mask */
1032 0xffffffff, /* dst_mask */
1033 TRUE), /* pcrel_offset */
1034
1035 HOWTO (R_ARM_LDC_PC_G0, /* type */
1036 0, /* rightshift */
1037 2, /* size (0 = byte, 1 = short, 2 = long) */
1038 32, /* bitsize */
1039 TRUE, /* pc_relative */
1040 0, /* bitpos */
1041 complain_overflow_dont,/* complain_on_overflow */
1042 bfd_elf_generic_reloc, /* special_function */
1043 "R_ARM_LDC_PC_G0", /* name */
1044 FALSE, /* partial_inplace */
1045 0xffffffff, /* src_mask */
1046 0xffffffff, /* dst_mask */
1047 TRUE), /* pcrel_offset */
1048
1049 HOWTO (R_ARM_LDC_PC_G1, /* type */
1050 0, /* rightshift */
1051 2, /* size (0 = byte, 1 = short, 2 = long) */
1052 32, /* bitsize */
1053 TRUE, /* pc_relative */
1054 0, /* bitpos */
1055 complain_overflow_dont,/* complain_on_overflow */
1056 bfd_elf_generic_reloc, /* special_function */
1057 "R_ARM_LDC_PC_G1", /* name */
1058 FALSE, /* partial_inplace */
1059 0xffffffff, /* src_mask */
1060 0xffffffff, /* dst_mask */
1061 TRUE), /* pcrel_offset */
1062
1063 HOWTO (R_ARM_LDC_PC_G2, /* type */
1064 0, /* rightshift */
1065 2, /* size (0 = byte, 1 = short, 2 = long) */
1066 32, /* bitsize */
1067 TRUE, /* pc_relative */
1068 0, /* bitpos */
1069 complain_overflow_dont,/* complain_on_overflow */
1070 bfd_elf_generic_reloc, /* special_function */
1071 "R_ARM_LDC_PC_G2", /* name */
1072 FALSE, /* partial_inplace */
1073 0xffffffff, /* src_mask */
1074 0xffffffff, /* dst_mask */
1075 TRUE), /* pcrel_offset */
1076
1077 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1078 0, /* rightshift */
1079 2, /* size (0 = byte, 1 = short, 2 = long) */
1080 32, /* bitsize */
1081 TRUE, /* pc_relative */
1082 0, /* bitpos */
1083 complain_overflow_dont,/* complain_on_overflow */
1084 bfd_elf_generic_reloc, /* special_function */
1085 "R_ARM_ALU_SB_G0_NC", /* name */
1086 FALSE, /* partial_inplace */
1087 0xffffffff, /* src_mask */
1088 0xffffffff, /* dst_mask */
1089 TRUE), /* pcrel_offset */
1090
1091 HOWTO (R_ARM_ALU_SB_G0, /* type */
1092 0, /* rightshift */
1093 2, /* size (0 = byte, 1 = short, 2 = long) */
1094 32, /* bitsize */
1095 TRUE, /* pc_relative */
1096 0, /* bitpos */
1097 complain_overflow_dont,/* complain_on_overflow */
1098 bfd_elf_generic_reloc, /* special_function */
1099 "R_ARM_ALU_SB_G0", /* name */
1100 FALSE, /* partial_inplace */
1101 0xffffffff, /* src_mask */
1102 0xffffffff, /* dst_mask */
1103 TRUE), /* pcrel_offset */
1104
1105 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1106 0, /* rightshift */
1107 2, /* size (0 = byte, 1 = short, 2 = long) */
1108 32, /* bitsize */
1109 TRUE, /* pc_relative */
1110 0, /* bitpos */
1111 complain_overflow_dont,/* complain_on_overflow */
1112 bfd_elf_generic_reloc, /* special_function */
1113 "R_ARM_ALU_SB_G1_NC", /* name */
1114 FALSE, /* partial_inplace */
1115 0xffffffff, /* src_mask */
1116 0xffffffff, /* dst_mask */
1117 TRUE), /* pcrel_offset */
1118
1119 HOWTO (R_ARM_ALU_SB_G1, /* type */
1120 0, /* rightshift */
1121 2, /* size (0 = byte, 1 = short, 2 = long) */
1122 32, /* bitsize */
1123 TRUE, /* pc_relative */
1124 0, /* bitpos */
1125 complain_overflow_dont,/* complain_on_overflow */
1126 bfd_elf_generic_reloc, /* special_function */
1127 "R_ARM_ALU_SB_G1", /* name */
1128 FALSE, /* partial_inplace */
1129 0xffffffff, /* src_mask */
1130 0xffffffff, /* dst_mask */
1131 TRUE), /* pcrel_offset */
1132
1133 HOWTO (R_ARM_ALU_SB_G2, /* type */
1134 0, /* rightshift */
1135 2, /* size (0 = byte, 1 = short, 2 = long) */
1136 32, /* bitsize */
1137 TRUE, /* pc_relative */
1138 0, /* bitpos */
1139 complain_overflow_dont,/* complain_on_overflow */
1140 bfd_elf_generic_reloc, /* special_function */
1141 "R_ARM_ALU_SB_G2", /* name */
1142 FALSE, /* partial_inplace */
1143 0xffffffff, /* src_mask */
1144 0xffffffff, /* dst_mask */
1145 TRUE), /* pcrel_offset */
1146
1147 HOWTO (R_ARM_LDR_SB_G0, /* type */
1148 0, /* rightshift */
1149 2, /* size (0 = byte, 1 = short, 2 = long) */
1150 32, /* bitsize */
1151 TRUE, /* pc_relative */
1152 0, /* bitpos */
1153 complain_overflow_dont,/* complain_on_overflow */
1154 bfd_elf_generic_reloc, /* special_function */
1155 "R_ARM_LDR_SB_G0", /* name */
1156 FALSE, /* partial_inplace */
1157 0xffffffff, /* src_mask */
1158 0xffffffff, /* dst_mask */
1159 TRUE), /* pcrel_offset */
1160
1161 HOWTO (R_ARM_LDR_SB_G1, /* type */
1162 0, /* rightshift */
1163 2, /* size (0 = byte, 1 = short, 2 = long) */
1164 32, /* bitsize */
1165 TRUE, /* pc_relative */
1166 0, /* bitpos */
1167 complain_overflow_dont,/* complain_on_overflow */
1168 bfd_elf_generic_reloc, /* special_function */
1169 "R_ARM_LDR_SB_G1", /* name */
1170 FALSE, /* partial_inplace */
1171 0xffffffff, /* src_mask */
1172 0xffffffff, /* dst_mask */
1173 TRUE), /* pcrel_offset */
1174
1175 HOWTO (R_ARM_LDR_SB_G2, /* type */
1176 0, /* rightshift */
1177 2, /* size (0 = byte, 1 = short, 2 = long) */
1178 32, /* bitsize */
1179 TRUE, /* pc_relative */
1180 0, /* bitpos */
1181 complain_overflow_dont,/* complain_on_overflow */
1182 bfd_elf_generic_reloc, /* special_function */
1183 "R_ARM_LDR_SB_G2", /* name */
1184 FALSE, /* partial_inplace */
1185 0xffffffff, /* src_mask */
1186 0xffffffff, /* dst_mask */
1187 TRUE), /* pcrel_offset */
1188
1189 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1190 0, /* rightshift */
1191 2, /* size (0 = byte, 1 = short, 2 = long) */
1192 32, /* bitsize */
1193 TRUE, /* pc_relative */
1194 0, /* bitpos */
1195 complain_overflow_dont,/* complain_on_overflow */
1196 bfd_elf_generic_reloc, /* special_function */
1197 "R_ARM_LDRS_SB_G0", /* name */
1198 FALSE, /* partial_inplace */
1199 0xffffffff, /* src_mask */
1200 0xffffffff, /* dst_mask */
1201 TRUE), /* pcrel_offset */
1202
1203 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1204 0, /* rightshift */
1205 2, /* size (0 = byte, 1 = short, 2 = long) */
1206 32, /* bitsize */
1207 TRUE, /* pc_relative */
1208 0, /* bitpos */
1209 complain_overflow_dont,/* complain_on_overflow */
1210 bfd_elf_generic_reloc, /* special_function */
1211 "R_ARM_LDRS_SB_G1", /* name */
1212 FALSE, /* partial_inplace */
1213 0xffffffff, /* src_mask */
1214 0xffffffff, /* dst_mask */
1215 TRUE), /* pcrel_offset */
1216
1217 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1218 0, /* rightshift */
1219 2, /* size (0 = byte, 1 = short, 2 = long) */
1220 32, /* bitsize */
1221 TRUE, /* pc_relative */
1222 0, /* bitpos */
1223 complain_overflow_dont,/* complain_on_overflow */
1224 bfd_elf_generic_reloc, /* special_function */
1225 "R_ARM_LDRS_SB_G2", /* name */
1226 FALSE, /* partial_inplace */
1227 0xffffffff, /* src_mask */
1228 0xffffffff, /* dst_mask */
1229 TRUE), /* pcrel_offset */
1230
1231 HOWTO (R_ARM_LDC_SB_G0, /* type */
1232 0, /* rightshift */
1233 2, /* size (0 = byte, 1 = short, 2 = long) */
1234 32, /* bitsize */
1235 TRUE, /* pc_relative */
1236 0, /* bitpos */
1237 complain_overflow_dont,/* complain_on_overflow */
1238 bfd_elf_generic_reloc, /* special_function */
1239 "R_ARM_LDC_SB_G0", /* name */
1240 FALSE, /* partial_inplace */
1241 0xffffffff, /* src_mask */
1242 0xffffffff, /* dst_mask */
1243 TRUE), /* pcrel_offset */
1244
1245 HOWTO (R_ARM_LDC_SB_G1, /* type */
1246 0, /* rightshift */
1247 2, /* size (0 = byte, 1 = short, 2 = long) */
1248 32, /* bitsize */
1249 TRUE, /* pc_relative */
1250 0, /* bitpos */
1251 complain_overflow_dont,/* complain_on_overflow */
1252 bfd_elf_generic_reloc, /* special_function */
1253 "R_ARM_LDC_SB_G1", /* name */
1254 FALSE, /* partial_inplace */
1255 0xffffffff, /* src_mask */
1256 0xffffffff, /* dst_mask */
1257 TRUE), /* pcrel_offset */
1258
1259 HOWTO (R_ARM_LDC_SB_G2, /* type */
1260 0, /* rightshift */
1261 2, /* size (0 = byte, 1 = short, 2 = long) */
1262 32, /* bitsize */
1263 TRUE, /* pc_relative */
1264 0, /* bitpos */
1265 complain_overflow_dont,/* complain_on_overflow */
1266 bfd_elf_generic_reloc, /* special_function */
1267 "R_ARM_LDC_SB_G2", /* name */
1268 FALSE, /* partial_inplace */
1269 0xffffffff, /* src_mask */
1270 0xffffffff, /* dst_mask */
1271 TRUE), /* pcrel_offset */
1272
1273 /* End of group relocations. */
1274
1275 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1276 0, /* rightshift */
1277 2, /* size (0 = byte, 1 = short, 2 = long) */
1278 16, /* bitsize */
1279 FALSE, /* pc_relative */
1280 0, /* bitpos */
1281 complain_overflow_dont,/* complain_on_overflow */
1282 bfd_elf_generic_reloc, /* special_function */
1283 "R_ARM_MOVW_BREL_NC", /* name */
1284 FALSE, /* partial_inplace */
1285 0x0000ffff, /* src_mask */
1286 0x0000ffff, /* dst_mask */
1287 FALSE), /* pcrel_offset */
1288
1289 HOWTO (R_ARM_MOVT_BREL, /* type */
1290 0, /* rightshift */
1291 2, /* size (0 = byte, 1 = short, 2 = long) */
1292 16, /* bitsize */
1293 FALSE, /* pc_relative */
1294 0, /* bitpos */
1295 complain_overflow_bitfield,/* complain_on_overflow */
1296 bfd_elf_generic_reloc, /* special_function */
1297 "R_ARM_MOVT_BREL", /* name */
1298 FALSE, /* partial_inplace */
1299 0x0000ffff, /* src_mask */
1300 0x0000ffff, /* dst_mask */
1301 FALSE), /* pcrel_offset */
1302
1303 HOWTO (R_ARM_MOVW_BREL, /* type */
1304 0, /* rightshift */
1305 2, /* size (0 = byte, 1 = short, 2 = long) */
1306 16, /* bitsize */
1307 FALSE, /* pc_relative */
1308 0, /* bitpos */
1309 complain_overflow_dont,/* complain_on_overflow */
1310 bfd_elf_generic_reloc, /* special_function */
1311 "R_ARM_MOVW_BREL", /* name */
1312 FALSE, /* partial_inplace */
1313 0x0000ffff, /* src_mask */
1314 0x0000ffff, /* dst_mask */
1315 FALSE), /* pcrel_offset */
1316
1317 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 0, /* rightshift */
1319 2, /* size (0 = byte, 1 = short, 2 = long) */
1320 16, /* bitsize */
1321 FALSE, /* pc_relative */
1322 0, /* bitpos */
1323 complain_overflow_dont,/* complain_on_overflow */
1324 bfd_elf_generic_reloc, /* special_function */
1325 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 FALSE, /* partial_inplace */
1327 0x040f70ff, /* src_mask */
1328 0x040f70ff, /* dst_mask */
1329 FALSE), /* pcrel_offset */
1330
1331 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1332 0, /* rightshift */
1333 2, /* size (0 = byte, 1 = short, 2 = long) */
1334 16, /* bitsize */
1335 FALSE, /* pc_relative */
1336 0, /* bitpos */
1337 complain_overflow_bitfield,/* complain_on_overflow */
1338 bfd_elf_generic_reloc, /* special_function */
1339 "R_ARM_THM_MOVT_BREL", /* name */
1340 FALSE, /* partial_inplace */
1341 0x040f70ff, /* src_mask */
1342 0x040f70ff, /* dst_mask */
1343 FALSE), /* pcrel_offset */
1344
1345 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1346 0, /* rightshift */
1347 2, /* size (0 = byte, 1 = short, 2 = long) */
1348 16, /* bitsize */
1349 FALSE, /* pc_relative */
1350 0, /* bitpos */
1351 complain_overflow_dont,/* complain_on_overflow */
1352 bfd_elf_generic_reloc, /* special_function */
1353 "R_ARM_THM_MOVW_BREL", /* name */
1354 FALSE, /* partial_inplace */
1355 0x040f70ff, /* src_mask */
1356 0x040f70ff, /* dst_mask */
1357 FALSE), /* pcrel_offset */
1358
1359 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1360 0, /* rightshift */
1361 2, /* size (0 = byte, 1 = short, 2 = long) */
1362 32, /* bitsize */
1363 FALSE, /* pc_relative */
1364 0, /* bitpos */
1365 complain_overflow_bitfield,/* complain_on_overflow */
1366 NULL, /* special_function */
1367 "R_ARM_TLS_GOTDESC", /* name */
1368 TRUE, /* partial_inplace */
1369 0xffffffff, /* src_mask */
1370 0xffffffff, /* dst_mask */
1371 FALSE), /* pcrel_offset */
1372
1373 HOWTO (R_ARM_TLS_CALL, /* type */
1374 0, /* rightshift */
1375 2, /* size (0 = byte, 1 = short, 2 = long) */
1376 24, /* bitsize */
1377 FALSE, /* pc_relative */
1378 0, /* bitpos */
1379 complain_overflow_dont,/* complain_on_overflow */
1380 bfd_elf_generic_reloc, /* special_function */
1381 "R_ARM_TLS_CALL", /* name */
1382 FALSE, /* partial_inplace */
1383 0x00ffffff, /* src_mask */
1384 0x00ffffff, /* dst_mask */
1385 FALSE), /* pcrel_offset */
1386
1387 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1388 0, /* rightshift */
1389 2, /* size (0 = byte, 1 = short, 2 = long) */
1390 0, /* bitsize */
1391 FALSE, /* pc_relative */
1392 0, /* bitpos */
1393 complain_overflow_bitfield,/* complain_on_overflow */
1394 bfd_elf_generic_reloc, /* special_function */
1395 "R_ARM_TLS_DESCSEQ", /* name */
1396 FALSE, /* partial_inplace */
1397 0x00000000, /* src_mask */
1398 0x00000000, /* dst_mask */
1399 FALSE), /* pcrel_offset */
1400
1401 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1402 0, /* rightshift */
1403 2, /* size (0 = byte, 1 = short, 2 = long) */
1404 24, /* bitsize */
1405 FALSE, /* pc_relative */
1406 0, /* bitpos */
1407 complain_overflow_dont,/* complain_on_overflow */
1408 bfd_elf_generic_reloc, /* special_function */
1409 "R_ARM_THM_TLS_CALL", /* name */
1410 FALSE, /* partial_inplace */
1411 0x07ff07ff, /* src_mask */
1412 0x07ff07ff, /* dst_mask */
1413 FALSE), /* pcrel_offset */
1414
1415 HOWTO (R_ARM_PLT32_ABS, /* type */
1416 0, /* rightshift */
1417 2, /* size (0 = byte, 1 = short, 2 = long) */
1418 32, /* bitsize */
1419 FALSE, /* pc_relative */
1420 0, /* bitpos */
1421 complain_overflow_dont,/* complain_on_overflow */
1422 bfd_elf_generic_reloc, /* special_function */
1423 "R_ARM_PLT32_ABS", /* name */
1424 FALSE, /* partial_inplace */
1425 0xffffffff, /* src_mask */
1426 0xffffffff, /* dst_mask */
1427 FALSE), /* pcrel_offset */
1428
1429 HOWTO (R_ARM_GOT_ABS, /* type */
1430 0, /* rightshift */
1431 2, /* size (0 = byte, 1 = short, 2 = long) */
1432 32, /* bitsize */
1433 FALSE, /* pc_relative */
1434 0, /* bitpos */
1435 complain_overflow_dont,/* complain_on_overflow */
1436 bfd_elf_generic_reloc, /* special_function */
1437 "R_ARM_GOT_ABS", /* name */
1438 FALSE, /* partial_inplace */
1439 0xffffffff, /* src_mask */
1440 0xffffffff, /* dst_mask */
1441 FALSE), /* pcrel_offset */
1442
1443 HOWTO (R_ARM_GOT_PREL, /* type */
1444 0, /* rightshift */
1445 2, /* size (0 = byte, 1 = short, 2 = long) */
1446 32, /* bitsize */
1447 TRUE, /* pc_relative */
1448 0, /* bitpos */
1449 complain_overflow_dont, /* complain_on_overflow */
1450 bfd_elf_generic_reloc, /* special_function */
1451 "R_ARM_GOT_PREL", /* name */
1452 FALSE, /* partial_inplace */
1453 0xffffffff, /* src_mask */
1454 0xffffffff, /* dst_mask */
1455 TRUE), /* pcrel_offset */
1456
1457 HOWTO (R_ARM_GOT_BREL12, /* type */
1458 0, /* rightshift */
1459 2, /* size (0 = byte, 1 = short, 2 = long) */
1460 12, /* bitsize */
1461 FALSE, /* pc_relative */
1462 0, /* bitpos */
1463 complain_overflow_bitfield,/* complain_on_overflow */
1464 bfd_elf_generic_reloc, /* special_function */
1465 "R_ARM_GOT_BREL12", /* name */
1466 FALSE, /* partial_inplace */
1467 0x00000fff, /* src_mask */
1468 0x00000fff, /* dst_mask */
1469 FALSE), /* pcrel_offset */
1470
1471 HOWTO (R_ARM_GOTOFF12, /* type */
1472 0, /* rightshift */
1473 2, /* size (0 = byte, 1 = short, 2 = long) */
1474 12, /* bitsize */
1475 FALSE, /* pc_relative */
1476 0, /* bitpos */
1477 complain_overflow_bitfield,/* complain_on_overflow */
1478 bfd_elf_generic_reloc, /* special_function */
1479 "R_ARM_GOTOFF12", /* name */
1480 FALSE, /* partial_inplace */
1481 0x00000fff, /* src_mask */
1482 0x00000fff, /* dst_mask */
1483 FALSE), /* pcrel_offset */
1484
1485 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1486
1487 /* GNU extension to record C++ vtable member usage */
1488 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1489 0, /* rightshift */
1490 2, /* size (0 = byte, 1 = short, 2 = long) */
1491 0, /* bitsize */
1492 FALSE, /* pc_relative */
1493 0, /* bitpos */
1494 complain_overflow_dont, /* complain_on_overflow */
1495 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1496 "R_ARM_GNU_VTENTRY", /* name */
1497 FALSE, /* partial_inplace */
1498 0, /* src_mask */
1499 0, /* dst_mask */
1500 FALSE), /* pcrel_offset */
1501
1502 /* GNU extension to record C++ vtable hierarchy */
1503 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 0, /* rightshift */
1505 2, /* size (0 = byte, 1 = short, 2 = long) */
1506 0, /* bitsize */
1507 FALSE, /* pc_relative */
1508 0, /* bitpos */
1509 complain_overflow_dont, /* complain_on_overflow */
1510 NULL, /* special_function */
1511 "R_ARM_GNU_VTINHERIT", /* name */
1512 FALSE, /* partial_inplace */
1513 0, /* src_mask */
1514 0, /* dst_mask */
1515 FALSE), /* pcrel_offset */
1516
1517 HOWTO (R_ARM_THM_JUMP11, /* type */
1518 1, /* rightshift */
1519 1, /* size (0 = byte, 1 = short, 2 = long) */
1520 11, /* bitsize */
1521 TRUE, /* pc_relative */
1522 0, /* bitpos */
1523 complain_overflow_signed, /* complain_on_overflow */
1524 bfd_elf_generic_reloc, /* special_function */
1525 "R_ARM_THM_JUMP11", /* name */
1526 FALSE, /* partial_inplace */
1527 0x000007ff, /* src_mask */
1528 0x000007ff, /* dst_mask */
1529 TRUE), /* pcrel_offset */
1530
1531 HOWTO (R_ARM_THM_JUMP8, /* type */
1532 1, /* rightshift */
1533 1, /* size (0 = byte, 1 = short, 2 = long) */
1534 8, /* bitsize */
1535 TRUE, /* pc_relative */
1536 0, /* bitpos */
1537 complain_overflow_signed, /* complain_on_overflow */
1538 bfd_elf_generic_reloc, /* special_function */
1539 "R_ARM_THM_JUMP8", /* name */
1540 FALSE, /* partial_inplace */
1541 0x000000ff, /* src_mask */
1542 0x000000ff, /* dst_mask */
1543 TRUE), /* pcrel_offset */
1544
1545 /* TLS relocations */
1546 HOWTO (R_ARM_TLS_GD32, /* type */
1547 0, /* rightshift */
1548 2, /* size (0 = byte, 1 = short, 2 = long) */
1549 32, /* bitsize */
1550 FALSE, /* pc_relative */
1551 0, /* bitpos */
1552 complain_overflow_bitfield,/* complain_on_overflow */
1553 NULL, /* special_function */
1554 "R_ARM_TLS_GD32", /* name */
1555 TRUE, /* partial_inplace */
1556 0xffffffff, /* src_mask */
1557 0xffffffff, /* dst_mask */
1558 FALSE), /* pcrel_offset */
1559
1560 HOWTO (R_ARM_TLS_LDM32, /* type */
1561 0, /* rightshift */
1562 2, /* size (0 = byte, 1 = short, 2 = long) */
1563 32, /* bitsize */
1564 FALSE, /* pc_relative */
1565 0, /* bitpos */
1566 complain_overflow_bitfield,/* complain_on_overflow */
1567 bfd_elf_generic_reloc, /* special_function */
1568 "R_ARM_TLS_LDM32", /* name */
1569 TRUE, /* partial_inplace */
1570 0xffffffff, /* src_mask */
1571 0xffffffff, /* dst_mask */
1572 FALSE), /* pcrel_offset */
1573
1574 HOWTO (R_ARM_TLS_LDO32, /* type */
1575 0, /* rightshift */
1576 2, /* size (0 = byte, 1 = short, 2 = long) */
1577 32, /* bitsize */
1578 FALSE, /* pc_relative */
1579 0, /* bitpos */
1580 complain_overflow_bitfield,/* complain_on_overflow */
1581 bfd_elf_generic_reloc, /* special_function */
1582 "R_ARM_TLS_LDO32", /* name */
1583 TRUE, /* partial_inplace */
1584 0xffffffff, /* src_mask */
1585 0xffffffff, /* dst_mask */
1586 FALSE), /* pcrel_offset */
1587
1588 HOWTO (R_ARM_TLS_IE32, /* type */
1589 0, /* rightshift */
1590 2, /* size (0 = byte, 1 = short, 2 = long) */
1591 32, /* bitsize */
1592 FALSE, /* pc_relative */
1593 0, /* bitpos */
1594 complain_overflow_bitfield,/* complain_on_overflow */
1595 NULL, /* special_function */
1596 "R_ARM_TLS_IE32", /* name */
1597 TRUE, /* partial_inplace */
1598 0xffffffff, /* src_mask */
1599 0xffffffff, /* dst_mask */
1600 FALSE), /* pcrel_offset */
1601
1602 HOWTO (R_ARM_TLS_LE32, /* type */
1603 0, /* rightshift */
1604 2, /* size (0 = byte, 1 = short, 2 = long) */
1605 32, /* bitsize */
1606 FALSE, /* pc_relative */
1607 0, /* bitpos */
1608 complain_overflow_bitfield,/* complain_on_overflow */
1609 NULL, /* special_function */
1610 "R_ARM_TLS_LE32", /* name */
1611 TRUE, /* partial_inplace */
1612 0xffffffff, /* src_mask */
1613 0xffffffff, /* dst_mask */
1614 FALSE), /* pcrel_offset */
1615
1616 HOWTO (R_ARM_TLS_LDO12, /* type */
1617 0, /* rightshift */
1618 2, /* size (0 = byte, 1 = short, 2 = long) */
1619 12, /* bitsize */
1620 FALSE, /* pc_relative */
1621 0, /* bitpos */
1622 complain_overflow_bitfield,/* complain_on_overflow */
1623 bfd_elf_generic_reloc, /* special_function */
1624 "R_ARM_TLS_LDO12", /* name */
1625 FALSE, /* partial_inplace */
1626 0x00000fff, /* src_mask */
1627 0x00000fff, /* dst_mask */
1628 FALSE), /* pcrel_offset */
1629
1630 HOWTO (R_ARM_TLS_LE12, /* type */
1631 0, /* rightshift */
1632 2, /* size (0 = byte, 1 = short, 2 = long) */
1633 12, /* bitsize */
1634 FALSE, /* pc_relative */
1635 0, /* bitpos */
1636 complain_overflow_bitfield,/* complain_on_overflow */
1637 bfd_elf_generic_reloc, /* special_function */
1638 "R_ARM_TLS_LE12", /* name */
1639 FALSE, /* partial_inplace */
1640 0x00000fff, /* src_mask */
1641 0x00000fff, /* dst_mask */
1642 FALSE), /* pcrel_offset */
1643
1644 HOWTO (R_ARM_TLS_IE12GP, /* type */
1645 0, /* rightshift */
1646 2, /* size (0 = byte, 1 = short, 2 = long) */
1647 12, /* bitsize */
1648 FALSE, /* pc_relative */
1649 0, /* bitpos */
1650 complain_overflow_bitfield,/* complain_on_overflow */
1651 bfd_elf_generic_reloc, /* special_function */
1652 "R_ARM_TLS_IE12GP", /* name */
1653 FALSE, /* partial_inplace */
1654 0x00000fff, /* src_mask */
1655 0x00000fff, /* dst_mask */
1656 FALSE), /* pcrel_offset */
1657
1658 /* 112-127 private relocations. */
1659 EMPTY_HOWTO (112),
1660 EMPTY_HOWTO (113),
1661 EMPTY_HOWTO (114),
1662 EMPTY_HOWTO (115),
1663 EMPTY_HOWTO (116),
1664 EMPTY_HOWTO (117),
1665 EMPTY_HOWTO (118),
1666 EMPTY_HOWTO (119),
1667 EMPTY_HOWTO (120),
1668 EMPTY_HOWTO (121),
1669 EMPTY_HOWTO (122),
1670 EMPTY_HOWTO (123),
1671 EMPTY_HOWTO (124),
1672 EMPTY_HOWTO (125),
1673 EMPTY_HOWTO (126),
1674 EMPTY_HOWTO (127),
1675
1676 /* R_ARM_ME_TOO, obsolete. */
1677 EMPTY_HOWTO (128),
1678
1679 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1680 0, /* rightshift */
1681 1, /* size (0 = byte, 1 = short, 2 = long) */
1682 0, /* bitsize */
1683 FALSE, /* pc_relative */
1684 0, /* bitpos */
1685 complain_overflow_bitfield,/* complain_on_overflow */
1686 bfd_elf_generic_reloc, /* special_function */
1687 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 FALSE, /* partial_inplace */
1689 0x00000000, /* src_mask */
1690 0x00000000, /* dst_mask */
1691 FALSE), /* pcrel_offset */
1692 EMPTY_HOWTO (130),
1693 EMPTY_HOWTO (131),
1694 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1695 0, /* rightshift. */
1696 1, /* size (0 = byte, 1 = short, 2 = long). */
1697 16, /* bitsize. */
1698 FALSE, /* pc_relative. */
1699 0, /* bitpos. */
1700 complain_overflow_bitfield,/* complain_on_overflow. */
1701 bfd_elf_generic_reloc, /* special_function. */
1702 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1703 FALSE, /* partial_inplace. */
1704 0x00000000, /* src_mask. */
1705 0x00000000, /* dst_mask. */
1706 FALSE), /* pcrel_offset. */
1707 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1708 0, /* rightshift. */
1709 1, /* size (0 = byte, 1 = short, 2 = long). */
1710 16, /* bitsize. */
1711 FALSE, /* pc_relative. */
1712 0, /* bitpos. */
1713 complain_overflow_bitfield,/* complain_on_overflow. */
1714 bfd_elf_generic_reloc, /* special_function. */
1715 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1716 FALSE, /* partial_inplace. */
1717 0x00000000, /* src_mask. */
1718 0x00000000, /* dst_mask. */
1719 FALSE), /* pcrel_offset. */
1720 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1721 0, /* rightshift. */
1722 1, /* size (0 = byte, 1 = short, 2 = long). */
1723 16, /* bitsize. */
1724 FALSE, /* pc_relative. */
1725 0, /* bitpos. */
1726 complain_overflow_bitfield,/* complain_on_overflow. */
1727 bfd_elf_generic_reloc, /* special_function. */
1728 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1729 FALSE, /* partial_inplace. */
1730 0x00000000, /* src_mask. */
1731 0x00000000, /* dst_mask. */
1732 FALSE), /* pcrel_offset. */
1733 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1734 0, /* rightshift. */
1735 1, /* size (0 = byte, 1 = short, 2 = long). */
1736 16, /* bitsize. */
1737 FALSE, /* pc_relative. */
1738 0, /* bitpos. */
1739 complain_overflow_bitfield,/* complain_on_overflow. */
1740 bfd_elf_generic_reloc, /* special_function. */
1741 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1742 FALSE, /* partial_inplace. */
1743 0x00000000, /* src_mask. */
1744 0x00000000, /* dst_mask. */
1745 FALSE), /* pcrel_offset. */
1746 };
1747
1748 /* 160 onwards: */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  /* Covers relocation numbers R_ARM_IRELATIVE (160) through
     R_ARM_TLS_IE32_FDPIC (167); indexed by (r_type - R_ARM_IRELATIVE)
     in elf32_arm_howto_from_type.  */

  /* Dynamic relocation for STT_GNU_IFUNC symbols: the dynamic linker
     calls the resolver at the target and stores the result.  */
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* FDPIC relocations follow; all are 32-bit absolute fields with an
     empty src_mask (the addend is not read from the section).  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* NOTE(review): bitsize is 64 here while size is 2 (4 bytes) and
     dst_mask covers only 32 bits — presumably because a function
     descriptor occupies two words; confirm against the FDPIC ABI.  */
  HOWTO (R_ARM_FUNCDESC_VALUE, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_GD32_FDPIC, /* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1856
1857 /* 249-255 extended, currently unused, relocations: */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  /* Covers relocation numbers starting at R_ARM_RREL32 (249); indexed
     by (r_type - R_ARM_RREL32) in elf32_arm_howto_from_type.  All four
     entries are placeholders: size, bitsize and both masks are zero, so
     applying one of them changes nothing in the section contents.  */
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1916
1917 static reloc_howto_type *
1918 elf32_arm_howto_from_type (unsigned int r_type)
1919 {
1920 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1921 return &elf32_arm_howto_table_1[r_type];
1922
1923 if (r_type >= R_ARM_IRELATIVE
1924 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1925 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1926
1927 if (r_type >= R_ARM_RREL32
1928 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1929 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1930
1931 return NULL;
1932 }
1933
1934 static bfd_boolean
1935 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1936 Elf_Internal_Rela * elf_reloc)
1937 {
1938 unsigned int r_type;
1939
1940 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1941 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1942 {
1943 /* xgettext:c-format */
1944 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1945 abfd, r_type);
1946 bfd_set_error (bfd_error_bad_value);
1947 return FALSE;
1948 }
1949 return TRUE;
1950 }
1951
/* One entry mapping a generic BFD relocation code onto the
   corresponding ELF R_ARM_* relocation number.  */
struct elf32_arm_reloc_map
  {
    bfd_reloc_code_real_type bfd_reloc_val;	/* BFD_RELOC_* code.  */
    unsigned char elf_reloc_val;		/* R_ARM_* number.  */
  };
1957
1958 /* All entries in this list must also be present in elf32_arm_howto_table. */
1959 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1960 {
1961 {BFD_RELOC_NONE, R_ARM_NONE},
1962 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
1963 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
1964 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
1965 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
1966 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
1967 {BFD_RELOC_32, R_ARM_ABS32},
1968 {BFD_RELOC_32_PCREL, R_ARM_REL32},
1969 {BFD_RELOC_8, R_ARM_ABS8},
1970 {BFD_RELOC_16, R_ARM_ABS16},
1971 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
1972 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
1973 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1974 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1975 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1976 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1977 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
1978 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
1979 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
1980 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
1981 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
1982 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
1983 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
1984 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
1985 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
1986 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1987 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
1988 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
1989 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
1990 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
1991 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
1992 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
1993 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
1994 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
1995 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
1996 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
1997 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
1998 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
1999 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2000 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2001 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2002 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2003 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2004 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2005 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2006 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2007 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2008 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2009 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2010 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2011 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2012 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2013 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2014 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2015 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2016 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2017 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2018 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2019 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2020 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2021 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2022 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2023 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2024 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2025 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2026 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2027 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2028 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2029 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2030 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2031 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2032 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2033 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2034 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2035 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2036 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2037 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2038 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2039 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2040 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2041 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2042 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2043 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2044 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2045 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2046 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2047 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2048 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2049 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2050 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2051 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2052 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2053 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2054 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2055 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2056 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2057 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
2058 };
2059
2060 static reloc_howto_type *
2061 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2062 bfd_reloc_code_real_type code)
2063 {
2064 unsigned int i;
2065
2066 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2067 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2068 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2069
2070 return NULL;
2071 }
2072
2073 static reloc_howto_type *
2074 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2075 const char *r_name)
2076 {
2077 unsigned int i;
2078
2079 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2080 if (elf32_arm_howto_table_1[i].name != NULL
2081 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2082 return &elf32_arm_howto_table_1[i];
2083
2084 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2085 if (elf32_arm_howto_table_2[i].name != NULL
2086 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2087 return &elf32_arm_howto_table_2[i];
2088
2089 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2090 if (elf32_arm_howto_table_3[i].name != NULL
2091 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2092 return &elf32_arm_howto_table_3[i];
2093
2094 return NULL;
2095 }
2096
2097 /* Support for core dump NOTE sections. */
2098
2099 static bfd_boolean
2100 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2101 {
2102 int offset;
2103 size_t size;
2104
2105 switch (note->descsz)
2106 {
2107 default:
2108 return FALSE;
2109
2110 case 148: /* Linux/ARM 32-bit. */
2111 /* pr_cursig */
2112 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2113
2114 /* pr_pid */
2115 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2116
2117 /* pr_reg */
2118 offset = 72;
2119 size = 72;
2120
2121 break;
2122 }
2123
2124 /* Make a ".reg/999" section. */
2125 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2126 size, note->descpos + offset);
2127 }
2128
2129 static bfd_boolean
2130 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2131 {
2132 switch (note->descsz)
2133 {
2134 default:
2135 return FALSE;
2136
2137 case 124: /* Linux/ARM elf_prpsinfo. */
2138 elf_tdata (abfd)->core->pid
2139 = bfd_get_32 (abfd, note->descdata + 12);
2140 elf_tdata (abfd)->core->program
2141 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2142 elf_tdata (abfd)->core->command
2143 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2144 }
2145
2146 /* Note that for some reason, a spurious space is tacked
2147 onto the end of the args in some (at least one anyway)
2148 implementations, so strip it off if it exists. */
2149 {
2150 char *command = elf_tdata (abfd)->core->command;
2151 int n = strlen (command);
2152
2153 if (0 < n && command[n - 1] == ' ')
2154 command[n - 1] = '\0';
2155 }
2156
2157 return TRUE;
2158 }
2159
/* Write a core-dump note of NOTE_TYPE into BUF (of size *BUFSIZ),
   building the raw Linux/ARM descriptor from the variadic arguments.
   The field offsets mirror those read back by
   elf32_arm_nabi_grok_psinfo (28/16, 44/80) and
   elf32_arm_nabi_grok_prstatus (12, 24, 72).  Returns the updated
   buffer, or NULL for an unsupported note type.
   Variadic arguments:
     NT_PRPSINFO: const char *program (16 bytes), const char *command (80).
     NT_PRSTATUS: long pid, int cursig, const void *greg (72 bytes).  */

static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	/* 124-byte Linux/ARM elf_prpsinfo image; NONSTRING because the
	   fixed-width fields need not be NUL-terminated.  */
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* pr_fname at offset 28, 16 bytes.  */
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	/* pr_psargs at offset 44, 80 bytes.  */
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	/* 148-byte Linux/ARM prstatus image.  */
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	/* Argument order is part of the API; do not reorder the
	   va_arg calls.  */
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);	/* pr_pid.  */
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);	/* pr_cursig.  */
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);		/* pr_reg.  */
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2218
2219 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2220 #define TARGET_LITTLE_NAME "elf32-littlearm"
2221 #define TARGET_BIG_SYM arm_elf32_be_vec
2222 #define TARGET_BIG_NAME "elf32-bigarm"
2223
2224 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2225 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2226 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2227
2228 typedef unsigned long int insn32;
2229 typedef unsigned short int insn16;
2230
2231 /* In lieu of proper flags, assume all EABIv4 or later objects are
2232 interworkable. */
2233 #define INTERWORK_FLAG(abfd) \
2234 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2235 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2236 || ((abfd)->flags & BFD_LINKER_CREATED))
2237
2238 /* The linker script knows the section names for placement.
2239 The entry_names are used to do simple name mangling on the stubs.
2240 Given a function name, and its type, the stub can be found. The
2241 name can be changed. The only requirement is the %s be present. */
2242 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2243 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2244
2245 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2246 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2247
2248 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2249 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2250
2251 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2252 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2253
2254 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2255 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2256
2257 #define STUB_ENTRY_NAME "__%s_veneer"
2258
2259 #define CMSE_PREFIX "__acle_se_"
2260
2261 /* The name of the dynamic interpreter. This is put in the .interp
2262 section. */
2263 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2264
2265 /* FDPIC default stack size. */
2266 #define DEFAULT_STACK_SIZE 0x8000
2267
2268 static const unsigned long tls_trampoline [] =
2269 {
2270 0xe08e0000, /* add r0, lr, r0 */
2271 0xe5901004, /* ldr r1, [r0,#4] */
2272 0xe12fff11, /* bx r1 */
2273 };
2274
2275 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2276 {
2277 0xe52d2004, /* push {r2} */
2278 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2279 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2280 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2281 0xe081100f, /* 2: add r1, pc */
2282 0xe12fff12, /* bx r2 */
2283 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2284 + dl_tlsdesc_lazy_resolver(GOT) */
2285 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2286 };
2287
2288 /* ARM FDPIC PLT entry. */
2289 /* The last 5 words contain PLT lazy fragment code and data. */
2290 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2291 {
2292 0xe59fc008, /* ldr r12, .L1 */
2293 0xe08cc009, /* add r12, r12, r9 */
2294 0xe59c9004, /* ldr r9, [r12, #4] */
2295 0xe59cf000, /* ldr pc, [r12] */
2296 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2297 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2298 0xe51fc00c, /* ldr r12, [pc, #-12] */
2299 0xe92d1000, /* push {r12} */
2300 0xe599c004, /* ldr r12, [r9, #4] */
2301 0xe599f000, /* ldr pc, [r9] */
2302 };
2303
2304 /* Thumb FDPIC PLT entry. */
2305 /* The last 5 words contain PLT lazy fragment code and data. */
2306 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2307 {
2308 0xc00cf8df, /* ldr.w r12, .L1 */
2309 0x0c09eb0c, /* add.w r12, r12, r9 */
2310 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2311 0xf000f8dc, /* ldr.w pc, [r12] */
2312 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2313 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2314 0xc008f85f, /* ldr.w r12, .L2 */
2315 0xcd04f84d, /* push {r12} */
2316 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2317 0xf000f8d9, /* ldr.w pc, [r9] */
2318 };
2319
2320 #ifdef FOUR_WORD_PLT
2321
2322 /* The first entry in a procedure linkage table looks like
2323 this. It is set up so that any shared library function that is
2324 called before the relocation has been set up calls the dynamic
2325 linker first. */
2326 static const bfd_vma elf32_arm_plt0_entry [] =
2327 {
2328 0xe52de004, /* str lr, [sp, #-4]! */
2329 0xe59fe010, /* ldr lr, [pc, #16] */
2330 0xe08fe00e, /* add lr, pc, lr */
2331 0xe5bef008, /* ldr pc, [lr, #8]! */
2332 };
2333
2334 /* Subsequent entries in a procedure linkage table look like
2335 this. */
2336 static const bfd_vma elf32_arm_plt_entry [] =
2337 {
2338 0xe28fc600, /* add ip, pc, #NN */
2339 0xe28cca00, /* add ip, ip, #NN */
2340 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2341 0x00000000, /* unused */
2342 };
2343
2344 #else /* not FOUR_WORD_PLT */
2345
2346 /* The first entry in a procedure linkage table looks like
2347 this. It is set up so that any shared library function that is
2348 called before the relocation has been set up calls the dynamic
2349 linker first. */
2350 static const bfd_vma elf32_arm_plt0_entry [] =
2351 {
2352 0xe52de004, /* str lr, [sp, #-4]! */
2353 0xe59fe004, /* ldr lr, [pc, #4] */
2354 0xe08fe00e, /* add lr, pc, lr */
2355 0xe5bef008, /* ldr pc, [lr, #8]! */
2356 0x00000000, /* &GOT[0] - . */
2357 };
2358
2359 /* By default subsequent entries in a procedure linkage table look like
2360 this. Offsets that don't fit into 28 bits will cause link error. */
2361 static const bfd_vma elf32_arm_plt_entry_short [] =
2362 {
2363 0xe28fc600, /* add ip, pc, #0xNN00000 */
2364 0xe28cca00, /* add ip, ip, #0xNN000 */
2365 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2366 };
2367
2368 /* When explicitly asked, we'll use this "long" entry format
2369 which can cope with arbitrary displacements. */
2370 static const bfd_vma elf32_arm_plt_entry_long [] =
2371 {
2372 0xe28fc200, /* add ip, pc, #0xN0000000 */
2373 0xe28cc600, /* add ip, ip, #0xNN00000 */
2374 0xe28cca00, /* add ip, ip, #0xNN000 */
2375 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2376 };
2377
2378 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2379
2380 #endif /* not FOUR_WORD_PLT */
2381
2382 /* The first entry in a procedure linkage table looks like this.
2383 It is set up so that any shared library function that is called before the
2384 relocation has been set up calls the dynamic linker first. */
2385 static const bfd_vma elf32_thumb2_plt0_entry [] =
2386 {
2387 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2388 an instruction maybe encoded to one or two array elements. */
2389 0xf8dfb500, /* push {lr} */
2390 0x44fee008, /* ldr.w lr, [pc, #8] */
2391 /* add lr, pc */
2392 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2393 0x00000000, /* &GOT[0] - . */
2394 };
2395
2396 /* Subsequent entries in a procedure linkage table for thumb only target
2397 look like this. */
2398 static const bfd_vma elf32_thumb2_plt_entry [] =
2399 {
2400 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2401 an instruction maybe encoded to one or two array elements. */
2402 0x0c00f240, /* movw ip, #0xNNNN */
2403 0x0c00f2c0, /* movt ip, #0xNNNN */
2404 0xf8dc44fc, /* add ip, pc */
2405 0xbf00f000 /* ldr.w pc, [ip] */
2406 /* nop */
2407 };
2408
2409 /* The format of the first entry in the procedure linkage table
2410 for a VxWorks executable. */
2411 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2412 {
2413 0xe52dc008, /* str ip,[sp,#-8]! */
2414 0xe59fc000, /* ldr ip,[pc] */
2415 0xe59cf008, /* ldr pc,[ip,#8] */
2416 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2417 };
2418
2419 /* The format of subsequent entries in a VxWorks executable. */
2420 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2421 {
2422 0xe59fc000, /* ldr ip,[pc] */
2423 0xe59cf000, /* ldr pc,[ip] */
2424 0x00000000, /* .long @got */
2425 0xe59fc000, /* ldr ip,[pc] */
2426 0xea000000, /* b _PLT */
2427 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2428 };
2429
2430 /* The format of entries in a VxWorks shared library. */
2431 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2432 {
2433 0xe59fc000, /* ldr ip,[pc] */
2434 0xe79cf009, /* ldr pc,[ip,r9] */
2435 0x00000000, /* .long @got */
2436 0xe59fc000, /* ldr ip,[pc] */
2437 0xe599f008, /* ldr pc,[r9,#8] */
2438 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2439 };
2440
2441 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2442 #define PLT_THUMB_STUB_SIZE 4
2443 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2444 {
2445 0x4778, /* bx pc */
2446 0x46c0 /* nop */
2447 };
2448
2449 /* The entries in a PLT when using a DLL-based target with multiple
2450 address spaces. */
2451 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2452 {
2453 0xe51ff004, /* ldr pc, [pc, #-4] */
2454 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2455 };
2456
2457 /* The first entry in a procedure linkage table looks like
2458 this. It is set up so that any shared library function that is
2459 called before the relocation has been set up calls the dynamic
2460 linker first. */
2461 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2462 {
2463 /* First bundle: */
2464 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2465 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2466 0xe08cc00f, /* add ip, ip, pc */
2467 0xe52dc008, /* str ip, [sp, #-8]! */
2468 /* Second bundle: */
2469 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2470 0xe59cc000, /* ldr ip, [ip] */
2471 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2472 0xe12fff1c, /* bx ip */
2473 /* Third bundle: */
2474 0xe320f000, /* nop */
2475 0xe320f000, /* nop */
2476 0xe320f000, /* nop */
2477 /* .Lplt_tail: */
2478 0xe50dc004, /* str ip, [sp, #-4] */
2479 /* Fourth bundle: */
2480 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2481 0xe59cc000, /* ldr ip, [ip] */
2482 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2483 0xe12fff1c, /* bx ip */
2484 };
2485 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2486
2487 /* Subsequent entries in a procedure linkage table look like this. */
2488 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2489 {
2490 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2491 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2492 0xe08cc00f, /* add ip, ip, pc */
2493 0xea000000, /* b .Lplt_tail */
2494 };
2495
2496 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2497 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2498 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2499 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2500 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2501 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2502 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2503 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2504
/* Classification of one element of a stub template.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,	/* 16-bit Thumb instruction.  */
  THUMB32_TYPE,		/* 32-bit Thumb instruction.  */
  ARM_TYPE,		/* 32-bit ARM instruction.  */
  DATA_TYPE		/* Literal data word.  */
};
2512
2513 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2514 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2515 is inserted in arm_build_one_stub(). */
2516 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2517 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2518 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2519 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2520 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2521 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2522 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2523 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2524
/* One element of a stub template: an instruction encoding (or data
   word) together with the relocation applied to it when the stub is
   built.  */
typedef struct
{
  bfd_vma data;			/* Instruction encoding or data value.  */
  enum stub_insn_type type;	/* How to emit DATA (Thumb16/32, ARM, data).  */
  unsigned int r_type;		/* ELF relocation applied to this element.  */
  int reloc_addend;		/* Addend for that relocation.  */
} insn_sequence;
2532
2533 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2534 to reach the stub if necessary. */
2535 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2536 {
2537 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2538 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2539 };
2540
2541 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2542 available. */
2543 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2544 {
2545 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2546 ARM_INSN (0xe12fff1c), /* bx ip */
2547 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2548 };
2549
2550 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2551 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2552 {
2553 THUMB16_INSN (0xb401), /* push {r0} */
2554 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2555 THUMB16_INSN (0x4684), /* mov ip, r0 */
2556 THUMB16_INSN (0xbc01), /* pop {r0} */
2557 THUMB16_INSN (0x4760), /* bx ip */
2558 THUMB16_INSN (0xbf00), /* nop */
2559 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2560 };
2561
2562 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2563 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2564 {
2565 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2566 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2567 };
2568
2569 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2570 M-profile architectures. */
2571 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2572 {
2573 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2574 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2575 THUMB16_INSN (0x4760), /* bx ip */
2576 };
2577
2578 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2579 allowed. */
2580 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2581 {
2582 THUMB16_INSN (0x4778), /* bx pc */
2583 THUMB16_INSN (0x46c0), /* nop */
2584 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2585 ARM_INSN (0xe12fff1c), /* bx ip */
2586 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2587 };
2588
2589 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2590 available. */
2591 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2592 {
2593 THUMB16_INSN (0x4778), /* bx pc */
2594 THUMB16_INSN (0x46c0), /* nop */
2595 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2596 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2597 };
2598
2599 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2600 one, when the destination is close enough. */
2601 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2602 {
2603 THUMB16_INSN (0x4778), /* bx pc */
2604 THUMB16_INSN (0x46c0), /* nop */
2605 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2606 };
2607
2608 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2609 blx to reach the stub if necessary. */
2610 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2611 {
2612 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2613 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2614 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2615 };
2616
2617 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2618 blx to reach the stub if necessary. We can not add into pc;
2619 it is not guaranteed to mode switch (different in ARMv6 and
2620 ARMv7). */
2621 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2622 {
2623 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2624 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2625 ARM_INSN (0xe12fff1c), /* bx ip */
2626 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2627 };
2628
2629 /* V4T ARM -> ARM long branch stub, PIC. */
2630 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2631 {
2632 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2633 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2634 ARM_INSN (0xe12fff1c), /* bx ip */
2635 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2636 };
2637
/* V4T Thumb -> ARM long branch stub, PIC.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
  ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  /* Comment fixed to match the -4 addend, as in the other PIC stubs.  */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2647
2648 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2649 architectures. */
2650 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2651 {
2652 THUMB16_INSN (0xb401), /* push {r0} */
2653 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2654 THUMB16_INSN (0x46fc), /* mov ip, pc */
2655 THUMB16_INSN (0x4484), /* add ip, r0 */
2656 THUMB16_INSN (0xbc01), /* pop {r0} */
2657 THUMB16_INSN (0x4760), /* bx ip */
2658 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2659 };
2660
2661 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2662 allowed. */
2663 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2664 {
2665 THUMB16_INSN (0x4778), /* bx pc */
2666 THUMB16_INSN (0x46c0), /* nop */
2667 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2668 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2669 ARM_INSN (0xe12fff1c), /* bx ip */
2670 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2671 };
2672
2673 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2674 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2675 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2676 {
2677 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2678 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2679 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2680 };
2681
/* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
   long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
{
  THUMB16_INSN (0x4778),	     /* bx   pc */
  THUMB16_INSN (0x46c0),	     /* nop */
  ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
  ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  /* Comment fixed to match the -4 addend, as in the other PIC stubs.  */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
};
2692
2693 /* NaCl ARM -> ARM long branch stub. */
2694 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2695 {
2696 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2697 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2698 ARM_INSN (0xe12fff1c), /* bx ip */
2699 ARM_INSN (0xe320f000), /* nop */
2700 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2701 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2702 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2703 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2704 };
2705
2706 /* NaCl ARM -> ARM long branch stub, PIC. */
2707 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2708 {
2709 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2710 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2711 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2712 ARM_INSN (0xe12fff1c), /* bx ip */
2713 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2714 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
2715 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2716 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2717 };
2718
2719 /* Stub used for transition to secure state (aka SG veneer). */
2720 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2721 {
2722 THUMB32_INSN (0xe97fe97f), /* sg. */
2723 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2724 };
2725
2726
2727 /* Cortex-A8 erratum-workaround stubs. */
2728
2729 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2730 can't use a conditional branch to reach this stub). */
2731
2732 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2733 {
2734 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2735 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2736 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2737 };
2738
2739 /* Stub used for b.w and bl.w instructions. */
2740
2741 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2742 {
2743 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2744 };
2745
2746 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2747 {
2748 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2749 };
2750
2751 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2752 instruction (which switches to ARM mode) to point to this stub. Jump to the
2753 real destination using an ARM-mode branch. */
2754
2755 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2756 {
2757 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2758 };
2759
2760 /* For each section group there can be a specially created linker section
2761 to hold the stubs for that group. The name of the stub section is based
2762 upon the name of another section within that group with the suffix below
2763 applied.
2764
2765 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2766 create what appeared to be a linker stub section when it actually
2767 contained user code/data. For example, consider this fragment:
2768
2769 const char * stubborn_problems[] = { "np" };
2770
2771 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2772 section called:
2773
2774 .data.rel.local.stubborn_problems
2775
2776 This then causes problems in arm32_arm_build_stubs() as it triggers:
2777
2778 // Ignore non-stub sections.
2779 if (!strstr (stub_sec->name, STUB_SUFFIX))
2780 continue;
2781
2782 And so the section would be ignored instead of being processed. Hence
2783 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2784 C identifier. */
2785 #define STUB_SUFFIX ".__stub"
2786
/* One entry per long/short branch stub defined above.

   This is an X-macro list: DEF_STUB is redefined before each expansion
   site so that the elf32_arm_stub_type enumeration and the
   stub_definitions table below are generated from the same list and
   therefore cannot get out of sync.  */
#define DEF_STUBS \
  DEF_STUB(long_branch_any_any)	\
  DEF_STUB(long_branch_v4t_arm_thumb) \
  DEF_STUB(long_branch_thumb_only) \
  DEF_STUB(long_branch_v4t_thumb_thumb)	\
  DEF_STUB(long_branch_v4t_thumb_arm) \
  DEF_STUB(short_branch_v4t_thumb_arm)	\
  DEF_STUB(long_branch_any_arm_pic) \
  DEF_STUB(long_branch_any_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
  DEF_STUB(long_branch_v4t_arm_thumb_pic) \
  DEF_STUB(long_branch_v4t_thumb_arm_pic) \
  DEF_STUB(long_branch_thumb_only_pic) \
  DEF_STUB(long_branch_any_tls_pic) \
  DEF_STUB(long_branch_v4t_thumb_tls_pic) \
  DEF_STUB(long_branch_arm_nacl) \
  DEF_STUB(long_branch_arm_nacl_pic) \
  DEF_STUB(cmse_branch_thumb_only) \
  DEF_STUB(a8_veneer_b_cond) \
  DEF_STUB(a8_veneer_b) \
  DEF_STUB(a8_veneer_bl) \
  DEF_STUB(a8_veneer_blx) \
  DEF_STUB(long_branch_thumb2_only) \
  DEF_STUB(long_branch_thumb2_only_pure)

/* Enumeration of stub types.  arm_stub_none (0) means "no stub needed";
   max_stub_type bounds iteration over the valid stub types.  */
#define DEF_STUB(x) arm_stub_##x,
enum elf32_arm_stub_type
{
  arm_stub_none,
  DEF_STUBS
  max_stub_type
};
#undef DEF_STUB
2821
/* Note the first a8_veneer type.  */
const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;

/* Template/size pair describing one stub's instruction sequence.  */
typedef struct
{
  const insn_sequence* template_sequence;
  int template_size;
} stub_def;

/* Table mapping each elf32_arm_stub_type to its template.  Index 0
   corresponds to arm_stub_none and is deliberately empty.  */
#define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
static const stub_def stub_definitions[] =
{
  {NULL, 0},
  DEF_STUBS
};
2837
/* One entry in the stub hash table: describes a single linker-generated
   stub (long-branch veneer, erratum workaround, SG veneer, ...).  */
struct elf32_arm_stub_hash_entry
{
  /* Base hash table entry structure.  */
  struct bfd_hash_entry root;

  /* The stub section.  */
  asection *stub_sec;

  /* Offset within stub_sec of the beginning of this stub.
     (bfd_vma) -1 while the stub has not yet been laid out.  */
  bfd_vma stub_offset;

  /* Given the symbol's value and its section we can determine its final
     value when building the stubs (so the stub knows where to jump).  */
  bfd_vma target_value;
  asection *target_section;

  /* Same as above but for the source of the branch to the stub.  Used for
     Cortex-A8 erratum workaround to patch it to branch to the stub.  As
     such, source section does not need to be recorded since Cortex-A8 erratum
     workaround stubs are only generated when both source and target are in the
     same section.  */
  bfd_vma source_value;

  /* The instruction which caused this stub to be generated (only valid for
     Cortex-A8 erratum workaround stubs at present).  */
  unsigned long orig_insn;

  /* The stub type.  */
  enum elf32_arm_stub_type stub_type;
  /* Its encoding size in bytes.  */
  int stub_size;
  /* Its template.  */
  const insn_sequence *stub_template;
  /* The size of the template (number of entries).  */
  int stub_template_size;

  /* The symbol table entry, if any, that this was derived from.  */
  struct elf32_arm_link_hash_entry *h;

  /* Type of branch.  */
  enum arm_st_branch_type branch_type;

  /* Where this stub is being called from, or, in the case of combined
     stub sections, the first input section in the group.  */
  asection *id_sec;

  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this does not, so
     it can be friendlier.  */
  char *output_name;
};
2889
/* Used to build a map of a section.  This is required for mixed-endian
   code/data.  */

typedef struct elf32_elf_section_map
{
  /* Section-relative address the mapping applies from.  */
  bfd_vma vma;
  /* Mapping symbol class character — presumably 'a'/'t'/'d' as per the
     ARM ELF mapping-symbol convention; confirm against the users of
     this map.  */
  char type;
}
elf32_arm_section_map;
2899
/* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */

typedef enum
{
  VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
  VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
  VFP11_ERRATUM_ARM_VENEER,
  VFP11_ERRATUM_THUMB_VENEER
}
elf32_vfp11_erratum_type;

/* Singly-linked list node; the union member in use is selected by TYPE
   (branch entries use U.B, veneer entries use U.V).  */
typedef struct elf32_vfp11_erratum_list
{
  struct elf32_vfp11_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch targets.  */
      struct elf32_vfp11_erratum_list *veneer;
      /* The VFP instruction being worked around.  */
      unsigned int vfp_insn;
    } b;
    struct
    {
      /* The branch that targets this veneer.  */
      struct elf32_vfp11_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_vfp11_erratum_type type;
}
elf32_vfp11_erratum_list;
2931
/* Information about a STM32L4XX erratum veneer, or a branch to such a
   veneer.  */
typedef enum
{
  STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
  STM32L4XX_ERRATUM_VENEER
}
elf32_stm32l4xx_erratum_type;

/* Singly-linked list node; the union member in use is selected by TYPE
   (branch entries use U.B, veneer entries use U.V).  */
typedef struct elf32_stm32l4xx_erratum_list
{
  struct elf32_stm32l4xx_erratum_list *next;
  bfd_vma vma;
  union
  {
    struct
    {
      /* The veneer this branch targets.  */
      struct elf32_stm32l4xx_erratum_list *veneer;
      /* The instruction being worked around.  */
      unsigned int insn;
    } b;
    struct
    {
      /* The branch that targets this veneer.  */
      struct elf32_stm32l4xx_erratum_list *branch;
      unsigned int id;
    } v;
  } u;
  elf32_stm32l4xx_erratum_type type;
}
elf32_stm32l4xx_erratum_list;
2961
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
typedef enum
{
  DELETE_EXIDX_ENTRY,
  INSERT_EXIDX_CANTUNWIND_AT_END
}
arm_unwind_edit_type;

/* A (sorted) list of edits to apply to an unwind table.  */
typedef struct arm_unwind_table_edit
{
  arm_unwind_edit_type type;
  /* Note: we sometimes want to insert an unwind entry corresponding to a
     section different from the one we're currently writing out, so record the
     (text) section this edit relates to here.  */
  asection *linked_section;
  /* Index of the unwind table entry the edit applies to.  */
  unsigned int index;
  struct arm_unwind_table_edit *next;
}
arm_unwind_table_edit;
2981
/* ARM-specific per-section data, extending the generic ELF section
   data.  Retrieved via the elf32_arm_section_data() accessor below.  */
typedef struct _arm_elf_section_data
{
  /* Information about mapping symbols.  */
  struct bfd_elf_section_data elf;
  /* Number of entries used in MAP, and its allocated capacity.  */
  unsigned int mapcount;
  unsigned int mapsize;
  elf32_arm_section_map *map;
  /* Information about CPU errata.  */
  unsigned int erratumcount;
  elf32_vfp11_erratum_list *erratumlist;
  unsigned int stm32l4xx_erratumcount;
  elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
  unsigned int additional_reloc_count;
  /* Information about unwind tables.  */
  union
  {
    /* Unwind info attached to a text section.  */
    struct
    {
      asection *arm_exidx_sec;
    } text;

    /* Unwind info attached to an .ARM.exidx section.  */
    struct
    {
      arm_unwind_table_edit *unwind_edit_list;
      arm_unwind_table_edit *unwind_edit_tail;
    } exidx;
  } u;
}
_arm_elf_section_data;

/* Access a section's ARM-specific data.  */
#define elf32_arm_section_data(sec) \
  ((_arm_elf_section_data *) elf_section_data (sec))
3016
/* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so may be created multiple times: we use an array of these entries whilst
   relaxing which we can refresh easily, then create stubs for each potentially
   erratum-triggering instruction once we've settled on a solution.  */

struct a8_erratum_fix
{
  /* Where the offending branch lives.  */
  bfd *input_bfd;
  asection *section;
  bfd_vma offset;
  /* Where the branch was going.  */
  bfd_vma target_offset;
  /* The branch instruction to be patched.  */
  unsigned long orig_insn;
  char *stub_name;
  enum elf32_arm_stub_type stub_type;
  enum arm_st_branch_type branch_type;
};

/* A table of relocs applied to branches which might trigger Cortex-A8
   erratum.  */

struct a8_erratum_reloc
{
  /* Source and destination addresses of the branch.  */
  bfd_vma from;
  bfd_vma destination;
  struct elf32_arm_link_hash_entry *hash;
  const char *sym_name;
  unsigned int r_type;
  enum arm_st_branch_type branch_type;
  /* True if a non-A8 long-branch stub already covers this branch.  */
  bfd_boolean non_a8_stub;
};
3048
/* The size of the thread control block, in bytes.  */
#define TCB_SIZE 8

/* ARM-specific information about a PLT entry, over and above the usual
   gotplt_union.  */
struct arm_plt_info
{
  /* We reference count Thumb references to a PLT entry separately,
     so that we can emit the Thumb trampoline only if needed.  */
  bfd_signed_vma thumb_refcount;

  /* Some references from Thumb code may be eliminated by BL->BLX
     conversion, so record them separately.  */
  bfd_signed_vma maybe_thumb_refcount;

  /* How many of the recorded PLT accesses were from non-call relocations.
     This information is useful when deciding whether anything takes the
     address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
     non-call references to the function should resolve directly to the
     real runtime target.  */
  unsigned int noncall_refcount;

  /* Since PLT entries have variable size if the Thumb prologue is
     used, we need to record the index into .got.plt instead of
     recomputing it from the PLT offset.  */
  bfd_signed_vma got_offset;
};
3076
/* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
struct arm_local_iplt_info
{
  /* The information that is usually found in the generic ELF part of
     the hash table entry.  */
  union gotplt_union root;

  /* The information that is usually found in the ARM-specific part of
     the hash table entry.  */
  struct arm_plt_info arm;

  /* A list of all potential dynamic relocations against this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;
};

/* Structure to handle FDPIC support for local functions.  */
struct fdpic_local {
  /* Reference counts for funcdesc-style relocations.  */
  unsigned int funcdesc_cnt;
  unsigned int gotofffuncdesc_cnt;
  /* Offset of the function descriptor; bit 0 doubles as an
     "already filled in" flag (see arm_elf_fill_funcdesc).  */
  int funcdesc_offset;
};
3098
/* ARM-specific per-bfd data, extending the generic ELF tdata.  The
   local_* arrays are indexed by local symbol number and allocated
   together in elf32_arm_allocate_local_sym_info.  */
struct elf_arm_obj_tdata
{
  struct elf_obj_tdata root;

  /* tls_type for each local got entry.  */
  char *local_got_tls_type;

  /* GOTPLT entries for TLS descriptors.  */
  bfd_vma *local_tlsdesc_gotent;

  /* Information for local symbols that need entries in .iplt.  */
  struct arm_local_iplt_info **local_iplt;

  /* Zero to warn when linking objects with incompatible enum sizes.  */
  int no_enum_size_warning;

  /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
  int no_wchar_size_warning;

  /* Maintains FDPIC counters and funcdesc info.  */
  struct fdpic_local *local_fdpic_cnts;
};

#define elf_arm_tdata(bfd) \
  ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)

#define elf32_arm_local_got_tls_type(bfd) \
  (elf_arm_tdata (bfd)->local_got_tls_type)

#define elf32_arm_local_tlsdesc_gotent(bfd) \
  (elf_arm_tdata (bfd)->local_tlsdesc_gotent)

#define elf32_arm_local_iplt(bfd) \
  (elf_arm_tdata (bfd)->local_iplt)

#define elf32_arm_local_fdpic_cnts(bfd) \
  (elf_arm_tdata (bfd)->local_fdpic_cnts)

/* True if BFD is an ELF bfd carrying ARM-specific tdata.  */
#define is_arm_elf(bfd) \
  (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
   && elf_tdata (bfd) != NULL \
   && elf_object_id (bfd) == ARM_ELF_DATA)
3141
/* Allocate ARM-specific tdata for ABFD, tagged with ARM_ELF_DATA so
   that is_arm_elf() can later identify it.  */
static bfd_boolean
elf32_arm_mkobject (bfd *abfd)
{
  return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
				  ARM_ELF_DATA);
}
3148
/* Downcast a generic hash entry to the ARM-specific entry type.  */
#define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))

/* Structure to handle FDPIC support for extern functions.  */
struct fdpic_global {
  /* Reference counts for the various funcdesc-style relocations.  */
  unsigned int gotofffuncdesc_cnt;
  unsigned int gotfuncdesc_cnt;
  unsigned int funcdesc_cnt;
  /* Assigned offsets; -1 while unassigned (see
     elf32_arm_link_hash_newfunc).  */
  int funcdesc_offset;
  int gotfuncdesc_offset;
};
3159
/* Arm ELF linker hash entry.  */
struct elf32_arm_link_hash_entry
{
  struct elf_link_hash_entry root;

  /* Track dynamic relocs copied for this symbol.  */
  struct elf_dyn_relocs *dyn_relocs;

  /* ARM-specific PLT information.  */
  struct arm_plt_info plt;

/* Bit flags recording which kinds of GOT entry this symbol needs.  */
#define GOT_UNKNOWN 0
#define GOT_NORMAL 1
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLS_GDESC 8
/* True if TYPE requires either a general-dynamic or descriptor entry.  */
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
  unsigned int tls_type : 8;

  /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
  unsigned int is_iplt : 1;

  unsigned int unused : 23;

  /* Offset of the GOTPLT entry reserved for the TLS descriptor,
     starting at the end of the jump table.  */
  bfd_vma tlsdesc_got;

  /* The symbol marking the real symbol location for exported thumb
     symbols with Arm stubs.  */
  struct elf_link_hash_entry *export_glue;

  /* A pointer to the most recently used stub hash entry against this
     symbol.  */
  struct elf32_arm_stub_hash_entry *stub_cache;

  /* Counter for FDPIC relocations against this symbol.  */
  struct fdpic_global fdpic_cnts;
};
3199
/* Traverse an arm ELF linker hash table.  */
#define elf32_arm_link_hash_traverse(table, func, info)			\
  (elf_link_hash_traverse						\
   (&(table)->root,							\
    (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
    (info)))

/* Get the ARM elf linker hash table from a link_info structure.
   Yields NULL if the hash table is not actually an ARM one.  */
#define elf32_arm_hash_table(info) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
  == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)

/* Typed wrapper around bfd_hash_lookup for the stub hash table.  */
#define arm_stub_hash_lookup(table, string, create, copy) \
  ((struct elf32_arm_stub_hash_entry *) \
   bfd_hash_lookup ((table), (string), (create), (copy)))

/* Array to keep track of which stub sections have been created, and
   information on stub grouping.  */
struct map_stub
{
  /* This is the section to which stubs in the group will be
     attached.  */
  asection *link_sec;
  /* The stub section.  */
  asection *stub_sec;
};

/* Size in bytes of the TLS-descriptor part of the jump table.  */
#define elf32_arm_compute_jump_table_size(htab) \
  ((htab)->next_tls_desc_index * 4)
3229
/* ARM ELF linker hash table.  */
struct elf32_arm_link_hash_table
{
  /* The main hash table.  */
  struct elf_link_hash_table root;

  /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
  bfd_size_type thumb_glue_size;

  /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
  bfd_size_type arm_glue_size;

  /* The size in bytes of section containing the ARMv4 BX veneers.  */
  bfd_size_type bx_glue_size;

  /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
     veneer has been populated.  */
  bfd_vma bx_glue_offset[15];

  /* The size in bytes of the section containing glue for VFP11 erratum
     veneers.  */
  bfd_size_type vfp11_erratum_glue_size;

  /* The size in bytes of the section containing glue for STM32L4XX erratum
     veneers.  */
  bfd_size_type stm32l4xx_erratum_glue_size;

  /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
     holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
     elf32_arm_write_section().  */
  struct a8_erratum_fix *a8_erratum_fixes;
  unsigned int num_a8_erratum_fixes;

  /* An arbitrary input BFD chosen to hold the glue sections.  */
  bfd * bfd_of_glue_owner;

  /* Nonzero to output a BE8 image.  */
  int byteswap_code;

  /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
     Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
  int target1_is_rel;

  /* The relocation to use for R_ARM_TARGET2 relocations.  */
  int target2_reloc;

  /* 0 = Ignore R_ARM_V4BX.
     1 = Convert BX to MOV PC.
     2 = Generate v4 interworking stubs.  */
  int fix_v4bx;

  /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
  int fix_cortex_a8;

  /* Whether we should fix the ARM1176 BLX immediate issue.  */
  int fix_arm1176;

  /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
  int use_blx;

  /* What sort of code sequences we should look for which may trigger the
     VFP11 denorm erratum.  */
  bfd_arm_vfp11_fix vfp11_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_vfp11_fixes;

  /* What sort of code sequences we should look for which may trigger the
     STM32L4XX erratum.  */
  bfd_arm_stm32l4xx_fix stm32l4xx_fix;

  /* Global counter for the number of fixes we have emitted.  */
  int num_stm32l4xx_fixes;

  /* Nonzero to force PIC branch veneers.  */
  int pic_veneer;

  /* The number of bytes in the initial entry in the PLT.  */
  bfd_size_type plt_header_size;

  /* The number of bytes in the subsequent PLT entries.  */
  bfd_size_type plt_entry_size;

  /* True if the target system is VxWorks.  */
  int vxworks_p;

  /* True if the target system is Symbian OS.  */
  int symbian_p;

  /* True if the target system is Native Client.  */
  int nacl_p;

  /* True if the target uses REL relocations.  */
  bfd_boolean use_rel;

  /* Nonzero if import library must be a secure gateway import library
     as per ARMv8-M Security Extensions.  */
  int cmse_implib;

  /* The import library whose symbols' address must remain stable in
     the import library generated.  */
  bfd *in_implib_bfd;

  /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
  bfd_vma next_tls_desc_index;

  /* How many R_ARM_TLS_DESC relocations were generated so far.  */
  bfd_vma num_tls_desc;

  /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
  asection *srelplt2;

  /* The offset into splt of the PLT entry for the TLS descriptor
     resolver.  Special values are 0, if not necessary (or not found
     to be necessary yet), and -1 if needed but not determined
     yet.  */
  bfd_vma dt_tlsdesc_plt;

  /* The offset into sgot of the GOT entry used by the PLT entry
     above.  */
  bfd_vma dt_tlsdesc_got;

  /* Offset in .plt section of tls_arm_trampoline.  */
  bfd_vma tls_trampoline;

  /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
  union
  {
    bfd_signed_vma refcount;
    bfd_vma offset;
  } tls_ldm_got;

  /* Small local sym cache.  */
  struct sym_cache sym_cache;

  /* For convenience in allocate_dynrelocs.  */
  bfd * obfd;

  /* The amount of space used by the reserved portion of the sgotplt
     section, plus whatever space is used by the jump slots.  */
  bfd_vma sgotplt_jump_table_size;

  /* The stub hash table.  */
  struct bfd_hash_table stub_hash_table;

  /* Linker stub bfd.  */
  bfd *stub_bfd;

  /* Linker call-backs.  */
  asection * (*add_stub_section) (const char *, asection *, asection *,
				  unsigned int);
  void (*layout_sections_again) (void);

  /* Array to keep track of which stub sections have been created, and
     information on stub grouping.  */
  struct map_stub *stub_group;

  /* Input stub section holding secure gateway veneers.  */
  asection *cmse_stub_sec;

  /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
     start to be allocated.  */
  bfd_vma new_cmse_stub_offset;

  /* Number of elements in stub_group.  */
  unsigned int top_id;

  /* Assorted information used by elf32_arm_size_stubs.  */
  unsigned int bfd_count;
  unsigned int top_index;
  asection **input_list;

  /* True if the target system uses FDPIC.  */
  int fdpic_p;

  /* Fixup section.  Used for FDPIC.  */
  asection *srofixup;
};
3408
3409 /* Add an FDPIC read-only fixup. */
3410 static void
3411 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3412 {
3413 bfd_vma fixup_offset;
3414
3415 fixup_offset = srofixup->reloc_count++ * 4;
3416 BFD_ASSERT (fixup_offset < srofixup->size);
3417 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3418 }
3419
/* Count trailing zero bits in MASK.  NOTE(review): for MASK == 0 the
   builtin path is undefined and the fallback returns the bit width, so
   callers are expected to pass a non-zero mask.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit = 0;

  while (bit < 8 * sizeof (mask) && (mask & 0x1) == 0)
    {
      mask >>= 1;
      bit++;
    }
  return bit;
#endif
}
3437
/* Return the number of set bits in MASK.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;

  while (mask != 0)
    {
      count += mask & 0x1;
      mask >>= 1;
    }
  return count;
#endif
}
3456
3457 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3458 asection *sreloc, Elf_Internal_Rela *rel);
3459
/* Fill in the FDPIC function descriptor at OFFSET in the GOT and emit
   the information the loader needs to relocate it: a dynamic
   R_ARM_FUNCDESC_VALUE reloc for PIC links, or two .rofixup entries
   otherwise.  Bit 0 of *FUNCDESC_OFFSET serves as an "already filled"
   flag so the descriptor is written only once.  */
static void
arm_elf_fill_funcdesc(bfd *output_bfd,
		      struct bfd_link_info *info,
		      int *funcdesc_offset,
		      int dynindx,
		      int offset,
		      bfd_vma addr,
		      bfd_vma dynreloc_value,
		      bfd_vma seg)
{
  if ((*funcdesc_offset & 1) == 0)
    {
      struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
      asection *sgot = globals->root.sgot;

      if (bfd_link_pic(info))
	{
	  asection *srelgot = globals->root.srelgot;
	  Elf_Internal_Rela outrel;

	  /* Let the dynamic linker compute the descriptor value.  */
	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
	  outrel.r_addend = 0;

	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
	}
      else
	{
	  /* Static FDPIC link: store the final values now and record a
	     rofixup for each word so the loader can rebase them.  */
	  struct elf_link_hash_entry *hgot = globals->root.hgot;
	  bfd_vma got_value = hgot->root.u.def.value
	    + hgot->root.u.def.section->output_section->vma
	    + hgot->root.u.def.section->output_offset;

	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset);
	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
			      sgot->output_section->vma + sgot->output_offset
			      + offset + 4);
	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
	}
      /* Mark the descriptor as filled.  */
      *funcdesc_offset |= 1;
    }
}
3507
3508 /* Create an entry in an ARM ELF linker hash table. */
3509
3510 static struct bfd_hash_entry *
3511 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3512 struct bfd_hash_table * table,
3513 const char * string)
3514 {
3515 struct elf32_arm_link_hash_entry * ret =
3516 (struct elf32_arm_link_hash_entry *) entry;
3517
3518 /* Allocate the structure if it has not already been allocated by a
3519 subclass. */
3520 if (ret == NULL)
3521 ret = (struct elf32_arm_link_hash_entry *)
3522 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3523 if (ret == NULL)
3524 return (struct bfd_hash_entry *) ret;
3525
3526 /* Call the allocation method of the superclass. */
3527 ret = ((struct elf32_arm_link_hash_entry *)
3528 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3529 table, string));
3530 if (ret != NULL)
3531 {
3532 ret->dyn_relocs = NULL;
3533 ret->tls_type = GOT_UNKNOWN;
3534 ret->tlsdesc_got = (bfd_vma) -1;
3535 ret->plt.thumb_refcount = 0;
3536 ret->plt.maybe_thumb_refcount = 0;
3537 ret->plt.noncall_refcount = 0;
3538 ret->plt.got_offset = -1;
3539 ret->is_iplt = FALSE;
3540 ret->export_glue = NULL;
3541
3542 ret->stub_cache = NULL;
3543
3544 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3545 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3546 ret->fdpic_cnts.funcdesc_cnt = 0;
3547 ret->fdpic_cnts.funcdesc_offset = -1;
3548 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3549 }
3550
3551 return (struct bfd_hash_entry *) ret;
3552 }
3553
3554 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3555 symbols. */
3556
3557 static bfd_boolean
3558 elf32_arm_allocate_local_sym_info (bfd *abfd)
3559 {
3560 if (elf_local_got_refcounts (abfd) == NULL)
3561 {
3562 bfd_size_type num_syms;
3563 bfd_size_type size;
3564 char *data;
3565
3566 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3567 size = num_syms * (sizeof (bfd_signed_vma)
3568 + sizeof (struct arm_local_iplt_info *)
3569 + sizeof (bfd_vma)
3570 + sizeof (char)
3571 + sizeof (struct fdpic_local));
3572 data = bfd_zalloc (abfd, size);
3573 if (data == NULL)
3574 return FALSE;
3575
3576 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3577 data += num_syms * sizeof (struct fdpic_local);
3578
3579 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3580 data += num_syms * sizeof (bfd_signed_vma);
3581
3582 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3583 data += num_syms * sizeof (struct arm_local_iplt_info *);
3584
3585 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3586 data += num_syms * sizeof (bfd_vma);
3587
3588 elf32_arm_local_got_tls_type (abfd) = data;
3589 }
3590 return TRUE;
3591 }
3592
3593 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3594 to input bfd ABFD. Create the information if it doesn't already exist.
3595 Return null if an allocation fails. */
3596
3597 static struct arm_local_iplt_info *
3598 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3599 {
3600 struct arm_local_iplt_info **ptr;
3601
3602 if (!elf32_arm_allocate_local_sym_info (abfd))
3603 return NULL;
3604
3605 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3606 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3607 if (*ptr == NULL)
3608 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3609 return *ptr;
3610 }
3611
3612 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3613 in ABFD's symbol table. If the symbol is global, H points to its
3614 hash table entry, otherwise H is null.
3615
3616 Return true if the symbol does have PLT information. When returning
3617 true, point *ROOT_PLT at the target-independent reference count/offset
3618 union and *ARM_PLT at the ARM-specific information. */
3619
3620 static bfd_boolean
3621 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3622 struct elf32_arm_link_hash_entry *h,
3623 unsigned long r_symndx, union gotplt_union **root_plt,
3624 struct arm_plt_info **arm_plt)
3625 {
3626 struct arm_local_iplt_info *local_iplt;
3627
3628 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3629 return FALSE;
3630
3631 if (h != NULL)
3632 {
3633 *root_plt = &h->root.plt;
3634 *arm_plt = &h->plt;
3635 return TRUE;
3636 }
3637
3638 if (elf32_arm_local_iplt (abfd) == NULL)
3639 return FALSE;
3640
3641 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3642 if (local_iplt == NULL)
3643 return FALSE;
3644
3645 *root_plt = &local_iplt->root;
3646 *arm_plt = &local_iplt->arm;
3647 return TRUE;
3648 }
3649
3650 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3651
3652 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3653 before it. */
3654
3655 static bfd_boolean
3656 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3657 struct arm_plt_info *arm_plt)
3658 {
3659 struct elf32_arm_link_hash_table *htab;
3660
3661 htab = elf32_arm_hash_table (info);
3662
3663 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3664 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3665 }
3666
3667 /* Return a pointer to the head of the dynamic reloc list that should
3668 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3669 ABFD's symbol table. Return null if an error occurs. */
3670
3671 static struct elf_dyn_relocs **
3672 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3673 Elf_Internal_Sym *isym)
3674 {
3675 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3676 {
3677 struct arm_local_iplt_info *local_iplt;
3678
3679 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3680 if (local_iplt == NULL)
3681 return NULL;
3682 return &local_iplt->dyn_relocs;
3683 }
3684 else
3685 {
3686 /* Track dynamic relocs needed for local syms too.
3687 We really need local syms available to do this
3688 easily. Oh well. */
3689 asection *s;
3690 void *vpp;
3691
3692 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3693 if (s == NULL)
3694 abort ();
3695
3696 vpp = &elf_section_data (s)->local_dynrel;
3697 return (struct elf_dyn_relocs **) vpp;
3698 }
3699 }
3700
3701 /* Initialize an entry in the stub hash table. */
3702
3703 static struct bfd_hash_entry *
3704 stub_hash_newfunc (struct bfd_hash_entry *entry,
3705 struct bfd_hash_table *table,
3706 const char *string)
3707 {
3708 /* Allocate the structure if it has not already been allocated by a
3709 subclass. */
3710 if (entry == NULL)
3711 {
3712 entry = (struct bfd_hash_entry *)
3713 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3714 if (entry == NULL)
3715 return entry;
3716 }
3717
3718 /* Call the allocation method of the superclass. */
3719 entry = bfd_hash_newfunc (entry, table, string);
3720 if (entry != NULL)
3721 {
3722 struct elf32_arm_stub_hash_entry *eh;
3723
3724 /* Initialize the local fields. */
3725 eh = (struct elf32_arm_stub_hash_entry *) entry;
3726 eh->stub_sec = NULL;
3727 eh->stub_offset = (bfd_vma) -1;
3728 eh->source_value = 0;
3729 eh->target_value = 0;
3730 eh->target_section = NULL;
3731 eh->orig_insn = 0;
3732 eh->stub_type = arm_stub_none;
3733 eh->stub_size = 0;
3734 eh->stub_template = NULL;
3735 eh->stub_template_size = -1;
3736 eh->h = NULL;
3737 eh->id_sec = NULL;
3738 eh->output_name = NULL;
3739 }
3740
3741 return entry;
3742 }
3743
/* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
   shortcuts to them in our hash table.  Returns FALSE on failure.  */

static bfd_boolean
create_got_section (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* BPABI objects never have a GOT, or associated sections.  */
  if (htab->symbian_p)
    return TRUE;

  if (! _bfd_elf_create_got_section (dynobj, info))
    return FALSE;

  /* Also create .rofixup, the read-only fixup section used for FDPIC
     (see arm_elf_add_rofixup).  */
  if (htab->fdpic_p)
    {
      htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
      if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
	return FALSE;
    }

  return TRUE;
}
3775
3776 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3777
3778 static bfd_boolean
3779 create_ifunc_sections (struct bfd_link_info *info)
3780 {
3781 struct elf32_arm_link_hash_table *htab;
3782 const struct elf_backend_data *bed;
3783 bfd *dynobj;
3784 asection *s;
3785 flagword flags;
3786
3787 htab = elf32_arm_hash_table (info);
3788 dynobj = htab->root.dynobj;
3789 bed = get_elf_backend_data (dynobj);
3790 flags = bed->dynamic_sec_flags;
3791
3792 if (htab->root.iplt == NULL)
3793 {
3794 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3795 flags | SEC_READONLY | SEC_CODE);
3796 if (s == NULL
3797 || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3798 return FALSE;
3799 htab->root.iplt = s;
3800 }
3801
3802 if (htab->root.irelplt == NULL)
3803 {
3804 s = bfd_make_section_anyway_with_flags (dynobj,
3805 RELOC_SECTION (htab, ".iplt"),
3806 flags | SEC_READONLY);
3807 if (s == NULL
3808 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3809 return FALSE;
3810 htab->root.irelplt = s;
3811 }
3812
3813 if (htab->root.igotplt == NULL)
3814 {
3815 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3816 if (s == NULL
3817 || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3818 return FALSE;
3819 htab->root.igotplt = s;
3820 }
3821 return TRUE;
3822 }
3823
3824 /* Determine if we're dealing with a Thumb only architecture. */
3825
3826 static bfd_boolean
3827 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3828 {
3829 int arch;
3830 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3831 Tag_CPU_arch_profile);
3832
3833 if (profile)
3834 return profile == 'M';
3835
3836 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3837
3838 /* Force return logic to be reviewed for each new architecture. */
3839 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3840
3841 if (arch == TAG_CPU_ARCH_V6_M
3842 || arch == TAG_CPU_ARCH_V6S_M
3843 || arch == TAG_CPU_ARCH_V7E_M
3844 || arch == TAG_CPU_ARCH_V8M_BASE
3845 || arch == TAG_CPU_ARCH_V8M_MAIN)
3846 return TRUE;
3847
3848 return FALSE;
3849 }
3850
3851 /* Determine if we're dealing with a Thumb-2 object. */
3852
3853 static bfd_boolean
3854 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3855 {
3856 int arch;
3857 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3858 Tag_THUMB_ISA_use);
3859
3860 if (thumb_isa)
3861 return thumb_isa == 2;
3862
3863 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3864
3865 /* Force return logic to be reviewed for each new architecture. */
3866 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3867
3868 return (arch == TAG_CPU_ARCH_V6T2
3869 || arch == TAG_CPU_ARCH_V7
3870 || arch == TAG_CPU_ARCH_V7E_M
3871 || arch == TAG_CPU_ARCH_V8
3872 || arch == TAG_CPU_ARCH_V8R
3873 || arch == TAG_CPU_ARCH_V8M_MAIN);
3874 }
3875
3876 /* Determine whether Thumb-2 BL instruction is available. */
3877
3878 static bfd_boolean
3879 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3880 {
3881 int arch =
3882 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3883
3884 /* Force return logic to be reviewed for each new architecture. */
3885 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3886
3887 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3888 return (arch == TAG_CPU_ARCH_V6T2
3889 || arch >= TAG_CPU_ARCH_V7);
3890 }
3891
3892 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3893 .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3894 hash table. */
3895
static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* Create the GOT first if the generic code has not already done so.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* VxWorks uses its own PLT layouts; shared links have no PLT0
	 header at all.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookup at DYNOBJ; restored
	 immediately below.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  /* Thumb-only targets need Thumb-2 encoded PLT entries.  */
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC overrides any PLT sizing chosen above: no PLT0 header, and
     with DF_BIND_NOW the lazy-resolution tail (5 words) of each entry
     is dropped.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  /* The generic routine above must have created all of these.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
3966
3967 /* Copy the extra info we tack onto an elf_link_hash_entry. */
3968
static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (eind->dyn_relocs != NULL)
    {
      if (edir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an existing entry for the same section on the
		 direct symbol; if found, fold P's counts into it and
		 unlink P from the indirect list.  */
	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      /* No match: keep P and advance.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append the direct list after the surviving indirect
	     entries; the combined list is installed below.  */
	  *pp = edir->dyn_relocs;
	}

      edir->dyn_relocs = eind->dyn_relocs;
      eind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only inherit the TLS access model if the direct symbol has no
	 GOT references of its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4038
4039 /* Destroy an ARM elf linker hash table. */
4040
4041 static void
4042 elf32_arm_link_hash_table_free (bfd *obfd)
4043 {
4044 struct elf32_arm_link_hash_table *ret
4045 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4046
4047 bfd_hash_table_free (&ret->stub_hash_table);
4048 _bfd_elf_link_hash_table_free (obfd);
4049 }
4050
4051 /* Create an ARM elf linker hash table. */
4052
4053 static struct bfd_link_hash_table *
4054 elf32_arm_link_hash_table_create (bfd *abfd)
4055 {
4056 struct elf32_arm_link_hash_table *ret;
4057 bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4058
4059 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4060 if (ret == NULL)
4061 return NULL;
4062
4063 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4064 elf32_arm_link_hash_newfunc,
4065 sizeof (struct elf32_arm_link_hash_entry),
4066 ARM_ELF_DATA))
4067 {
4068 free (ret);
4069 return NULL;
4070 }
4071
4072 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4073 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4074 #ifdef FOUR_WORD_PLT
4075 ret->plt_header_size = 16;
4076 ret->plt_entry_size = 16;
4077 #else
4078 ret->plt_header_size = 20;
4079 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4080 #endif
4081 ret->use_rel = TRUE;
4082 ret->obfd = abfd;
4083 ret->fdpic_p = 0;
4084
4085 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4086 sizeof (struct elf32_arm_stub_hash_entry)))
4087 {
4088 _bfd_elf_link_hash_table_free (abfd);
4089 return NULL;
4090 }
4091 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4092
4093 return &ret->root.root;
4094 }
4095
4096 /* Determine what kind of NOPs are available. */
4097
4098 static bfd_boolean
4099 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4100 {
4101 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4102 Tag_CPU_arch);
4103
4104 /* Force return logic to be reviewed for each new architecture. */
4105 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
4106
4107 return (arch == TAG_CPU_ARCH_V6T2
4108 || arch == TAG_CPU_ARCH_V6K
4109 || arch == TAG_CPU_ARCH_V7
4110 || arch == TAG_CPU_ARCH_V8
4111 || arch == TAG_CPU_ARCH_V8R);
4112 }
4113
4114 static bfd_boolean
4115 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4116 {
4117 switch (stub_type)
4118 {
4119 case arm_stub_long_branch_thumb_only:
4120 case arm_stub_long_branch_thumb2_only:
4121 case arm_stub_long_branch_thumb2_only_pure:
4122 case arm_stub_long_branch_v4t_thumb_arm:
4123 case arm_stub_short_branch_v4t_thumb_arm:
4124 case arm_stub_long_branch_v4t_thumb_arm_pic:
4125 case arm_stub_long_branch_v4t_thumb_tls_pic:
4126 case arm_stub_long_branch_thumb_only_pic:
4127 case arm_stub_cmse_branch_thumb_only:
4128 return TRUE;
4129 case arm_stub_none:
4130 BFD_FAIL ();
4131 return FALSE;
4132 break;
4133 default:
4134 return FALSE;
4135 }
4136 }
4137
4138 /* Determine the type of stub needed, if any, for a call. */
4139
static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  /* Decide which (if any) long-branch/interworking stub a branch needs,
     based on the relocation type, the branch distance, the source and
     destination instruction sets, and whether the call goes through a
     PLT.  Returns arm_stub_none when no stub is required; on any other
     return, *ACTUAL_BRANCH_TYPE is updated with the destination state
     actually branched to.  */
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* IFUNC calls resolve through .iplt; everything else through
	 the regular .plt.  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  /* Thumb-only target: prefer the pure-code capable
		     stub when movw is available and the section
		     requires it.  */
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4482
4483 /* Build a name for an entry in the stub hash table. */
4484
static char *
elf32_arm_stub_name (const asection *input_section,
		     const asection *sym_sec,
		     const struct elf32_arm_link_hash_entry *hash,
		     const Elf_Internal_Rela *rel,
		     enum elf32_arm_stub_type stub_type)
{
  /* Build a malloc'ed, unique hash-table key for a stub.  Global
     symbols are keyed by "<section id>_<symbol>+<addend>_<type>";
     local ones by "<section id>_<symbol section id>:<sym>+<addend>_<type>".
     The caller owns (and must free) the returned string; returns NULL
     on allocation failure.  */
  char *stub_name;
  bfd_size_type len;

  if (hash)
    {
      /* LEN mirrors the sprintf below: 8 hex digits, '_', the symbol
	 name, '+', up to 8 hex digits of addend, '_', 2 digits of stub
	 type, NUL.  */
      len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%s+%x_%d",
		 input_section->id & 0xffffffff,
		 hash->root.root.root.string,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }
  else
    {
      /* Local symbol: identify it by section id and symbol index
	 instead of a name.  */
      len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
      stub_name = (char *) bfd_malloc (len);
      if (stub_name != NULL)
	sprintf (stub_name, "%08x_%x:%x+%x_%d",
		 input_section->id & 0xffffffff,
		 sym_sec->id & 0xffffffff,
		 /* TLS call stubs are shared per-section, so the symbol
		    index is irrelevant and forced to 0.  */
		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
		 (int) rel->r_addend & 0xffffffff,
		 (int) stub_type);
    }

  return stub_name;
}
4523
4524 /* Look up an entry in the stub hash. Stub entries are cached because
4525 creating the stub name takes a bit of time. */
4526
4527 static struct elf32_arm_stub_hash_entry *
4528 elf32_arm_get_stub_entry (const asection *input_section,
4529 const asection *sym_sec,
4530 struct elf_link_hash_entry *hash,
4531 const Elf_Internal_Rela *rel,
4532 struct elf32_arm_link_hash_table *htab,
4533 enum elf32_arm_stub_type stub_type)
4534 {
4535 struct elf32_arm_stub_hash_entry *stub_entry;
4536 struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4537 const asection *id_sec;
4538
4539 if ((input_section->flags & SEC_CODE) == 0)
4540 return NULL;
4541
4542 /* If this input section is part of a group of sections sharing one
4543 stub section, then use the id of the first section in the group.
4544 Stub names need to include a section id, as there may well be
4545 more than one stub used to reach say, printf, and we need to
4546 distinguish between them. */
4547 BFD_ASSERT (input_section->id <= htab->top_id);
4548 id_sec = htab->stub_group[input_section->id].link_sec;
4549
4550 if (h != NULL && h->stub_cache != NULL
4551 && h->stub_cache->h == h
4552 && h->stub_cache->id_sec == id_sec
4553 && h->stub_cache->stub_type == stub_type)
4554 {
4555 stub_entry = h->stub_cache;
4556 }
4557 else
4558 {
4559 char *stub_name;
4560
4561 stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4562 if (stub_name == NULL)
4563 return NULL;
4564
4565 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4566 stub_name, FALSE, FALSE);
4567 if (h != NULL)
4568 h->stub_cache = stub_entry;
4569
4570 free (stub_name);
4571 }
4572
4573 return stub_entry;
4574 }
4575
4576 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4577 section. */
4578
4579 static bfd_boolean
4580 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4581 {
4582 if (stub_type >= max_stub_type)
4583 abort (); /* Should be unreachable. */
4584
4585 switch (stub_type)
4586 {
4587 case arm_stub_cmse_branch_thumb_only:
4588 return TRUE;
4589
4590 default:
4591 return FALSE;
4592 }
4593
4594 abort (); /* Should be unreachable. */
4595 }
4596
4597 /* Required alignment (as a power of 2) for the dedicated section holding
4598 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4599 with input sections. */
4600
4601 static int
4602 arm_dedicated_stub_output_section_required_alignment
4603 (enum elf32_arm_stub_type stub_type)
4604 {
4605 if (stub_type >= max_stub_type)
4606 abort (); /* Should be unreachable. */
4607
4608 switch (stub_type)
4609 {
4610 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4611 boundary. */
4612 case arm_stub_cmse_branch_thumb_only:
4613 return 5;
4614
4615 default:
4616 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4617 return 0;
4618 }
4619
4620 abort (); /* Should be unreachable. */
4621 }
4622
4623 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4624 NULL if veneers of this type are interspersed with input sections. */
4625
4626 static const char *
4627 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4628 {
4629 if (stub_type >= max_stub_type)
4630 abort (); /* Should be unreachable. */
4631
4632 switch (stub_type)
4633 {
4634 case arm_stub_cmse_branch_thumb_only:
4635 return ".gnu.sgstubs";
4636
4637 default:
4638 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4639 return NULL;
4640 }
4641
4642 abort (); /* Should be unreachable. */
4643 }
4644
4645 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4646 returns the address of the hash table field in HTAB holding a pointer to the
4647 corresponding input section. Otherwise, returns NULL. */
4648
4649 static asection **
4650 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4651 enum elf32_arm_stub_type stub_type)
4652 {
4653 if (stub_type >= max_stub_type)
4654 abort (); /* Should be unreachable. */
4655
4656 switch (stub_type)
4657 {
4658 case arm_stub_cmse_branch_thumb_only:
4659 return &htab->cmse_stub_sec;
4660
4661 default:
4662 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4663 return NULL;
4664 }
4665
4666 abort (); /* Should be unreachable. */
4667 }
4668
4669 /* Find or create a stub section to contain a stub of type STUB_TYPE. SECTION
4670 is the section that branch into veneer and can be NULL if stub should go in
4671 a dedicated output section. Returns a pointer to the stub section, and the
4672 section to which the stub section will be attached (in *LINK_SEC_P).
4673 LINK_SEC_P may be NULL. */
4674
static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated case (e.g. CMSE veneers): the stub section hangs off
	 the hash table, not off a stub group, and the output section
	 must already exist (created by the linker script).  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Grouped case: the stub section is shared by the stub group
	 SECTION belongs to, attached after the group's link section.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  /* Create the stub section on first use, named "<prefix><STUB_SUFFIX>".  */
  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section on SECTION's own group entry too, so later
     lookups via SECTION find it directly.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4748
4749 /* Add a new stub entry to the stub hash. Not all fields of the new
4750 stub entry are initialised. */
4751
4752 static struct elf32_arm_stub_hash_entry *
4753 elf32_arm_add_stub (const char *stub_name, asection *section,
4754 struct elf32_arm_link_hash_table *htab,
4755 enum elf32_arm_stub_type stub_type)
4756 {
4757 asection *link_sec;
4758 asection *stub_sec;
4759 struct elf32_arm_stub_hash_entry *stub_entry;
4760
4761 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4762 stub_type);
4763 if (stub_sec == NULL)
4764 return NULL;
4765
4766 /* Enter this entry into the linker stub hash table. */
4767 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4768 TRUE, FALSE);
4769 if (stub_entry == NULL)
4770 {
4771 if (section == NULL)
4772 section = stub_sec;
4773 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4774 section->owner, stub_name);
4775 return NULL;
4776 }
4777
4778 stub_entry->stub_sec = stub_sec;
4779 stub_entry->stub_offset = (bfd_vma) -1;
4780 stub_entry->id_sec = link_sec;
4781
4782 return stub_entry;
4783 }
4784
4785 /* Store an Arm insn into an output section not processed by
4786 elf32_arm_write_section. */
4787
4788 static void
4789 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4790 bfd * output_bfd, bfd_vma val, void * ptr)
4791 {
4792 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4793 bfd_putl32 (val, ptr);
4794 else
4795 bfd_putb32 (val, ptr);
4796 }
4797
4798 /* Store a 16-bit Thumb insn into an output section not processed by
4799 elf32_arm_write_section. */
4800
4801 static void
4802 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4803 bfd * output_bfd, bfd_vma val, void * ptr)
4804 {
4805 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4806 bfd_putl16 (val, ptr);
4807 else
4808 bfd_putb16 (val, ptr);
4809 }
4810
4811 /* Store a Thumb2 insn into an output section not processed by
4812 elf32_arm_write_section. */
4813
4814 static void
4815 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4816 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4817 {
4818 /* T2 instructions are 16-bit streamed. */
4819 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4820 {
4821 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4822 bfd_putl16 ((val & 0xffff), ptr + 2);
4823 }
4824 else
4825 {
4826 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4827 bfd_putb16 ((val & 0xffff), ptr + 2);
4828 }
4829 }
4830
4831 /* If it's possible to change R_TYPE to a more efficient access
4832 model, return the new reloc type. */
4833
4834 static unsigned
4835 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4836 struct elf_link_hash_entry *h)
4837 {
4838 int is_local = (h == NULL);
4839
4840 if (bfd_link_pic (info)
4841 || (h && h->root.type == bfd_link_hash_undefweak))
4842 return r_type;
4843
4844 /* We do not support relaxations for Old TLS models. */
4845 switch (r_type)
4846 {
4847 case R_ARM_TLS_GOTDESC:
4848 case R_ARM_TLS_CALL:
4849 case R_ARM_THM_TLS_CALL:
4850 case R_ARM_TLS_DESCSEQ:
4851 case R_ARM_THM_TLS_DESCSEQ:
4852 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4853 }
4854
4855 return r_type;
4856 }
4857
4858 static bfd_reloc_status_type elf32_arm_final_link_relocate
4859 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4860 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4861 const char *, unsigned char, enum arm_st_branch_type,
4862 struct elf_link_hash_entry *, bfd_boolean *, char **);
4863
/* Return the alignment, in bytes, required for a stub of type
   STUB_TYPE.  */

static unsigned int
arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
{
  switch (stub_type)
    {
    /* These Cortex-A8 erratum veneers need only halfword alignment.  */
    case arm_stub_a8_veneer_b_cond:
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_bl:
      return 2;

    /* The remaining veneer kinds need word alignment.  */
    case arm_stub_long_branch_any_any:
    case arm_stub_long_branch_v4t_arm_thumb:
    case arm_stub_long_branch_thumb_only:
    case arm_stub_long_branch_thumb2_only:
    case arm_stub_long_branch_thumb2_only_pure:
    case arm_stub_long_branch_v4t_thumb_thumb:
    case arm_stub_long_branch_v4t_thumb_arm:
    case arm_stub_short_branch_v4t_thumb_arm:
    case arm_stub_long_branch_any_arm_pic:
    case arm_stub_long_branch_any_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_thumb_pic:
    case arm_stub_long_branch_v4t_arm_thumb_pic:
    case arm_stub_long_branch_v4t_thumb_arm_pic:
    case arm_stub_long_branch_thumb_only_pic:
    case arm_stub_long_branch_any_tls_pic:
    case arm_stub_long_branch_v4t_thumb_tls_pic:
    case arm_stub_cmse_branch_thumb_only:
    case arm_stub_a8_veneer_blx:
      return 4;

    /* NaCl stubs use 16-byte bundle alignment.  */
    case arm_stub_long_branch_arm_nacl:
    case arm_stub_long_branch_arm_nacl_pic:
      return 16;

    default:
      abort ();  /* Should be unreachable.  */
    }
}
4902
4903 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4904 veneering (TRUE) or have their own symbol (FALSE). */
4905
4906 static bfd_boolean
4907 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4908 {
4909 if (stub_type >= max_stub_type)
4910 abort (); /* Should be unreachable. */
4911
4912 switch (stub_type)
4913 {
4914 case arm_stub_cmse_branch_thumb_only:
4915 return TRUE;
4916
4917 default:
4918 return FALSE;
4919 }
4920
4921 abort (); /* Should be unreachable. */
4922 }
4923
4924 /* Returns the padding needed for the dedicated section used stubs of type
4925 STUB_TYPE. */
4926
4927 static int
4928 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4929 {
4930 if (stub_type >= max_stub_type)
4931 abort (); /* Should be unreachable. */
4932
4933 switch (stub_type)
4934 {
4935 case arm_stub_cmse_branch_thumb_only:
4936 return 32;
4937
4938 default:
4939 return 0;
4940 }
4941
4942 abort (); /* Should be unreachable. */
4943 }
4944
4945 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4946 returns the address of the hash table field in HTAB holding the offset at
4947 which new veneers should be layed out in the stub section. */
4948
4949 static bfd_vma*
4950 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4951 enum elf32_arm_stub_type stub_type)
4952 {
4953 switch (stub_type)
4954 {
4955 case arm_stub_cmse_branch_thumb_only:
4956 return &htab->new_cmse_stub_offset;
4957
4958 default:
4959 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4960 return NULL;
4961 }
4962 }
4963
4964 static bfd_boolean
4965 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4966 void * in_arg)
4967 {
4968 #define MAXRELOCS 3
4969 bfd_boolean removed_sg_veneer;
4970 struct elf32_arm_stub_hash_entry *stub_entry;
4971 struct elf32_arm_link_hash_table *globals;
4972 struct bfd_link_info *info;
4973 asection *stub_sec;
4974 bfd *stub_bfd;
4975 bfd_byte *loc;
4976 bfd_vma sym_value;
4977 int template_size;
4978 int size;
4979 const insn_sequence *template_sequence;
4980 int i;
4981 int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4982 int stub_reloc_offset[MAXRELOCS] = {0, 0};
4983 int nrelocs = 0;
4984 int just_allocated = 0;
4985
4986 /* Massage our args to the form they really have. */
4987 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4988 info = (struct bfd_link_info *) in_arg;
4989
4990 globals = elf32_arm_hash_table (info);
4991 if (globals == NULL)
4992 return FALSE;
4993
4994 stub_sec = stub_entry->stub_sec;
4995
4996 if ((globals->fix_cortex_a8 < 0)
4997 != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4998 /* We have to do less-strictly-aligned fixes last. */
4999 return TRUE;
5000
5001 /* Assign a slot at the end of section if none assigned yet. */
5002 if (stub_entry->stub_offset == (bfd_vma) -1)
5003 {
5004 stub_entry->stub_offset = stub_sec->size;
5005 just_allocated = 1;
5006 }
5007 loc = stub_sec->contents + stub_entry->stub_offset;
5008
5009 stub_bfd = stub_sec->owner;
5010
5011 /* This is the address of the stub destination. */
5012 sym_value = (stub_entry->target_value
5013 + stub_entry->target_section->output_offset
5014 + stub_entry->target_section->output_section->vma);
5015
5016 template_sequence = stub_entry->stub_template;
5017 template_size = stub_entry->stub_template_size;
5018
5019 size = 0;
5020 for (i = 0; i < template_size; i++)
5021 {
5022 switch (template_sequence[i].type)
5023 {
5024 case THUMB16_TYPE:
5025 {
5026 bfd_vma data = (bfd_vma) template_sequence[i].data;
5027 if (template_sequence[i].reloc_addend != 0)
5028 {
5029 /* We've borrowed the reloc_addend field to mean we should
5030 insert a condition code into this (Thumb-1 branch)
5031 instruction. See THUMB16_BCOND_INSN. */
5032 BFD_ASSERT ((data & 0xff00) == 0xd000);
5033 data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5034 }
5035 bfd_put_16 (stub_bfd, data, loc + size);
5036 size += 2;
5037 }
5038 break;
5039
5040 case THUMB32_TYPE:
5041 bfd_put_16 (stub_bfd,
5042 (template_sequence[i].data >> 16) & 0xffff,
5043 loc + size);
5044 bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5045 loc + size + 2);
5046 if (template_sequence[i].r_type != R_ARM_NONE)
5047 {
5048 stub_reloc_idx[nrelocs] = i;
5049 stub_reloc_offset[nrelocs++] = size;
5050 }
5051 size += 4;
5052 break;
5053
5054 case ARM_TYPE:
5055 bfd_put_32 (stub_bfd, template_sequence[i].data,
5056 loc + size);
5057 /* Handle cases where the target is encoded within the
5058 instruction. */
5059 if (template_sequence[i].r_type == R_ARM_JUMP24)
5060 {
5061 stub_reloc_idx[nrelocs] = i;
5062 stub_reloc_offset[nrelocs++] = size;
5063 }
5064 size += 4;
5065 break;
5066
5067 case DATA_TYPE:
5068 bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5069 stub_reloc_idx[nrelocs] = i;
5070 stub_reloc_offset[nrelocs++] = size;
5071 size += 4;
5072 break;
5073
5074 default:
5075 BFD_FAIL ();
5076 return FALSE;
5077 }
5078 }
5079
5080 if (just_allocated)
5081 stub_sec->size += size;
5082
5083 /* Stub size has already been computed in arm_size_one_stub. Check
5084 consistency. */
5085 BFD_ASSERT (size == stub_entry->stub_size);
5086
5087 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
5088 if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5089 sym_value |= 1;
5090
5091 /* Assume non empty slots have at least one and at most MAXRELOCS entries
5092 to relocate in each stub. */
5093 removed_sg_veneer =
5094 (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5095 BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5096
5097 for (i = 0; i < nrelocs; i++)
5098 {
5099 Elf_Internal_Rela rel;
5100 bfd_boolean unresolved_reloc;
5101 char *error_message;
5102 bfd_vma points_to =
5103 sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5104
5105 rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5106 rel.r_info = ELF32_R_INFO (0,
5107 template_sequence[stub_reloc_idx[i]].r_type);
5108 rel.r_addend = 0;
5109
5110 if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5111 /* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5112 template should refer back to the instruction after the original
5113 branch. We use target_section as Cortex-A8 erratum workaround stubs
5114 are only generated when both source and target are in the same
5115 section. */
5116 points_to = stub_entry->target_section->output_section->vma
5117 + stub_entry->target_section->output_offset
5118 + stub_entry->source_value;
5119
5120 elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5121 (template_sequence[stub_reloc_idx[i]].r_type),
5122 stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5123 points_to, info, stub_entry->target_section, "", STT_FUNC,
5124 stub_entry->branch_type,
5125 (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5126 &error_message);
5127 }
5128
5129 return TRUE;
5130 #undef MAXRELOCS
5131 }
5132
5133 /* Calculate the template, template size and instruction size for a stub.
5134 Return value is the instruction size. */
5135
5136 static unsigned int
5137 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5138 const insn_sequence **stub_template,
5139 int *stub_template_size)
5140 {
5141 const insn_sequence *template_sequence = NULL;
5142 int template_size = 0, i;
5143 unsigned int size;
5144
5145 template_sequence = stub_definitions[stub_type].template_sequence;
5146 if (stub_template)
5147 *stub_template = template_sequence;
5148
5149 template_size = stub_definitions[stub_type].template_size;
5150 if (stub_template_size)
5151 *stub_template_size = template_size;
5152
5153 size = 0;
5154 for (i = 0; i < template_size; i++)
5155 {
5156 switch (template_sequence[i].type)
5157 {
5158 case THUMB16_TYPE:
5159 size += 2;
5160 break;
5161
5162 case ARM_TYPE:
5163 case THUMB32_TYPE:
5164 case DATA_TYPE:
5165 size += 4;
5166 break;
5167
5168 default:
5169 BFD_FAIL ();
5170 return 0;
5171 }
5172 }
5173
5174 return size;
5175 }
5176
5177 /* As above, but don't actually build the stub. Just bump offset so
5178 we know stub section sizes. */
5179
5180 static bfd_boolean
5181 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5182 void *in_arg ATTRIBUTE_UNUSED)
5183 {
5184 struct elf32_arm_stub_hash_entry *stub_entry;
5185 const insn_sequence *template_sequence;
5186 int template_size, size;
5187
5188 /* Massage our args to the form they really have. */
5189 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5190
5191 BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5192 && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5193
5194 size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5195 &template_size);
5196
5197 /* Initialized to -1. Null size indicates an empty slot full of zeros. */
5198 if (stub_entry->stub_template_size)
5199 {
5200 stub_entry->stub_size = size;
5201 stub_entry->stub_template = template_sequence;
5202 stub_entry->stub_template_size = template_size;
5203 }
5204
5205 /* Already accounted for. */
5206 if (stub_entry->stub_offset != (bfd_vma) -1)
5207 return TRUE;
5208
5209 size = (size + 7) & ~7;
5210 stub_entry->stub_sec->size += size;
5211
5212 return TRUE;
5213 }
5214
5215 /* External entry points for sizing and building linker stubs. */
5216
5217 /* Set up various things so that we can make a list of input sections
5218 for each output section included in the link. Returns -1 on error,
5219 0 when no stubs will be needed, and 1 on success. */
5220
5221 int
5222 elf32_arm_setup_section_lists (bfd *output_bfd,
5223 struct bfd_link_info *info)
5224 {
5225 bfd *input_bfd;
5226 unsigned int bfd_count;
5227 unsigned int top_id, top_index;
5228 asection *section;
5229 asection **input_list, **list;
5230 bfd_size_type amt;
5231 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5232
5233 if (htab == NULL)
5234 return 0;
5235 if (! is_elf_hash_table (htab))
5236 return 0;
5237
5238 /* Count the number of input BFDs and find the top input section id. */
5239 for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5240 input_bfd != NULL;
5241 input_bfd = input_bfd->link.next)
5242 {
5243 bfd_count += 1;
5244 for (section = input_bfd->sections;
5245 section != NULL;
5246 section = section->next)
5247 {
5248 if (top_id < section->id)
5249 top_id = section->id;
5250 }
5251 }
5252 htab->bfd_count = bfd_count;
5253
5254 amt = sizeof (struct map_stub) * (top_id + 1);
5255 htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5256 if (htab->stub_group == NULL)
5257 return -1;
5258 htab->top_id = top_id;
5259
5260 /* We can't use output_bfd->section_count here to find the top output
5261 section index as some sections may have been removed, and
5262 _bfd_strip_section_from_output doesn't renumber the indices. */
5263 for (section = output_bfd->sections, top_index = 0;
5264 section != NULL;
5265 section = section->next)
5266 {
5267 if (top_index < section->index)
5268 top_index = section->index;
5269 }
5270
5271 htab->top_index = top_index;
5272 amt = sizeof (asection *) * (top_index + 1);
5273 input_list = (asection **) bfd_malloc (amt);
5274 htab->input_list = input_list;
5275 if (input_list == NULL)
5276 return -1;
5277
5278 /* For sections we aren't interested in, mark their entries with a
5279 value we can check later. */
5280 list = input_list + top_index;
5281 do
5282 *list = bfd_abs_section_ptr;
5283 while (list-- != input_list);
5284
5285 for (section = output_bfd->sections;
5286 section != NULL;
5287 section = section->next)
5288 {
5289 if ((section->flags & SEC_CODE) != 0)
5290 input_list[section->index] = NULL;
5291 }
5292
5293 return 1;
5294 }
5295
/* The linker repeatedly calls this function for each input section,
   in the order that input sections are linked into output sections.
   Build lists of input sections to determine groupings between which
   we may insert linker stubs.  */

void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* An entry still holding bfd_abs_section_ptr was marked as "not
	 interesting" (non-code) by elf32_arm_setup_section_lists.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5325
/* See whether we can group stub sections together.  Grouping stub
   sections may result in fewer stubs.  More importantly, we need to
   put all .init* and .fini* stubs at the end of the .init or
   .fini output sections respectively, because glibc splits the
   _init and _fini functions into multiple parts.  Putting a stub in
   the middle of a function is not a good idea.  */

static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks output sections we are not
	 interested in (see elf32_arm_setup_section_lists).  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      /* Walk the (now forward-ordered) list, carving it into groups
	 of at most stub_group_size bytes, each sharing one stub
	 section placed after the group's last input section.  */
      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  /* The per-output-section lists are no longer needed once grouping
     is recorded in stub_group[].  */
  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5426
5427 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5428 erratum fix. */
5429
5430 static int
5431 a8_reloc_compare (const void *a, const void *b)
5432 {
5433 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5434 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5435
5436 if (ra->from < rb->from)
5437 return -1;
5438 else if (ra->from > rb->from)
5439 return 1;
5440 else
5441 return 0;
5442 }
5443
5444 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5445 const char *, char **);
5446
/* Helper function to scan code for sequences which might trigger the Cortex-A8
   branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
   NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
   otherwise.  */

static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  /* Note: FALSE means "no error" here, not failure.  */
  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only scan executable, allocated code sections that make it
	 into the output.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Use cached contents when available; otherwise read (and later
	 free) our own copy.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb ('t') spans can
	 contain the offending 32-bit Thumb-2 branches.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 instruction.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first
		 halfword is the last halfword of a 4KB page, preceded
		 by a 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation recorded at this branch's
		     address, if any (A8_RELOCS is sorted by 'from').  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) 21-bit signed
			 branch offset.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B.W/BL/BLX (encoding T4/T1/T2)
			 25-bit signed offset, with I1/I2 derived from
			 J1/J2 and the sign bit S.  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only a branch landing in the same 4KB page as
			 its first halfword triggers the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Double the fix table when full.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* "%x:%x" of two 32-bit values: at most
				 8 + 1 + 8 chars plus the terminator.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free the contents only if we allocated them ourselves above.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Hand the (possibly reallocated) table back to the caller.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5757
/* Create or update a stub entry depending on whether the stub can already be
   found in HTAB.  The stub is identified by:
   - its type STUB_TYPE
   - its source branch (note that several can share the same stub) whose
     section and relocation (if any) are given by SECTION and IRELA
     respectively
   - its target symbol whose input section, hash, name, value and branch type
     are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
     respectively

   If found, the value of the stub's target symbol is updated from SYM_VALUE
   and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
   TRUE and the stub entry is initialized.

   Returns the stub that was created or updated, or NULL if an error
   occurred.  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
		       enum elf32_arm_stub_type stub_type, asection *section,
		       Elf_Internal_Rela *irela, asection *sym_sec,
		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
		       bfd_boolean *new_stub)
{
  const asection *id_sec;
  char *stub_name;
  struct elf32_arm_stub_hash_entry *stub_entry;
  unsigned int r_type;
  bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);

  BFD_ASSERT (stub_type != arm_stub_none);
  *new_stub = FALSE;

  /* Symbol-claiming stubs use SYM_NAME itself as the hash key; other
     stubs get a name built from the group section, target and reloc.
     In the latter case STUB_NAME is malloc'd and must be freed on all
     early-exit paths below.  */
  if (sym_claimed)
    stub_name = sym_name;
  else
    {
      BFD_ASSERT (irela);
      BFD_ASSERT (section);
      BFD_ASSERT (section->id <= htab->top_id);

      /* Support for grouping stub sections.  */
      id_sec = htab->stub_group[section->id].link_sec;

      /* Get the name of this stub.  */
      stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
				       stub_type);
      if (!stub_name)
	return NULL;
    }

  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
				     FALSE);
  /* The proper stub has already been created, just update its value.  */
  if (stub_entry != NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      stub_entry->target_value = sym_value;
      return stub_entry;
    }

  stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
  if (stub_entry == NULL)
    {
      if (!sym_claimed)
	free (stub_name);
      return NULL;
    }

  stub_entry->target_value = sym_value;
  stub_entry->target_section = sym_sec;
  stub_entry->stub_type = stub_type;
  stub_entry->h = hash;
  stub_entry->branch_type = branch_type;

  if (sym_claimed)
    stub_entry->output_name = sym_name;
  else
    {
      if (sym_name == NULL)
	sym_name = "unnamed";
      /* NOTE(review): the buffer size is based on
	 THUMB2ARM_GLUE_ENTRY_NAME only -- assumes the prefixes added
	 by ARM2THUMB_GLUE_ENTRY_NAME and STUB_ENTRY_NAME are no
	 longer; confirm if those format strings ever change.  */
      stub_entry->output_name = (char *)
	  bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
				     + strlen (sym_name));
      if (stub_entry->output_name == NULL)
	{
	  free (stub_name);
	  return NULL;
	}

      /* For historical reasons, use the existing names for ARM-to-Thumb and
	 Thumb-to-ARM stubs.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      if ((r_type == (unsigned int) R_ARM_THM_CALL
	   || r_type == (unsigned int) R_ARM_THM_JUMP24
	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
	  && branch_type == ST_BRANCH_TO_ARM)
	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
      else if ((r_type == (unsigned int) R_ARM_CALL
		|| r_type == (unsigned int) R_ARM_JUMP24)
	       && branch_type == ST_BRANCH_TO_THUMB)
	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
      else
	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
    }

  *new_stub = TRUE;
  return stub_entry;
}
5869
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create
   them accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
   created.

   The return value gives whether a stub failed to be allocated.  */

static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  /* Total number of symbols; indices below sh_info are the local ones.  */
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  ext_start = symtab_hdr->sh_info;
  /* Secure gateway veneers only make sense when targeting the ARMv8-M
     Microcontroller profile (Baseline or Mainline).  */
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse local symbols cached on the symtab header if present, otherwise
     read them in; in that case they are freed at the end of this
     function.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  cmse_sym = &local_syms[i];
	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
	    continue;
	  /* NOTE(review): sym_name may be NULL if the string table lookup
	     fails — confirm st_name is always valid here.  */
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  /* Special symbol with local binding.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;

	  /* Not a special symbol.  */
	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      /* Special symbols are only valid for ARMv8-M outputs; warn once.  */
      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  /* A local special symbol cannot have a valid normal symbol:
	     nothing more to check for it.  */
	  if (i < ext_start)
	    continue;
	}

      /* Strip the __acle_se_ prefix to obtain the normal symbol's name and
	 look it up in the global hash table.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      /* Normal and special symbols must be defined in the same section.  */
      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      /* An empty entry function cannot contain any code at all.  */
      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Once any error was diagnosed, skip veneer creation but keep
	 scanning to report further problems.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Only free the symbols if we read them in ourselves above.  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6068
6069 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6070 code entry function, ie can be called from non secure code without using a
6071 veneer. */
6072
6073 static bfd_boolean
6074 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6075 {
6076 bfd_byte contents[4];
6077 uint32_t first_insn;
6078 asection *section;
6079 file_ptr offset;
6080 bfd *abfd;
6081
6082 /* Defined symbol of function type. */
6083 if (hash->root.root.type != bfd_link_hash_defined
6084 && hash->root.root.type != bfd_link_hash_defweak)
6085 return FALSE;
6086 if (hash->root.type != STT_FUNC)
6087 return FALSE;
6088
6089 /* Read first instruction. */
6090 section = hash->root.root.u.def.section;
6091 abfd = section->owner;
6092 offset = hash->root.root.u.def.value - section->vma;
6093 if (!bfd_get_section_contents (abfd, section, contents, offset,
6094 sizeof (contents)))
6095 return FALSE;
6096
6097 first_insn = bfd_get_32 (abfd, contents);
6098
6099 /* Starts by SG instruction. */
6100 return first_insn == 0xe97fe97f;
6101 }
6102
6103 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6104 secure gateway veneers (ie. the veneers was not in the input import library)
6105 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6106
6107 static bfd_boolean
6108 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6109 {
6110 struct elf32_arm_stub_hash_entry *stub_entry;
6111 struct bfd_link_info *info;
6112
6113 /* Massage our args to the form they really have. */
6114 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6115 info = (struct bfd_link_info *) gen_info;
6116
6117 if (info->out_implib_bfd)
6118 return TRUE;
6119
6120 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6121 return TRUE;
6122
6123 if (stub_entry->stub_offset == (bfd_vma) -1)
6124 _bfd_error_handler (" %s", stub_entry->output_name);
6125
6126 return TRUE;
6127 }
6128
6129 /* Set offset of each secure gateway veneers so that its address remain
6130 identical to the one in the input import library referred by
6131 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6132 (present in input import library but absent from the executable being
6133 linked) or if new veneers appeared and there is no output import library
6134 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6135 number of secure gateway veneers found in the input import library.
6136
6137 The function returns whether an error occurred. If no error occurred,
6138 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6139 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6140 veneer observed set for new veneers to be layed out after. */
6141
6142 static bfd_boolean
6143 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6144 struct elf32_arm_link_hash_table *htab,
6145 int *cmse_stub_created)
6146 {
6147 long symsize;
6148 char *sym_name;
6149 flagword flags;
6150 long i, symcount;
6151 bfd *in_implib_bfd;
6152 asection *stub_out_sec;
6153 bfd_boolean ret = TRUE;
6154 Elf_Internal_Sym *intsym;
6155 const char *out_sec_name;
6156 bfd_size_type cmse_stub_size;
6157 asymbol **sympp = NULL, *sym;
6158 struct elf32_arm_link_hash_entry *hash;
6159 const insn_sequence *cmse_stub_template;
6160 struct elf32_arm_stub_hash_entry *stub_entry;
6161 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6162 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6163 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6164
6165 /* No input secure gateway import library. */
6166 if (!htab->in_implib_bfd)
6167 return TRUE;
6168
6169 in_implib_bfd = htab->in_implib_bfd;
6170 if (!htab->cmse_implib)
6171 {
6172 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6173 "Gateway import libraries"), in_implib_bfd);
6174 return FALSE;
6175 }
6176
6177 /* Get symbol table size. */
6178 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6179 if (symsize < 0)
6180 return FALSE;
6181
6182 /* Read in the input secure gateway import library's symbol table. */
6183 sympp = (asymbol **) xmalloc (symsize);
6184 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6185 if (symcount < 0)
6186 {
6187 ret = FALSE;
6188 goto free_sym_buf;
6189 }
6190
6191 htab->new_cmse_stub_offset = 0;
6192 cmse_stub_size =
6193 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6194 &cmse_stub_template,
6195 &cmse_stub_template_size);
6196 out_sec_name =
6197 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6198 stub_out_sec =
6199 bfd_get_section_by_name (htab->obfd, out_sec_name);
6200 if (stub_out_sec != NULL)
6201 cmse_stub_sec_vma = stub_out_sec->vma;
6202
6203 /* Set addresses of veneers mentionned in input secure gateway import
6204 library's symbol table. */
6205 for (i = 0; i < symcount; i++)
6206 {
6207 sym = sympp[i];
6208 flags = sym->flags;
6209 sym_name = (char *) bfd_asymbol_name (sym);
6210 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6211
6212 if (sym->section != bfd_abs_section_ptr
6213 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6214 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6215 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6216 != ST_BRANCH_TO_THUMB))
6217 {
6218 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6219 "symbol should be absolute, global and "
6220 "refer to Thumb functions"),
6221 in_implib_bfd, sym_name);
6222 ret = FALSE;
6223 continue;
6224 }
6225
6226 veneer_value = bfd_asymbol_value (sym);
6227 stub_offset = veneer_value - cmse_stub_sec_vma;
6228 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6229 FALSE, FALSE);
6230 hash = (struct elf32_arm_link_hash_entry *)
6231 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6232
6233 /* Stub entry should have been created by cmse_scan or the symbol be of
6234 a secure function callable from non secure code. */
6235 if (!stub_entry && !hash)
6236 {
6237 bfd_boolean new_stub;
6238
6239 _bfd_error_handler
6240 (_("entry function `%s' disappeared from secure code"), sym_name);
6241 hash = (struct elf32_arm_link_hash_entry *)
6242 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6243 stub_entry
6244 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6245 NULL, NULL, bfd_abs_section_ptr, hash,
6246 sym_name, veneer_value,
6247 ST_BRANCH_TO_THUMB, &new_stub);
6248 if (stub_entry == NULL)
6249 ret = FALSE;
6250 else
6251 {
6252 BFD_ASSERT (new_stub);
6253 new_cmse_stubs_created++;
6254 (*cmse_stub_created)++;
6255 }
6256 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6257 stub_entry->stub_offset = stub_offset;
6258 }
6259 /* Symbol found is not callable from non secure code. */
6260 else if (!stub_entry)
6261 {
6262 if (!cmse_entry_fct_p (hash))
6263 {
6264 _bfd_error_handler (_("`%s' refers to a non entry function"),
6265 sym_name);
6266 ret = FALSE;
6267 }
6268 continue;
6269 }
6270 else
6271 {
6272 /* Only stubs for SG veneers should have been created. */
6273 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6274
6275 /* Check visibility hasn't changed. */
6276 if (!!(flags & BSF_GLOBAL)
6277 != (hash->root.root.type == bfd_link_hash_defined))
6278 _bfd_error_handler
6279 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6280 sym_name);
6281
6282 stub_entry->stub_offset = stub_offset;
6283 }
6284
6285 /* Size should match that of a SG veneer. */
6286 if (intsym->st_size != cmse_stub_size)
6287 {
6288 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6289 in_implib_bfd, sym_name);
6290 ret = FALSE;
6291 }
6292
6293 /* Previous veneer address is before current SG veneer section. */
6294 if (veneer_value < cmse_stub_sec_vma)
6295 {
6296 /* Avoid offset underflow. */
6297 if (stub_entry)
6298 stub_entry->stub_offset = 0;
6299 stub_offset = 0;
6300 ret = FALSE;
6301 }
6302
6303 /* Complain if stub offset not a multiple of stub size. */
6304 if (stub_offset % cmse_stub_size)
6305 {
6306 _bfd_error_handler
6307 (_("offset of veneer for entry function `%s' not a multiple of "
6308 "its size"), sym_name);
6309 ret = FALSE;
6310 }
6311
6312 if (!ret)
6313 continue;
6314
6315 new_cmse_stubs_created--;
6316 if (veneer_value < cmse_stub_array_start)
6317 cmse_stub_array_start = veneer_value;
6318 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6319 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6320 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6321 }
6322
6323 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6324 {
6325 BFD_ASSERT (new_cmse_stubs_created > 0);
6326 _bfd_error_handler
6327 (_("new entry function(s) introduced but no output import library "
6328 "specified:"));
6329 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6330 }
6331
6332 if (cmse_stub_array_start != cmse_stub_sec_vma)
6333 {
6334 _bfd_error_handler
6335 (_("start address of `%s' is different from previous link"),
6336 out_sec_name);
6337 ret = FALSE;
6338 }
6339
6340 free_sym_buf:
6341 free (sympp);
6342 return ret;
6343 }
6344
/* Determine and set the size of the stub section for a final link.

   The basic idea here is to examine all the relocations looking for
   PC-relative calls to a target that is unreachable with a "bl"
   instruction.  */

bfd_boolean
elf32_arm_size_stubs (bfd *output_bfd,
		      bfd *stub_bfd,
		      struct bfd_link_info *info,
		      bfd_signed_vma group_size,
		      asection * (*add_stub_section) (const char *, asection *,
						      asection *,
						      unsigned int),
		      void (*layout_sections_again) (void))
{
  bfd_boolean ret = TRUE;
  obj_attribute *out_attr;
  int cmse_stub_created = 0;
  bfd_size_type stub_group_size;
  bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = NULL;
  unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
  struct a8_erratum_reloc *a8_relocs = NULL;
  unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;

  if (htab == NULL)
    return FALSE;

  /* Pre-allocate growable tables recording Cortex-A8 erratum branch
     candidates and their fixes.  */
  if (htab->fix_cortex_a8)
    {
      a8_fixes = (struct a8_erratum_fix *)
	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
      a8_relocs = (struct a8_erratum_reloc *)
	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
    }

  /* Propagate mach to stub bfd, because it may not have been
     finalized when we created stub_bfd.  */
  bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
		     bfd_get_mach (output_bfd));

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;
  stubs_always_after_branch = group_size < 0;

  out_attr = elf_known_obj_attributes_proc (output_bfd);
  m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';

  /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
     as the first half of a 32-bit branch straddling two 4K pages.  This is a
     crude way of enforcing that.  */
  if (htab->fix_cortex_a8)
    stubs_always_after_branch = 1;

  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;

  if (stub_group_size == 1)
    {
      /* Default values.  */
      /* Thumb branch range is +-4MB has to be used as the default
	 maximum size (a given section can contain both ARM and Thumb
	 code, so the worst case has to be taken into account).

	 This value is 24K less than that, which allows for 2025
	 12-byte stubs.  If we exceed that, then we will fail to link.
	 The user will have to relink with an explicit group size
	 option.  */
      stub_group_size = 4170000;
    }

  group_sections (htab, stub_group_size, stubs_always_after_branch);

  /* If we're applying the cortex A8 fix, we need to determine the
     program header size now, because we cannot change it later --
     that could alter section placements.  Notice the A8 erratum fix
     ends up requiring the section addresses to remain unchanged
     modulo the page size.  That's something we cannot represent
     inside BFD, and we don't want to force the section alignment to
     be the page size.  */
  if (htab->fix_cortex_a8)
    (*htab->layout_sections_again) ();

  /* Iterate to a fixed point: each pass scans every relocation of every
     input section, creates any stubs found to be needed, then re-lays out
     sections.  Adding stubs can move code in or out of branch range, so
     the loop stops only once a pass creates no new stub.  */
  while (1)
    {
      bfd *input_bfd;
      unsigned int bfd_indx;
      asection *stub_sec;
      enum elf32_arm_stub_type stub_type;
      bfd_boolean stub_changed = FALSE;
      unsigned prev_num_a8_fixes = num_a8_fixes;

      num_a8_fixes = 0;
      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *section;
	  Elf_Internal_Sym *local_syms = NULL;

	  if (!is_arm_elf (input_bfd)
	      || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0)
	    continue;

	  num_a8_relocs = 0;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  /* Limit scan of symbols to object file whose profile is
	     Microcontroller to not hinder performance in the general case.  */
	  if (m_profile && first_veneer_scan)
	    {
	      struct elf_link_hash_entry **sym_hashes;

	      sym_hashes = elf_sym_hashes (input_bfd);
	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
			      &cmse_stub_created))
		goto error_ret_free_local;

	      if (cmse_stub_created != 0)
		stub_changed = TRUE;
	    }

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0
		  || (section->flags & SEC_CODE) == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Get the relocs.  */
	      internal_relocs
		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
					     NULL, info->keep_memory);
	      if (internal_relocs == NULL)
		goto error_ret_free_local;

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_arm_link_hash_entry *hash;
		  const char *sym_name;
		  unsigned char st_type;
		  enum arm_st_branch_type branch_type;
		  bfd_boolean created_stub = FALSE;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_ARM_max)
		    {
		      bfd_set_error (bfd_error_bad_value);
		      /* Shared error-exit paths: free the reloc buffer if
			 it was read for this call, then the local symbols
			 if they were read for this call, and fail.  */
		    error_ret_free_internal:
		      if (elf_section_data (section)->relocs == NULL)
			free (internal_relocs);
		    /* Fall through.  */
		    error_ret_free_local:
		      if (local_syms != NULL
			  && (symtab_hdr->contents
			      != (unsigned char *) local_syms))
			free (local_syms);
		      return FALSE;
		    }

		  hash = NULL;
		  if (r_indx >= symtab_hdr->sh_info)
		    hash = elf32_arm_hash_entry
		      (elf_sym_hashes (input_bfd)
		       [r_indx - symtab_hdr->sh_info]);

		  /* Only look for stubs on branch instructions, or
		     non-relaxed TLSCALL */
		  /* NOTE(review): when HASH is NULL (local symbol),
		     &hash->root below relies on `root' being the first
		     member so the expression yields NULL — confirm
		     elf32_arm_tls_transition tolerates this.  */
		  if ((r_type != (unsigned int) R_ARM_CALL)
		      && (r_type != (unsigned int) R_ARM_THM_CALL)
		      && (r_type != (unsigned int) R_ARM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
		      && (r_type != (unsigned int) R_ARM_PLT32)
		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
			   && r_type == elf32_arm_tls_transition
			   (info, r_type, &hash->root)
			   && ((hash ? hash->tls_type
				: (elf32_arm_local_got_tls_type
				   (input_bfd)[r_indx]))
			       & GOT_TLS_GDESC) != 0))
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  sym_name = NULL;

		  if (r_type == (unsigned int) R_ARM_TLS_CALL
		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
		    {
		      /* A non-relaxed TLS call.  The target is the
			 plt-resident trampoline and nothing to do
			 with the symbol.  */
		      BFD_ASSERT (htab->tls_trampoline > 0);
		      sym_sec = htab->root.splt;
		      sym_value = htab->tls_trampoline;
		      hash = 0;
		      st_type = STT_FUNC;
		      branch_type = ST_BRANCH_TO_ARM;
		    }
		  else if (!hash)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;

		      /* Lazily read the local symbols; freed (or cached on
			 the symtab header) at the end of the per-bfd
			 loop.  */
		      if (local_syms == NULL)
			{
			  local_syms
			    = (Elf_Internal_Sym *) symtab_hdr->contents;
			  if (local_syms == NULL)
			    local_syms
			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
						      symtab_hdr->sh_info, 0,
						      NULL, NULL, NULL);
			  if (local_syms == NULL)
			    goto error_ret_free_internal;
			}

		      sym = local_syms + r_indx;
		      if (sym->st_shndx == SHN_UNDEF)
			sym_sec = bfd_und_section_ptr;
		      else if (sym->st_shndx == SHN_ABS)
			sym_sec = bfd_abs_section_ptr;
		      else if (sym->st_shndx == SHN_COMMON)
			sym_sec = bfd_com_section_ptr;
		      else
			sym_sec =
			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);

		      if (!sym_sec)
			/* This is an undefined symbol.  It can never
			   be resolved.  */
			continue;

		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		      st_type = ELF_ST_TYPE (sym->st_info);
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
		      sym_name
			= bfd_elf_string_from_elf_section (input_bfd,
							   symtab_hdr->sh_link,
							   sym->st_name);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      while (hash->root.root.type == bfd_link_hash_indirect
			     || hash->root.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_arm_link_hash_entry *)
				hash->root.root.u.i.link);

		      if (hash->root.root.type == bfd_link_hash_defined
			  || hash->root.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->root.root.u.def.section;
			  sym_value = hash->root.root.u.def.value;

			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  /* For a destination in a shared library,
			     use the PLT stub as target address to
			     decide whether a branch stub is
			     needed.  */
			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if ((hash->root.root.type == bfd_link_hash_undefined)
			       || (hash->root.root.type == bfd_link_hash_undefweak))
			{
			  /* For a shared library, use the PLT stub as
			     target address to decide whether a long
			     branch stub is needed.
			     For absolute code, they cannot be handled.  */
			  struct elf32_arm_link_hash_table *globals =
			    elf32_arm_hash_table (info);

			  if (globals != NULL
			      && globals->root.splt != NULL
			      && hash != NULL
			      && hash->root.plt.offset != (bfd_vma) -1)
			    {
			      sym_sec = globals->root.splt;
			      sym_value = hash->root.plt.offset;
			      if (sym_sec->output_section != NULL)
				destination = (sym_value
					       + sym_sec->output_offset
					       + sym_sec->output_section->vma);
			    }
			  else
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		      st_type = hash->root.type;
		      branch_type =
			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
		      sym_name = hash->root.root.root.string;
		    }

		  /* do/while(0) so the stub-creation sequence can `break'
		     out early without skipping the A8 scan below.  */
		  do
		    {
		      bfd_boolean new_stub;
		      struct elf32_arm_stub_hash_entry *stub_entry;

		      /* Determine what (if any) linker stub is needed.  */
		      stub_type = arm_type_of_stub (info, section, irela,
						    st_type, &branch_type,
						    hash, destination, sym_sec,
						    input_bfd, sym_name);
		      if (stub_type == arm_stub_none)
			break;

		      /* We've either created a stub for this reloc already,
			 or we are about to.  */
		      stub_entry =
			elf32_arm_create_stub (htab, stub_type, section, irela,
					       sym_sec, hash,
					       (char *) sym_name, sym_value,
					       branch_type, &new_stub);

		      created_stub = stub_entry != NULL;
		      if (!created_stub)
			goto error_ret_free_internal;
		      else if (!new_stub)
			break;
		      else
			stub_changed = TRUE;
		    }
		  while (0);

		  /* Look for relocations which might trigger Cortex-A8
		     erratum.  */
		  if (htab->fix_cortex_a8
		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
			  || r_type == (unsigned int) R_ARM_THM_JUMP19
			  || r_type == (unsigned int) R_ARM_THM_CALL
			  || r_type == (unsigned int) R_ARM_THM_XPC22))
		    {
		      bfd_vma from = section->output_section->vma
				     + section->output_offset
				     + irela->r_offset;

		      if ((from & 0xfff) == 0xffe)
			{
			  /* Found a candidate.  Note we haven't checked the
			     destination is within 4K here: if we do so (and
			     don't create an entry in a8_relocs) we can't tell
			     that a branch should have been relocated when
			     scanning later.  */
			  /* NOTE(review): bfd_realloc overwrites a8_relocs
			     directly; on allocation failure the old table
			     leaks and NULL is dereferenced below — verify
			     the intended OOM policy.  */
			  if (num_a8_relocs == a8_reloc_table_size)
			    {
			      a8_reloc_table_size *= 2;
			      a8_relocs = (struct a8_erratum_reloc *)
				  bfd_realloc (a8_relocs,
					       sizeof (struct a8_erratum_reloc)
					       * a8_reloc_table_size);
			    }

			  a8_relocs[num_a8_relocs].from = from;
			  a8_relocs[num_a8_relocs].destination = destination;
			  a8_relocs[num_a8_relocs].r_type = r_type;
			  a8_relocs[num_a8_relocs].branch_type = branch_type;
			  a8_relocs[num_a8_relocs].sym_name = sym_name;
			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
			  a8_relocs[num_a8_relocs].hash = hash;

			  num_a8_relocs++;
			}
		    }
		}

	      /* We're done with the internal relocs, free them.  */
	      if (elf_section_data (section)->relocs == NULL)
		free (internal_relocs);
	    }

	  if (htab->fix_cortex_a8)
	    {
	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
	      qsort (a8_relocs, num_a8_relocs,
		     sizeof (struct a8_erratum_reloc),
		     &a8_reloc_compare);

	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
					  &num_a8_fixes, &a8_fix_table_size,
					  a8_relocs, num_a8_relocs,
					  prev_num_a8_fixes, &stub_changed)
		  != 0)
		goto error_ret_free_local;
	    }

	  /* Either free the local symbols we read, or cache them on the
	     symtab header for later reuse when keep_memory is set.  */
	  if (local_syms != NULL
	      && symtab_hdr->contents != (unsigned char *) local_syms)
	    {
	      if (!info->keep_memory)
		free (local_syms);
	      else
		symtab_hdr->contents = (unsigned char *) local_syms;
	    }
	}

      /* On the first pass only, pin SG veneers to the addresses recorded
	 in the input import library, if any.  */
      if (first_veneer_scan
	  && !set_cmse_veneer_addr_from_implib (info, htab,
						&cmse_stub_created))
	ret = FALSE;

      if (prev_num_a8_fixes != num_a8_fixes)
	stub_changed = TRUE;

      /* Fixed point reached: no new stubs this pass.  */
      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  stub_sec->size = 0;
	}

      /* Add new SG veneers after those already in the input import
	 library.  */
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  bfd_vma *start_offset_p;
	  asection **stub_sec_p;

	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  if (start_offset_p == NULL)
	    continue;

	  BFD_ASSERT (stub_sec_p != NULL);
	  if (*stub_sec_p != NULL)
	    (*stub_sec_p)->size = *start_offset_p;
	}

      /* Compute stub section size, considering padding.  */
      bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
      for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
	   stub_type++)
	{
	  int size, padding;
	  asection **stub_sec_p;

	  padding = arm_dedicated_stub_section_padding (stub_type);
	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
	  /* Skip if no stub input section or no stub section padding
	     required.  */
	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
	    continue;
	  /* Stub section padding required but no dedicated section.  */
	  BFD_ASSERT (stub_sec_p);

	  /* Round the section size up to the padding boundary.  */
	  size = (*stub_sec_p)->size;
	  size = (size + padding - 1) & ~(padding - 1);
	  (*stub_sec_p)->size = size;
	}

      /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
      if (htab->fix_cortex_a8)
	for (i = 0; i < num_a8_fixes; i++)
	  {
	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);

	    if (stub_sec == NULL)
	      return FALSE;

	    stub_sec->size
	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
					      NULL);
	  }


      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      first_veneer_scan = FALSE;
    }

  /* Add stubs for Cortex-A8 erratum fixes now.  */
  if (htab->fix_cortex_a8)
    {
      for (i = 0; i < num_a8_fixes; i++)
	{
	  struct elf32_arm_stub_hash_entry *stub_entry;
	  char *stub_name = a8_fixes[i].stub_name;
	  asection *section = a8_fixes[i].section;
	  unsigned int section_id = a8_fixes[i].section->id;
	  asection *link_sec = htab->stub_group[section_id].link_sec;
	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
	  const insn_sequence *template_sequence;
	  int template_size, size = 0;

	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
					     TRUE, FALSE);
	  if (stub_entry == NULL)
	    {
	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
				  section->owner, stub_name);
	      return FALSE;
	    }

	  stub_entry->stub_sec = stub_sec;
	  /* Offset assigned later, when the stub is laid out.  */
	  stub_entry->stub_offset = (bfd_vma) -1;
	  stub_entry->id_sec = link_sec;
	  stub_entry->stub_type = a8_fixes[i].stub_type;
	  stub_entry->source_value = a8_fixes[i].offset;
	  stub_entry->target_section = a8_fixes[i].section;
	  stub_entry->target_value = a8_fixes[i].target_offset;
	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
	  stub_entry->branch_type = a8_fixes[i].branch_type;

	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
					      &template_sequence,
					      &template_size);

	  stub_entry->stub_size = size;
	  stub_entry->stub_template = template_sequence;
	  stub_entry->stub_template_size = template_size;
	}

      /* Stash the Cortex-A8 erratum fix array for use later in
	 elf32_arm_write_section().  */
      htab->a8_erratum_fixes = a8_fixes;
      htab->num_a8_erratum_fixes = num_a8_fixes;
    }
  else
    {
      htab->a8_erratum_fixes = NULL;
      htab->num_a8_erratum_fixes = 0;
    }
  return ret;
}
6946
6947 /* Build all the stubs associated with the current output file. The
6948 stubs are kept in a hash table attached to the main linker hash
6949 table. We also set up the .plt entries for statically linked PIC
6950 functions here. This function is called via arm_elf_finish in the
6951 linker. */
6952
6953 bfd_boolean
6954 elf32_arm_build_stubs (struct bfd_link_info *info)
6955 {
6956 asection *stub_sec;
6957 struct bfd_hash_table *table;
6958 enum elf32_arm_stub_type stub_type;
6959 struct elf32_arm_link_hash_table *htab;
6960
6961 htab = elf32_arm_hash_table (info);
6962 if (htab == NULL)
6963 return FALSE;
6964
6965 for (stub_sec = htab->stub_bfd->sections;
6966 stub_sec != NULL;
6967 stub_sec = stub_sec->next)
6968 {
6969 bfd_size_type size;
6970
6971 /* Ignore non-stub sections. */
6972 if (!strstr (stub_sec->name, STUB_SUFFIX))
6973 continue;
6974
6975 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
6976 must at least be done for stub section requiring padding and for SG
6977 veneers to ensure that a non secure code branching to a removed SG
6978 veneer causes an error. */
6979 size = stub_sec->size;
6980 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6981 if (stub_sec->contents == NULL && size != 0)
6982 return FALSE;
6983
6984 stub_sec->size = 0;
6985 }
6986
6987 /* Add new SG veneers after those already in the input import library. */
6988 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6989 {
6990 bfd_vma *start_offset_p;
6991 asection **stub_sec_p;
6992
6993 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6994 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6995 if (start_offset_p == NULL)
6996 continue;
6997
6998 BFD_ASSERT (stub_sec_p != NULL);
6999 if (*stub_sec_p != NULL)
7000 (*stub_sec_p)->size = *start_offset_p;
7001 }
7002
7003 /* Build the stubs as directed by the stub hash table. */
7004 table = &htab->stub_hash_table;
7005 bfd_hash_traverse (table, arm_build_one_stub, info);
7006 if (htab->fix_cortex_a8)
7007 {
7008 /* Place the cortex a8 stubs last. */
7009 htab->fix_cortex_a8 = -1;
7010 bfd_hash_traverse (table, arm_build_one_stub, info);
7011 }
7012
7013 return TRUE;
7014 }
7015
7016 /* Locate the Thumb encoded calling stub for NAME. */
7017
7018 static struct elf_link_hash_entry *
7019 find_thumb_glue (struct bfd_link_info *link_info,
7020 const char *name,
7021 char **error_message)
7022 {
7023 char *tmp_name;
7024 struct elf_link_hash_entry *hash;
7025 struct elf32_arm_link_hash_table *hash_table;
7026
7027 /* We need a pointer to the armelf specific hash table. */
7028 hash_table = elf32_arm_hash_table (link_info);
7029 if (hash_table == NULL)
7030 return NULL;
7031
7032 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7033 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7034
7035 BFD_ASSERT (tmp_name);
7036
7037 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7038
7039 hash = elf_link_hash_lookup
7040 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7041
7042 if (hash == NULL
7043 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7044 "Thumb", tmp_name, name) == -1)
7045 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7046
7047 free (tmp_name);
7048
7049 return hash;
7050 }
7051
7052 /* Locate the ARM encoded calling stub for NAME. */
7053
7054 static struct elf_link_hash_entry *
7055 find_arm_glue (struct bfd_link_info *link_info,
7056 const char *name,
7057 char **error_message)
7058 {
7059 char *tmp_name;
7060 struct elf_link_hash_entry *myh;
7061 struct elf32_arm_link_hash_table *hash_table;
7062
7063 /* We need a pointer to the elfarm specific hash table. */
7064 hash_table = elf32_arm_hash_table (link_info);
7065 if (hash_table == NULL)
7066 return NULL;
7067
7068 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7069 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7070
7071 BFD_ASSERT (tmp_name);
7072
7073 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7074
7075 myh = elf_link_hash_lookup
7076 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7077
7078 if (myh == NULL
7079 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7080 "ARM", tmp_name, name) == -1)
7081 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7082
7083 free (tmp_name);
7084
7085 return myh;
7086 }
7087
7088 /* ARM->Thumb glue (static images):
7089
7090 .arm
7091 __func_from_arm:
7092 ldr r12, __func_addr
7093 bx r12
7094 __func_addr:
7095 .word func @ behave as if you saw a ARM_32 reloc.
7096
7097 (v5t static images)
7098 .arm
7099 __func_from_arm:
7100 ldr pc, __func_addr
7101 __func_addr:
7102 .word func @ behave as if you saw a ARM_32 reloc.
7103
7104 (relocatable images)
7105 .arm
7106 __func_from_arm:
7107 ldr r12, __func_offset
7108 add r12, r12, pc
7109 bx r12
7110 __func_offset:
7111 .word func - . */
7112
/* Encodings for the ARM->Thumb static glue sequence shown above:
   ldr r12, __func_addr ; bx r12 ; .word func.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, __func_addr */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
/* Placeholder for the address word; the +1 marks "not yet output"
   (see record_arm_to_thumb_glue), not a Thumb bit.  */
static const insn32 a2t3_func_addr_insn = 0x00000001;

/* v5T variant: ldr pc, __func_addr ; .word func.  */
#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* address word (+1 marker) */

/* PIC variant: ldr r12, __func_offset ; add r12, r12, pc ; bx r12.  */
#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM:				Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
   __func_from_thumb:		      __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				      ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				      __func_addr:
					.word func  */

#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop */
static const insn32 t2a3_b_insn = 0xea000000;		/* b func */

/* Sizes of the erratum-workaround veneers emitted into the glue
   sections.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* ARMv4 BX veneer: three instructions (12 bytes).  Mnemonics per the
   symbol names: tst / moveq / bx — presumably the register operands are
   patched in when the veneer is emitted; confirm against
   elf32_arm_write_section.  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq pc */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx */
7156
7157 #ifndef ELFARM_NABI_C_INCLUDED
7158 static void
7159 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7160 {
7161 asection * s;
7162 bfd_byte * contents;
7163
7164 if (size == 0)
7165 {
7166 /* Do not include empty glue sections in the output. */
7167 if (abfd != NULL)
7168 {
7169 s = bfd_get_linker_section (abfd, name);
7170 if (s != NULL)
7171 s->flags |= SEC_EXCLUDE;
7172 }
7173 return;
7174 }
7175
7176 BFD_ASSERT (abfd != NULL);
7177
7178 s = bfd_get_linker_section (abfd, name);
7179 BFD_ASSERT (s != NULL);
7180
7181 contents = (bfd_byte *) bfd_alloc (abfd, size);
7182
7183 BFD_ASSERT (s->size == size);
7184 s->contents = contents;
7185 }
7186
7187 bfd_boolean
7188 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7189 {
7190 struct elf32_arm_link_hash_table * globals;
7191
7192 globals = elf32_arm_hash_table (info);
7193 BFD_ASSERT (globals != NULL);
7194
7195 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7196 globals->arm_glue_size,
7197 ARM2THUMB_GLUE_SECTION_NAME);
7198
7199 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7200 globals->thumb_glue_size,
7201 THUMB2ARM_GLUE_SECTION_NAME);
7202
7203 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7204 globals->vfp11_erratum_glue_size,
7205 VFP11_ERRATUM_VENEER_SECTION_NAME);
7206
7207 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7208 globals->stm32l4xx_erratum_glue_size,
7209 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7210
7211 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7212 globals->bx_glue_size,
7213 ARM_BX_GLUE_SECTION_NAME);
7214
7215 return TRUE;
7216 }
7217
7218 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7219 returns the symbol identifying the stub. */
7220
static struct elf_link_hash_entry *
record_arm_to_thumb_glue (struct bfd_link_info * link_info,
			  struct elf_link_hash_entry * h)
{
  const char * name = h->root.root.string;
  asection * s;
  char * tmp_name;
  struct elf_link_hash_entry * myh;
  struct bfd_link_hash_entry * bh;
  struct elf32_arm_link_hash_table * globals;
  bfd_vma val;
  bfd_size_type size;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Decorated glue symbol name for the target function.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);

  if (myh != NULL)
    {
      /* We've already seen this guy.  */
      free (tmp_name);
      return myh;
    }

  /* The only trick here is using hash_table->arm_glue_size as the value.
     Even though the section isn't allocated yet, this is where we will be
     putting it.  The +1 on the value marks that the stub has not been
     output yet - not that it is a Thumb function.  */
  bh = NULL;
  val = globals->arm_glue_size + 1;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_GLOBAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Force the glue symbol local so it never escapes into the dynamic
     symbol table.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Pick the glue flavour: PIC glue for shared/relocatable/pic-veneer
     links, the shorter BLX-era static glue when BLX is usable, and the
     full static glue otherwise.  */
  if (bfd_link_pic (link_info)
      || globals->root.is_relocatable_executable
      || globals->pic_veneer)
    size = ARM2THUMB_PIC_GLUE_SIZE;
  else if (globals->use_blx)
    size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
  else
    size = ARM2THUMB_STATIC_GLUE_SIZE;

  /* Reserve room for the stub in both the section and the running
     glue-size counter.  */
  s->size += size;
  globals->arm_glue_size += size;

  return myh;
}
7290
7291 /* Allocate space for ARMv4 BX veneers. */
7292
static void
record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
{
  asection * s;
  struct elf32_arm_link_hash_table *globals;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;

  /* BX PC does not need a veneer.  */
  if (reg == 15)
    return;

  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  /* Check if this veneer has already been allocated.  */
  if (globals->bx_glue_offset[reg])
    return;

  s = bfd_get_linker_section
    (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  /* Add symbol for veneer.  REG is at most two digits, which the "%d"
     in the format already accounts for, so strlen (format) + 1 is
     enough room.  */
  tmp_name = (char *)
    bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);

  myh = elf_link_hash_lookup
    (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);

  /* The guard on bx_glue_offset above means the symbol must not exist
     yet.  */
  BFD_ASSERT (myh == NULL);

  bh = NULL;
  val = globals->bx_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  /* Keep the veneer symbol local to the output.  */
  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Reserve room for the veneer and record its offset.  NOTE(review):
     the "| 2" appears to use the low bits of the (4-byte aligned)
     offset as a state flag — confirm against the code that consumes
     bx_glue_offset.  */
  s->size += ARM_BX_VENEER_SIZE;
  globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
  globals->bx_glue_size += ARM_BX_VENEER_SIZE;
}
7347
7348
7349 /* Add an entry to the code/data map for section SEC. */
7350
7351 static void
7352 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7353 {
7354 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7355 unsigned int newidx;
7356
7357 if (sec_data->map == NULL)
7358 {
7359 sec_data->map = (elf32_arm_section_map *)
7360 bfd_malloc (sizeof (elf32_arm_section_map));
7361 sec_data->mapcount = 0;
7362 sec_data->mapsize = 1;
7363 }
7364
7365 newidx = sec_data->mapcount++;
7366
7367 if (sec_data->mapcount > sec_data->mapsize)
7368 {
7369 sec_data->mapsize *= 2;
7370 sec_data->map = (elf32_arm_section_map *)
7371 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7372 * sizeof (elf32_arm_section_map));
7373 }
7374
7375 if (sec_data->map)
7376 {
7377 sec_data->map[newidx].vma = vma;
7378 sec_data->map[newidx].type = type;
7379 }
7380 }
7381
7382
7383 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7384 veneers are handled for now. */
7385
7386 static bfd_vma
7387 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7388 elf32_vfp11_erratum_list *branch,
7389 bfd *branch_bfd,
7390 asection *branch_sec,
7391 unsigned int offset)
7392 {
7393 asection *s;
7394 struct elf32_arm_link_hash_table *hash_table;
7395 char *tmp_name;
7396 struct elf_link_hash_entry *myh;
7397 struct bfd_link_hash_entry *bh;
7398 bfd_vma val;
7399 struct _arm_elf_section_data *sec_data;
7400 elf32_vfp11_erratum_list *newerr;
7401
7402 hash_table = elf32_arm_hash_table (link_info);
7403 BFD_ASSERT (hash_table != NULL);
7404 BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7405
7406 s = bfd_get_linker_section
7407 (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7408
7409 sec_data = elf32_arm_section_data (s);
7410
7411 BFD_ASSERT (s != NULL);
7412
7413 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7414 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7415
7416 BFD_ASSERT (tmp_name);
7417
7418 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7419 hash_table->num_vfp11_fixes);
7420
7421 myh = elf_link_hash_lookup
7422 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7423
7424 BFD_ASSERT (myh == NULL);
7425
7426 bh = NULL;
7427 val = hash_table->vfp11_erratum_glue_size;
7428 _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7429 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7430 NULL, TRUE, FALSE, &bh);
7431
7432 myh = (struct elf_link_hash_entry *) bh;
7433 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7434 myh->forced_local = 1;
7435
7436 /* Link veneer back to calling location. */
7437 sec_data->erratumcount += 1;
7438 newerr = (elf32_vfp11_erratum_list *)
7439 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7440
7441 newerr->type = VFP11_ERRATUM_ARM_VENEER;
7442 newerr->vma = -1;
7443 newerr->u.v.branch = branch;
7444 newerr->u.v.id = hash_table->num_vfp11_fixes;
7445 branch->u.b.veneer = newerr;
7446
7447 newerr->next = sec_data->erratumlist;
7448 sec_data->erratumlist = newerr;
7449
7450 /* A symbol for the return from the veneer. */
7451 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7452 hash_table->num_vfp11_fixes);
7453
7454 myh = elf_link_hash_lookup
7455 (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7456
7457 if (myh != NULL)
7458 abort ();
7459
7460 bh = NULL;
7461 val = offset + 4;
7462 _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7463 branch_sec, val, NULL, TRUE, FALSE, &bh);
7464
7465 myh = (struct elf_link_hash_entry *) bh;
7466 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7467 myh->forced_local = 1;
7468
7469 free (tmp_name);
7470
7471 /* Generate a mapping symbol for the veneer section, and explicitly add an
7472 entry for that symbol to the code/data map for the section. */
7473 if (hash_table->vfp11_erratum_glue_size == 0)
7474 {
7475 bh = NULL;
7476 /* FIXME: Creates an ARM symbol. Thumb mode will need attention if it
7477 ever requires this erratum fix. */
7478 _bfd_generic_link_add_one_symbol (link_info,
7479 hash_table->bfd_of_glue_owner, "$a",
7480 BSF_LOCAL, s, 0, NULL,
7481 TRUE, FALSE, &bh);
7482
7483 myh = (struct elf_link_hash_entry *) bh;
7484 myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7485 myh->forced_local = 1;
7486
7487 /* The elf32_arm_init_maps function only cares about symbols from input
7488 BFDs. We must make a note of this generated mapping symbol
7489 ourselves so that code byteswapping works properly in
7490 elf32_arm_write_section. */
7491 elf32_arm_section_map_add (s, 'a', 0);
7492 }
7493
7494 s->size += VFP11_ERRATUM_VENEER_SIZE;
7495 hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7496 hash_table->num_vfp11_fixes++;
7497
7498 /* The offset of the veneer. */
7499 return val;
7500 }
7501
7502 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7503 veneers need to be handled because used only in Cortex-M. */
7504
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Decorated name for the veneer entry symbol; 10 extra bytes cover
     the "%d" expansion of the fix counter.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);

  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer entry symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
    bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was reassigned to
     OFFSET + 4 (the return-symbol value) above — confirm what callers
     expect before relying on this comment.  */
  return val;
}
7620
7621 #define ARM_GLUE_SECTION_FLAGS \
7622 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7623 | SEC_READONLY | SEC_LINKER_CREATED)
7624
7625 /* Create a fake section for use by the ARM backend of the linker. */
7626
7627 static bfd_boolean
7628 arm_make_glue_section (bfd * abfd, const char * name)
7629 {
7630 asection * sec;
7631
7632 sec = bfd_get_linker_section (abfd, name);
7633 if (sec != NULL)
7634 /* Already made. */
7635 return TRUE;
7636
7637 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7638
7639 if (sec == NULL
7640 || !bfd_set_section_alignment (abfd, sec, 2))
7641 return FALSE;
7642
7643 /* Set the gc mark to prevent the section from being removed by garbage
7644 collection, despite the fact that no relocs refer to this section. */
7645 sec->gc_mark = 1;
7646
7647 return TRUE;
7648 }
7649
7650 /* Set size of .plt entries. This function is called from the
7651 linker scripts in ld/emultempl/{armelf}.em. */
7652
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Select the long variant of .plt entries for subsequent PLT
     generation.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7658
7659 /* Add the glue sections to ABFD. This function is called from the
7660 linker scripts in ld/emultempl/{armelf}.em. */
7661
7662 bfd_boolean
7663 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7664 struct bfd_link_info *info)
7665 {
7666 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7667 bfd_boolean dostm32l4xx = globals
7668 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7669 bfd_boolean addglue;
7670
7671 /* If we are only performing a partial
7672 link do not bother adding the glue. */
7673 if (bfd_link_relocatable (info))
7674 return TRUE;
7675
7676 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7677 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7678 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7679 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7680
7681 if (!dostm32l4xx)
7682 return addglue;
7683
7684 return addglue
7685 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7686 }
7687
7688 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7689 ensures they are not marked for deletion by
7690 strip_excluded_output_sections () when veneers are going to be created
7691 later. Not doing so would trigger assert on empty section size in
7692 lang_size_sections_1 (). */
7693
7694 void
7695 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7696 {
7697 enum elf32_arm_stub_type stub_type;
7698
7699 /* If we are only performing a partial
7700 link do not bother adding the glue. */
7701 if (bfd_link_relocatable (info))
7702 return;
7703
7704 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7705 {
7706 asection *out_sec;
7707 const char *out_sec_name;
7708
7709 if (!arm_dedicated_stub_output_section_required (stub_type))
7710 continue;
7711
7712 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7713 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7714 if (out_sec != NULL)
7715 out_sec->flags |= SEC_KEEP;
7716 }
7717 }
7718
7719 /* Select a BFD to be used to hold the sections used by the glue code.
7720 This function is called from the linker scripts in ld/emultempl/
7721 {armelf/pe}.em. */
7722
7723 bfd_boolean
7724 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7725 {
7726 struct elf32_arm_link_hash_table *globals;
7727
7728 /* If we are only performing a partial link
7729 do not bother getting a bfd to hold the glue. */
7730 if (bfd_link_relocatable (info))
7731 return TRUE;
7732
7733 /* Make sure we don't attach the glue sections to a dynamic object. */
7734 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7735
7736 globals = elf32_arm_hash_table (info);
7737 BFD_ASSERT (globals != NULL);
7738
7739 if (globals->bfd_of_glue_owner != NULL)
7740 return TRUE;
7741
7742 /* Save the bfd for later use. */
7743 globals->bfd_of_glue_owner = abfd;
7744
7745 return TRUE;
7746 }
7747
7748 static void
7749 check_use_blx (struct elf32_arm_link_hash_table *globals)
7750 {
7751 int cpu_arch;
7752
7753 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7754 Tag_CPU_arch);
7755
7756 if (globals->fix_arm1176)
7757 {
7758 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7759 globals->use_blx = 1;
7760 }
7761 else
7762 {
7763 if (cpu_arch > TAG_CPU_ARCH_V4T)
7764 globals->use_blx = 1;
7765 }
7766 }
7767
/* Scan the relocations of every section in ABFD, before section sizes
   are fixed, and record any interworking glue that will be needed:
   ARM->Thumb call glue for R_ARM_PC24 branches to Thumb symbols, and
   BX veneers for R_ARM_V4BX when fix_v4bx >= 2.  */
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.  */
	  if ( r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* The BX target register is in the low nibble of the
		 instruction; record a veneer for it.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Release per-section buffers unless they are cached copies
	 owned by the section data.  */
      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (internal_relocs != NULL
	  && elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (internal_relocs != NULL
      && elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
7928 #endif
7929
7930
7931 /* Initialise maps of ARM/Thumb/data for input BFDs. */
7932
7933 void
7934 bfd_elf32_arm_init_maps (bfd *abfd)
7935 {
7936 Elf_Internal_Sym *isymbuf;
7937 Elf_Internal_Shdr *hdr;
7938 unsigned int i, localsyms;
7939
7940 /* PR 7093: Make sure that we are dealing with an arm elf binary. */
7941 if (! is_arm_elf (abfd))
7942 return;
7943
7944 if ((abfd->flags & DYNAMIC) != 0)
7945 return;
7946
7947 hdr = & elf_symtab_hdr (abfd);
7948 localsyms = hdr->sh_info;
7949
7950 /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7951 should contain the number of local symbols, which should come before any
7952 global symbols. Mapping symbols are always local. */
7953 isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7954 NULL);
7955
7956 /* No internal symbols read? Skip this BFD. */
7957 if (isymbuf == NULL)
7958 return;
7959
7960 for (i = 0; i < localsyms; i++)
7961 {
7962 Elf_Internal_Sym *isym = &isymbuf[i];
7963 asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7964 const char *name;
7965
7966 if (sec != NULL
7967 && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7968 {
7969 name = bfd_elf_string_from_elf_section (abfd,
7970 hdr->sh_link, isym->st_name);
7971
7972 if (bfd_is_arm_special_symbol_name (name,
7973 BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7974 elf32_arm_section_map_add (sec, name[1], isym->st_value);
7975 }
7976 }
7977 }
7978
7979
7980 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7981 say what they wanted. */
7982
7983 void
7984 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7985 {
7986 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7987 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7988
7989 if (globals == NULL)
7990 return;
7991
7992 if (globals->fix_cortex_a8 == -1)
7993 {
7994 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
7995 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7996 && (out_attr[Tag_CPU_arch_profile].i == 'A'
7997 || out_attr[Tag_CPU_arch_profile].i == 0))
7998 globals->fix_cortex_a8 = 1;
7999 else
8000 globals->fix_cortex_a8 = 0;
8001 }
8002 }
8003
8004
8005 void
8006 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8007 {
8008 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8009 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8010
8011 if (globals == NULL)
8012 return;
8013 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8014 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8015 {
8016 switch (globals->vfp11_fix)
8017 {
8018 case BFD_ARM_VFP11_FIX_DEFAULT:
8019 case BFD_ARM_VFP11_FIX_NONE:
8020 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8021 break;
8022
8023 default:
8024 /* Give a warning, but do as the user requests anyway. */
8025 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8026 "workaround is not necessary for target architecture"), obfd);
8027 }
8028 }
8029 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8030 /* For earlier architectures, we might need the workaround, but do not
8031 enable it by default. If users is running with broken hardware, they
8032 must enable the erratum fix explicitly. */
8033 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8034 }
8035
8036 void
8037 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8038 {
8039 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8040 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8041
8042 if (globals == NULL)
8043 return;
8044
8045 /* We assume only Cortex-M4 may require the fix. */
8046 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8047 || out_attr[Tag_CPU_arch_profile].i != 'M')
8048 {
8049 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8050 /* Give a warning, but do as the user requests anyway. */
8051 _bfd_error_handler
8052 (_("%pB: warning: selected STM32L4XX erratum "
8053 "workaround is not necessary for target architecture"), obfd);
8054 }
8055 }
8056
/* The VFP11 pipeline an instruction is issued to, as classified by
   bfd_arm_vfp11_insn_decode below for the denormal erratum scan.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply/accumulate pipeline (also add/sub, converts).  */
  VFP11_LS,	/* Load/store and register-transfer pipeline.  */
  VFP11_DS,	/* Divide/square-root pipeline.  */
  VFP11_BAD	/* Not a recognised VFP11 instruction.  */
};
8064
8065 /* Return a VFP register number. This is encoded as RX:X for single-precision
8066 registers, or X:RX for double-precision registers, where RX is the group of
8067 four bits in the instruction encoding and X is the single extension bit.
8068 RX and X fields are specified using their lowest (starting) bit. The return
8069 value is:
8070
8071 0...31: single-precision registers s0...s31
8072 32...63: double-precision registers d0...d31.
8073
8074 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8075 encounter VFP3 instructions, so we allow the full range for DP registers. */
8076
8077 static unsigned int
8078 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8079 unsigned int x)
8080 {
8081 if (is_double)
8082 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8083 else
8084 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8085 }
8086
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Ignore d16-d31.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  /* 0..31: single-precision registers, one bit each.  */
  if (reg < 32)
    {
      *wmask |= 1 << reg;
      return;
    }

  /* 32..47: d0-d15, each covering two consecutive SP slots.  Registers
     d16-d31 (48..63) are ignored: they do not exist on VFP11.  */
  if (reg < 48)
    *wmask |= 3 << ((reg - 32) * 2);
}
8098
8099 /* Return TRUE if WMASK overwrites anything in REGS. */
8100
8101 static bfd_boolean
8102 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8103 {
8104 int i;
8105
8106 for (i = 0; i < numregs; i++)
8107 {
8108 unsigned int reg = regs[i];
8109
8110 if (reg < 32 && (wmask & (1 << reg)) != 0)
8111 return TRUE;
8112
8113 reg -= 32;
8114
8115 if (reg >= 16)
8116 continue;
8117
8118 if ((wmask & (3 << (reg * 2))) != 0)
8119 return TRUE;
8120 }
8121
8122 return FALSE;
8123 }
8124
8125 /* In this function, we're interested in two things: finding input registers
8126 for VFP data-processing instructions, and finding the set of registers which
8127 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8128 hold the written set, so FLDM etc. are easy to deal with (we're only
8129 interested in 32 SP registers or 16 dp registers, due to the VFP version
8130 implemented by the chip in question). DP registers are marked by setting
8131 both SP registers in the write mask). */
8132
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select double-precision operand encodings.  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      /* Fd (destination) and Fm (second source) register numbers.  */
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Gather the p, q, r, s opcode bits into a single selector.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Three-operand multiply-accumulate: Fd is both read and
	     written, so it appears in the input set as well as the
	     write mask.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  vfp_binop:
	  /* Two-operand arithmetic: inputs are Fn and Fm, output Fd.  */
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* Secondary opcode from bits 19:16 and bit 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* L bit clear: transfer to the VFP register(s), so they are
	 overwritten.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* SP form writes a pair of consecutive S registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode selector from the P, U and W bits.  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Multiple load: the immediate gives the transfer length in
	       words; halve it for DP registers.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8316
8317
8318 static int elf32_arm_compare_mapping (const void * a, const void * b);
8319
8320
8321 /* Look for potentially-troublesome code sequences which might trigger the
8322 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8323 (available from ARM) for details of the erratum. A short version is
8324 described in ld.texinfo. */
8325
8326 bfd_boolean
8327 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8328 {
8329 asection *sec;
8330 bfd_byte *contents = NULL;
8331 int state = 0;
8332 int regs[3], numregs = 0;
8333 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8334 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8335
8336 if (globals == NULL)
8337 return FALSE;
8338
8339 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8340 The states transition as follows:
8341
8342 0 -> 1 (vector) or 0 -> 2 (scalar)
8343 A VFP FMAC-pipeline instruction has been seen. Fill
8344 regs[0]..regs[numregs-1] with its input operands. Remember this
8345 instruction in 'first_fmac'.
8346
8347 1 -> 2
8348 Any instruction, except for a VFP instruction which overwrites
8349 regs[*].
8350
8351 1 -> 3 [ -> 0 ] or
8352 2 -> 3 [ -> 0 ]
8353 A VFP instruction has been seen which overwrites any of regs[*].
8354 We must make a veneer! Reset state to 0 before examining next
8355 instruction.
8356
8357 2 -> 0
8358 If we fail to match anything in state 2, reset to state 0 and reset
8359 the instruction pointer to the instruction after 'first_fmac'.
8360
8361 If the VFP11 vector mode is in use, there must be at least two unrelated
8362 instructions between anti-dependent VFP11 instructions to properly avoid
8363 triggering the erratum, hence the use of the extra state 1. */
8364
8365 /* If we are only performing a partial link do not bother
8366 to construct any glue. */
8367 if (bfd_link_relocatable (link_info))
8368 return TRUE;
8369
8370 /* Skip if this bfd does not correspond to an ELF image. */
8371 if (! is_arm_elf (abfd))
8372 return TRUE;
8373
8374 /* We should have chosen a fix type by the time we get here. */
8375 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8376
8377 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8378 return TRUE;
8379
8380 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8381 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8382 return TRUE;
8383
8384 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8385 {
8386 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8387 struct _arm_elf_section_data *sec_data;
8388
8389 /* If we don't have executable progbits, we're not interested in this
8390 section. Also skip if section is to be excluded. */
8391 if (elf_section_type (sec) != SHT_PROGBITS
8392 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8393 || (sec->flags & SEC_EXCLUDE) != 0
8394 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8395 || sec->output_section == bfd_abs_section_ptr
8396 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8397 continue;
8398
8399 sec_data = elf32_arm_section_data (sec);
8400
8401 if (sec_data->mapcount == 0)
8402 continue;
8403
8404 if (elf_section_data (sec)->this_hdr.contents != NULL)
8405 contents = elf_section_data (sec)->this_hdr.contents;
8406 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8407 goto error_return;
8408
8409 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8410 elf32_arm_compare_mapping);
8411
8412 for (span = 0; span < sec_data->mapcount; span++)
8413 {
8414 unsigned int span_start = sec_data->map[span].vma;
8415 unsigned int span_end = (span == sec_data->mapcount - 1)
8416 ? sec->size : sec_data->map[span + 1].vma;
8417 char span_type = sec_data->map[span].type;
8418
8419 /* FIXME: Only ARM mode is supported at present. We may need to
8420 support Thumb-2 mode also at some point. */
8421 if (span_type != 'a')
8422 continue;
8423
8424 for (i = span_start; i < span_end;)
8425 {
8426 unsigned int next_i = i + 4;
8427 unsigned int insn = bfd_big_endian (abfd)
8428 ? (contents[i] << 24)
8429 | (contents[i + 1] << 16)
8430 | (contents[i + 2] << 8)
8431 | contents[i + 3]
8432 : (contents[i + 3] << 24)
8433 | (contents[i + 2] << 16)
8434 | (contents[i + 1] << 8)
8435 | contents[i];
8436 unsigned int writemask = 0;
8437 enum bfd_arm_vfp11_pipe vpipe;
8438
8439 switch (state)
8440 {
8441 case 0:
8442 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8443 &numregs);
8444 /* I'm assuming the VFP11 erratum can trigger with denorm
8445 operands on either the FMAC or the DS pipeline. This might
8446 lead to slightly overenthusiastic veneer insertion. */
8447 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8448 {
8449 state = use_vector ? 1 : 2;
8450 first_fmac = i;
8451 veneer_of_insn = insn;
8452 }
8453 break;
8454
8455 case 1:
8456 {
8457 int other_regs[3], other_numregs;
8458 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8459 other_regs,
8460 &other_numregs);
8461 if (vpipe != VFP11_BAD
8462 && bfd_arm_vfp11_antidependency (writemask, regs,
8463 numregs))
8464 state = 3;
8465 else
8466 state = 2;
8467 }
8468 break;
8469
8470 case 2:
8471 {
8472 int other_regs[3], other_numregs;
8473 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8474 other_regs,
8475 &other_numregs);
8476 if (vpipe != VFP11_BAD
8477 && bfd_arm_vfp11_antidependency (writemask, regs,
8478 numregs))
8479 state = 3;
8480 else
8481 {
8482 state = 0;
8483 next_i = first_fmac + 4;
8484 }
8485 }
8486 break;
8487
8488 case 3:
8489 abort (); /* Should be unreachable. */
8490 }
8491
8492 if (state == 3)
8493 {
8494 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8495 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8496
8497 elf32_arm_section_data (sec)->erratumcount += 1;
8498
8499 newerr->u.b.vfp_insn = veneer_of_insn;
8500
8501 switch (span_type)
8502 {
8503 case 'a':
8504 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8505 break;
8506
8507 default:
8508 abort ();
8509 }
8510
8511 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8512 first_fmac);
8513
8514 newerr->vma = -1;
8515
8516 newerr->next = sec_data->erratumlist;
8517 sec_data->erratumlist = newerr;
8518
8519 state = 0;
8520 }
8521
8522 i = next_i;
8523 }
8524 }
8525
8526 if (contents != NULL
8527 && elf_section_data (sec)->this_hdr.contents != contents)
8528 free (contents);
8529 contents = NULL;
8530 }
8531
8532 return TRUE;
8533
8534 error_return:
8535 if (contents != NULL
8536 && elf_section_data (sec)->this_hdr.contents != contents)
8537 free (contents);
8538
8539 return FALSE;
8540 }
8541
8542 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8543 after sections have been laid out, using specially-named symbols. */
8544
8545 void
8546 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8547 struct bfd_link_info *link_info)
8548 {
8549 asection *sec;
8550 struct elf32_arm_link_hash_table *globals;
8551 char *tmp_name;
8552
8553 if (bfd_link_relocatable (link_info))
8554 return;
8555
8556 /* Skip if this bfd does not correspond to an ELF image. */
8557 if (! is_arm_elf (abfd))
8558 return;
8559
8560 globals = elf32_arm_hash_table (link_info);
8561 if (globals == NULL)
8562 return;
8563
8564 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8565 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8566
8567 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8568 {
8569 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8570 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8571
8572 for (; errnode != NULL; errnode = errnode->next)
8573 {
8574 struct elf_link_hash_entry *myh;
8575 bfd_vma vma;
8576
8577 switch (errnode->type)
8578 {
8579 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8580 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8581 /* Find veneer symbol. */
8582 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8583 errnode->u.b.veneer->u.v.id);
8584
8585 myh = elf_link_hash_lookup
8586 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8587
8588 if (myh == NULL)
8589 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8590 abfd, "VFP11", tmp_name);
8591
8592 vma = myh->root.u.def.section->output_section->vma
8593 + myh->root.u.def.section->output_offset
8594 + myh->root.u.def.value;
8595
8596 errnode->u.b.veneer->vma = vma;
8597 break;
8598
8599 case VFP11_ERRATUM_ARM_VENEER:
8600 case VFP11_ERRATUM_THUMB_VENEER:
8601 /* Find return location. */
8602 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8603 errnode->u.v.id);
8604
8605 myh = elf_link_hash_lookup
8606 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8607
8608 if (myh == NULL)
8609 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8610 abfd, "VFP11", tmp_name);
8611
8612 vma = myh->root.u.def.section->output_section->vma
8613 + myh->root.u.def.section->output_offset
8614 + myh->root.u.def.value;
8615
8616 errnode->u.v.branch->vma = vma;
8617 break;
8618
8619 default:
8620 abort ();
8621 }
8622 }
8623 }
8624
8625 free (tmp_name);
8626 }
8627
8628 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8629 return locations after sections have been laid out, using
8630 specially-named symbols. */
8631
8632 void
8633 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8634 struct bfd_link_info *link_info)
8635 {
8636 asection *sec;
8637 struct elf32_arm_link_hash_table *globals;
8638 char *tmp_name;
8639
8640 if (bfd_link_relocatable (link_info))
8641 return;
8642
8643 /* Skip if this bfd does not correspond to an ELF image. */
8644 if (! is_arm_elf (abfd))
8645 return;
8646
8647 globals = elf32_arm_hash_table (link_info);
8648 if (globals == NULL)
8649 return;
8650
8651 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8652 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8653
8654 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8655 {
8656 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8657 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8658
8659 for (; errnode != NULL; errnode = errnode->next)
8660 {
8661 struct elf_link_hash_entry *myh;
8662 bfd_vma vma;
8663
8664 switch (errnode->type)
8665 {
8666 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8667 /* Find veneer symbol. */
8668 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8669 errnode->u.b.veneer->u.v.id);
8670
8671 myh = elf_link_hash_lookup
8672 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8673
8674 if (myh == NULL)
8675 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8676 abfd, "STM32L4XX", tmp_name);
8677
8678 vma = myh->root.u.def.section->output_section->vma
8679 + myh->root.u.def.section->output_offset
8680 + myh->root.u.def.value;
8681
8682 errnode->u.b.veneer->vma = vma;
8683 break;
8684
8685 case STM32L4XX_ERRATUM_VENEER:
8686 /* Find return location. */
8687 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8688 errnode->u.v.id);
8689
8690 myh = elf_link_hash_lookup
8691 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8692
8693 if (myh == NULL)
8694 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8695 abfd, "STM32L4XX", tmp_name);
8696
8697 vma = myh->root.u.def.section->output_section->vma
8698 + myh->root.u.def.section->output_offset
8699 + myh->root.u.def.value;
8700
8701 errnode->u.v.branch->vma = vma;
8702 break;
8703
8704 default:
8705 abort ();
8706 }
8707 }
8708 }
8709
8710 free (tmp_name);
8711 }
8712
8713 static inline bfd_boolean
8714 is_thumb2_ldmia (const insn32 insn)
8715 {
8716 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8717 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8718 return (insn & 0xffd02000) == 0xe8900000;
8719 }
8720
8721 static inline bfd_boolean
8722 is_thumb2_ldmdb (const insn32 insn)
8723 {
8724 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8725 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8726 return (insn & 0xffd02000) == 0xe9100000;
8727 }
8728
8729 static inline bfd_boolean
8730 is_thumb2_vldm (const insn32 insn)
8731 {
8732 /* A6.5 Extension register load or store instruction
8733 A7.7.229
8734 We look for SP 32-bit and DP 64-bit registers.
8735 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8736 <list> is consecutive 64-bit registers
8737 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8738 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8739 <list> is consecutive 32-bit registers
8740 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8741 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8742 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8743 return
8744 (((insn & 0xfe100f00) == 0xec100b00) ||
8745 ((insn & 0xfe100f00) == 0xec100a00))
8746 && /* (IA without !). */
8747 (((((insn << 7) >> 28) & 0xd) == 0x4)
8748 /* (IA with !), includes VPOP (when reg number is SP). */
8749 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8750 /* (DB with !). */
8751 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8752 }
8753
8754 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8755 VLDM opcode and:
8756 - computes the number and the mode of memory accesses
8757 - decides if the replacement should be done:
8758 . replaces only if > 8-word accesses
8759 . or (testing purposes only) replaces all accesses. */
8760
8761 static bfd_boolean
8762 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8763 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8764 {
8765 int nb_words = 0;
8766
8767 /* The field encoding the register list is the same for both LDMIA
8768 and LDMDB encodings. */
8769 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8770 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8771 else if (is_thumb2_vldm (insn))
8772 nb_words = (insn & 0xff);
8773
8774 /* DEFAULT mode accounts for the real bug condition situation,
8775 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8776 return
8777 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8778 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8779 }
8780
8781 /* Look for potentially-troublesome code sequences which might trigger
8782 the STM STM32L4XX erratum. */
8783
bfd_boolean
bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
				      struct bfd_link_info *link_info)
{
  asection *sec;
  bfd_byte *contents = NULL;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);

  if (globals == NULL)
    return FALSE;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Skip if this bfd does not correspond to an ELF image.  */
  if (! is_arm_elf (abfd))
    return TRUE;

  if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
    return TRUE;

  /* Skip this BFD if it corresponds to an executable or dynamic object.  */
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    return TRUE;

  for (sec = abfd->sections; sec != NULL; sec = sec->next)
    {
      unsigned int i, span;
      struct _arm_elf_section_data *sec_data;

      /* If we don't have executable progbits, we're not interested in this
	 section.  Also skip if section is to be excluded.  */
      if (elf_section_type (sec) != SHT_PROGBITS
	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
	  || (sec->flags & SEC_EXCLUDE) != 0
	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
	  || sec->output_section == bfd_abs_section_ptr
	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
	continue;

      sec_data = elf32_arm_section_data (sec);

      /* No mapping symbols means no spans to classify; skip.  */
      if (sec_data->mapcount == 0)
	continue;

      if (elf_section_data (sec)->this_hdr.contents != NULL)
	contents = elf_section_data (sec)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
	goto error_return;

      /* Sort the mapping symbols by address so spans are contiguous.  */
      qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
	     elf32_arm_compare_mapping);

      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? sec->size : sec_data->map[span + 1].vma;
	  char span_type = sec_data->map[span].type;
	  int itblock_current_pos = 0;

	  /* Only Thumb2 mode need be supported with this CM4 specific
	     code, we should not encounter any arm mode eg span_type
	     != 'a'.  */
	  if (span_type != 't')
	    continue;

	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
	      bfd_boolean insn_32bit = FALSE;
	      bfd_boolean is_ldm = FALSE;
	      bfd_boolean is_vldm = FALSE;
	      bfd_boolean is_not_last_in_it_block = FALSE;

	      /* The first 16-bits of all 32-bit thumb2 instructions start
		 with opcode[15..13]=0b111 and the encoded op1 can be anything
		 except opcode[12..11]!=0b00.
		 See 32-bit Thumb instruction encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      /* Compute the predicate that tells if the instruction
		 is concerned by the IT block
		 - Creates an error if there is a ldm that is not
		 last in the IT block thus cannot be replaced
		 - Otherwise we can create a branch at the end of the
		 IT block, it will be controlled naturally by IT
		 with the proper pseudo-predicate
		 - So the only interesting predicate is the one that
		 tells that we are not on the last item of an IT
		 block.  */
	      if (itblock_current_pos != 0)
		is_not_last_in_it_block = !!--itblock_current_pos;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
		  is_vldm = is_thumb2_vldm (insn);

		  /* Veneers are created for (v)ldm depending on
		     option flags and memory accesses conditions; but
		     if the instruction is not the last instruction of
		     an IT block, we cannot create a jump there, so we
		     bail out.  */
		  if ((is_ldm || is_vldm)
		      && stm32l4xx_need_create_replacing_stub
		      (insn, globals->stm32l4xx_fix))
		    {
		      if (is_not_last_in_it_block)
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#x): error: multiple load detected"
			       " in non-last IT block instruction:"
			       " STM32L4XX veneer cannot be generated; "
			       "use gcc option -mrestrict-it to generate"
			       " only one instruction per IT block"),
			     abfd, sec, i);
			}
		      else
			{
			  /* Record this (V)LDM so a replacing veneer is
			     emitted for it later.  */
			  elf32_stm32l4xx_erratum_list *newerr =
			    (elf32_stm32l4xx_erratum_list *)
			    bfd_zmalloc
			    (sizeof (elf32_stm32l4xx_erratum_list));

			  elf32_arm_section_data (sec)
			    ->stm32l4xx_erratumcount += 1;
			  newerr->u.b.insn = insn;
			  /* We create only thumb branches.  */
			  newerr->type =
			    STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
			  record_stm32l4xx_erratum_veneer
			    (link_info, newerr, abfd, sec,
			     i,
			     is_ldm ?
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
			  /* VMA is filled in later by
			     bfd_elf32_arm_stm32l4xx_fix_veneer_locations.  */
			  newerr->vma = -1;
			  newerr->next = sec_data->stm32l4xx_erratumlist;
			  sec_data->stm32l4xx_erratumlist = newerr;
			}
		    }
		}
	      else
		{
		  /* A7.7.37 IT p208
		     IT blocks are only encoded in T1
		     Encoding T1: IT{x{y{z}}} <firstcond>
		     1 0 1 1 - 1 1 1 1 - firstcond - mask
		     if mask = '0000' then see 'related encodings'
		     We don't deal with UNPREDICTABLE, just ignore these.
		     There can be no nested IT blocks so an IT block
		     is naturally a new one for which it is worth
		     computing its size.  */
		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
		    && ((insn & 0x000f) != 0x0000);
		  /* If we have a new IT block we compute its size.  */
		  if (is_newitblock)
		    {
		      /* Compute the number of instructions controlled
			 by the IT block, it will be used to decide
			 whether we are inside an IT block or not.  */
		      unsigned int mask = insn & 0x000f;
		      itblock_current_pos = 4 - ctz (mask);
		    }
		}

	      i += insn_32bit ? 4 : 2;
	    }
	}

      if (contents != NULL
	  && elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;
    }

  return TRUE;

error_return:
  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);

  return FALSE;
}
8976
8977 /* Set target relocation values needed during linking. */
8978
8979 void
8980 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8981 struct bfd_link_info *link_info,
8982 struct elf32_arm_params *params)
8983 {
8984 struct elf32_arm_link_hash_table *globals;
8985
8986 globals = elf32_arm_hash_table (link_info);
8987 if (globals == NULL)
8988 return;
8989
8990 globals->target1_is_rel = params->target1_is_rel;
8991 if (globals->fdpic_p)
8992 globals->target2_reloc = R_ARM_GOT32;
8993 else if (strcmp (params->target2_type, "rel") == 0)
8994 globals->target2_reloc = R_ARM_REL32;
8995 else if (strcmp (params->target2_type, "abs") == 0)
8996 globals->target2_reloc = R_ARM_ABS32;
8997 else if (strcmp (params->target2_type, "got-rel") == 0)
8998 globals->target2_reloc = R_ARM_GOT_PREL;
8999 else
9000 {
9001 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9002 params->target2_type);
9003 }
9004 globals->fix_v4bx = params->fix_v4bx;
9005 globals->use_blx |= params->use_blx;
9006 globals->vfp11_fix = params->vfp11_denorm_fix;
9007 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9008 if (globals->fdpic_p)
9009 globals->pic_veneer = 1;
9010 else
9011 globals->pic_veneer = params->pic_veneer;
9012 globals->fix_cortex_a8 = params->fix_cortex_a8;
9013 globals->fix_arm1176 = params->fix_arm1176;
9014 globals->cmse_implib = params->cmse_implib;
9015 globals->in_implib_bfd = params->in_implib_bfd;
9016
9017 BFD_ASSERT (is_arm_elf (output_bfd));
9018 elf_arm_tdata (output_bfd)->no_enum_size_warning
9019 = params->no_enum_size_warning;
9020 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9021 = params->no_wchar_size_warning;
9022 }
9023
/* Replace the target offset of a Thumb bl or b.w instruction.
   ABFD supplies the byte order, OFFSET is the new (byte) displacement
   and INSN points at the 32-bit instruction (two consecutive
   halfwords) to patch.  */

static void
insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
{
  bfd_vma upper;
  bfd_vma lower;
  int reloc_sign;

  /* Branch targets are halfword aligned, so bit 0 must be clear.  */
  BFD_ASSERT ((offset & 1) == 0);

  upper = bfd_get_16 (abfd, insn);
  lower = bfd_get_16 (abfd, insn + 2);
  reloc_sign = (offset < 0) ? 1 : 0;
  /* First halfword: imm10 (offset bits 21:12) plus the S (sign) bit
     in bit 10.  */
  upper = (upper & ~(bfd_vma) 0x7ff)
	  | ((offset >> 12) & 0x3ff)
	  | (reloc_sign << 10);
  /* Second halfword: J1 and J2 carry offset bits 23 and 22, each
     stored as NOT(bit) XOR S, plus imm11 (offset bits 11:1).  */
  lower = (lower & ~(bfd_vma) 0x2fff)
	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
	  | ((offset >> 1) & 0x7ff);
  bfd_put_16 (abfd, upper, insn);
  bfd_put_16 (abfd, lower, insn + 2);
}
9048
/* Thumb code calling an ARM function.  Lazily emit a Thumb->ARM
   interworking stub into the glue section on first use, then redirect
   the Thumb BL at OFFSET (plus ADDEND) in INPUT_SECTION to that stub.
   VAL is the destination address.  Returns TRUE on success, FALSE on
   failure (with *ERROR_MESSAGE set by find_thumb_glue).  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* Bit 0 set in the glue symbol's value flags a stub whose contents
     have not been written yet; emit it now and clear the flag.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Stub body: bx pc; nop; b <destination> (ARM).  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9149
/* Populate an Arm to Thumb stub.  Returns the stub symbol, or NULL on
   failure (with *ERROR_MESSAGE set by find_arm_glue).  The stub body
   is written lazily on first use: bit 0 of the glue symbol's value
   flags a stub that has not been emitted yet.  Three encodings are
   used, depending on whether PIC is needed and whether BLX is
   available.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  if ((my_offset & 0x01) == 0x01)
    {
      /* Unlike the Thumb->ARM case, a missing interworking flag is
	 only warned about here; the stub is still emitted.  */
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  /* PIC stub: ldr r12, [pc]; add r12, pc; bx r12; <offset>.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  /* v5 stub: ldr pc, [pc, #-4]; <address|1>.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* v4t stub: ldr r12, [pc]; bx r12; <address|1>.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9244
/* Arm code calling a Thumb function.  Creates (or reuses) an
   ARM->Thumb glue stub and rewrites the ARM branch at OFFSET (plus
   ADDEND) in INPUT_SECTION to target it.  VAL is the Thumb
   destination address.  Returns TRUE on success, FALSE on failure.  */

static int
elf32_arm_to_thumb_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  unsigned long int tmp;
  bfd_vma my_offset;
  asection * s;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
				     sym_sec, val, s, error_message);
  if (!myh)
    return FALSE;

  my_offset = myh->root.u.def.value;
  tmp = bfd_get_32 (input_bfd, hit_data);
  /* Keep only the condition/opcode byte of the original branch.  */
  tmp = tmp & 0xFF000000;

  /* Somehow these are both 4 too far, so subtract 8.  */
  ret_offset = (s->output_offset
		+ my_offset
		+ s->output_section->vma
		- (input_section->output_offset
		   + input_section->output_section->vma
		   + offset + addend)
		- 8);

  /* Insert the word-scaled displacement into the 24-bit branch field.  */
  tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);

  bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);

  return TRUE;
}
9301
/* Populate Arm stub for an exported Thumb function.  Traversal
   callback for elf_link_hash_traverse; INF is the bfd_link_info.
   Always returns TRUE so the traversal continues.  */

static bfd_boolean
elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info * info = (struct bfd_link_info *) inf;
  asection * s;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_entry *eh;
  struct elf32_arm_link_hash_table * globals;
  asection *sec;
  bfd_vma val;
  char *error_message;

  eh = elf32_arm_hash_entry (h);
  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (eh->export_glue == NULL)
    return TRUE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      ARM2THUMB_GLUE_SECTION_NAME);
  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  sec = eh->export_glue->root.u.def.section;

  BFD_ASSERT (sec->output_section != NULL);

  /* Absolute address of the exported Thumb function.  */
  val = eh->export_glue->root.u.def.value + sec->output_offset
	+ sec->output_section->vma;

  /* NOTE(review): error_message is only set by the callee on failure,
     and failure here is reported via the assert below rather than
     propagated to the caller.  */
  myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
				     h->root.u.def.section->owner,
				     globals->obfd, sec, val, s,
				     &error_message);
  BFD_ASSERT (myh);
  return TRUE;
}
9345
9346 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9347
9348 static bfd_vma
9349 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9350 {
9351 bfd_byte *p;
9352 bfd_vma glue_addr;
9353 asection *s;
9354 struct elf32_arm_link_hash_table *globals;
9355
9356 globals = elf32_arm_hash_table (info);
9357 BFD_ASSERT (globals != NULL);
9358 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9359
9360 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9361 ARM_BX_GLUE_SECTION_NAME);
9362 BFD_ASSERT (s != NULL);
9363 BFD_ASSERT (s->contents != NULL);
9364 BFD_ASSERT (s->output_section != NULL);
9365
9366 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9367
9368 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9369
9370 if ((globals->bx_glue_offset[reg] & 1) == 0)
9371 {
9372 p = s->contents + glue_addr;
9373 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9374 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9375 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9376 globals->bx_glue_offset[reg] |= 1;
9377 }
9378
9379 return glue_addr + s->output_section->vma + s->output_offset;
9380 }
9381
9382 /* Generate Arm stubs for exported Thumb symbols. */
9383 static void
9384 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9385 struct bfd_link_info *link_info)
9386 {
9387 struct elf32_arm_link_hash_table * globals;
9388
9389 if (link_info == NULL)
9390 /* Ignore this if we are not called by the ELF backend linker. */
9391 return;
9392
9393 globals = elf32_arm_hash_table (link_info);
9394 if (globals == NULL)
9395 return;
9396
9397 /* If blx is available then exported Thumb symbols are OK and there is
9398 nothing to do. */
9399 if (globals->use_blx)
9400 return;
9401
9402 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9403 link_info);
9404 }
9405
9406 /* Reserve space for COUNT dynamic relocations in relocation selection
9407 SRELOC. */
9408
9409 static void
9410 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9411 bfd_size_type count)
9412 {
9413 struct elf32_arm_link_hash_table *htab;
9414
9415 htab = elf32_arm_hash_table (info);
9416 BFD_ASSERT (htab->root.dynamic_sections_created);
9417 if (sreloc == NULL)
9418 abort ();
9419 sreloc->size += RELOC_SIZE (htab) * count;
9420 }
9421
9422 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9423 dynamic, the relocations should go in SRELOC, otherwise they should
9424 go in the special .rel.iplt section. */
9425
9426 static void
9427 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9428 bfd_size_type count)
9429 {
9430 struct elf32_arm_link_hash_table *htab;
9431
9432 htab = elf32_arm_hash_table (info);
9433 if (!htab->root.dynamic_sections_created)
9434 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9435 else
9436 {
9437 BFD_ASSERT (sreloc != NULL);
9438 sreloc->size += RELOC_SIZE (htab) * count;
9439 }
9440 }
9441
9442 /* Add relocation REL to the end of relocation section SRELOC. */
9443
9444 static void
9445 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9446 asection *sreloc, Elf_Internal_Rela *rel)
9447 {
9448 bfd_byte *loc;
9449 struct elf32_arm_link_hash_table *htab;
9450
9451 htab = elf32_arm_hash_table (info);
9452 if (!htab->root.dynamic_sections_created
9453 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9454 sreloc = htab->root.irelplt;
9455 if (sreloc == NULL)
9456 abort ();
9457 loc = sreloc->contents;
9458 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9459 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9460 abort ();
9461 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9462 }
9463
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Also reserves the matching dynamic relocation and (except
   on Symbian) the .got.plt slot.  Note the ordering: ROOT_PLT->offset
   is recorded before the entry's own size is added.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  /* The recorded offset points at the PLT entry proper, i.e. after
     any Thumb stub just reserved.  */
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9543
9544 static bfd_vma
9545 arm_movw_immediate (bfd_vma value)
9546 {
9547 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9548 }
9549
9550 static bfd_vma
9551 arm_movt_immediate (bfd_vma value)
9552 {
9553 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9554 }
9555
/* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
   the entry lives in .iplt and resolves to (*SYM_VALUE)().
   Otherwise, DYNINDX is the index of the symbol in the dynamic
   symbol table and SYM_VALUE is undefined.

   ROOT_PLT points to the offset of the PLT entry from the start of its
   section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
   bookkeeping information.

   The entry encoding depends on the target flavour: Symbian, VxWorks
   (shared or executable), NaCl, FDPIC, Thumb-only, or plain ARM.

   Returns FALSE if there was a problem.  */

static bfd_boolean
elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt,
			      int dynindx, bfd_vma sym_value)
{
  struct elf32_arm_link_hash_table *htab;
  asection *sgot;
  asection *splt;
  asection *srel;
  bfd_byte *loc;
  bfd_vma plt_index;
  Elf_Internal_Rela rel;
  bfd_vma plt_header_size;
  bfd_vma got_header_size;

  htab = elf32_arm_hash_table (info);

  /* Pick the appropriate sections and sizes.  */
  if (dynindx == -1)
    {
      splt = htab->root.iplt;
      sgot = htab->root.igotplt;
      srel = htab->root.irelplt;

      /* There are no reserved entries in .igot.plt, and no special
	 first entry in .iplt.  */
      got_header_size = 0;
      plt_header_size = 0;
    }
  else
    {
      splt = htab->root.splt;
      sgot = htab->root.sgotplt;
      srel = htab->root.srelplt;

      got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
      plt_header_size = htab->plt_header_size;
    }
  BFD_ASSERT (splt != NULL && srel != NULL);

  /* Fill in the entry in the procedure linkage table.  */
  if (htab->symbian_p)
    {
      BFD_ASSERT (dynindx >= 0);
      put_arm_insn (htab, output_bfd,
		    elf32_arm_symbian_plt_entry[0],
		    splt->contents + root_plt->offset);
      bfd_put_32 (output_bfd,
		  elf32_arm_symbian_plt_entry[1],
		  splt->contents + root_plt->offset + 4);

      /* Fill in the entry in the .rel.plt section.  */
      rel.r_offset = (splt->output_section->vma
		      + splt->output_offset
		      + root_plt->offset + 4);
      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.  The
	 first entry in the procedure linkage table is reserved.  */
      plt_index = ((root_plt->offset - plt_header_size)
		   / htab->plt_entry_size);
    }
  else
    {
      bfd_vma got_offset, got_address, plt_address;
      bfd_vma got_displacement, initial_got_entry;
      bfd_byte * ptr;

      BFD_ASSERT (sgot != NULL);

      /* Get the offset into the .(i)got.plt table of the entry that
	 corresponds to this function.  (The low bit of got_offset is
	 used as a flag elsewhere, so mask it off here.)  */
      got_offset = (arm_plt->got_offset & -2);

      /* Get the index in the procedure linkage table which
	 corresponds to this symbol.  This is the index of this symbol
	 in all the symbols for which we are making plt entries.
	 After the reserved .got.plt entries, all symbols appear in
	 the same order as in .plt.  */
      if (htab->fdpic_p)
	/* Function descriptor takes 8 bytes.  */
	plt_index = (got_offset - got_header_size) / 8;
      else
	plt_index = (got_offset - got_header_size) / 4;

      /* Calculate the address of the GOT entry.  */
      got_address = (sgot->output_section->vma
		     + sgot->output_offset
		     + got_offset);

      /* ...and the address of the PLT entry.  */
      plt_address = (splt->output_section->vma
		     + splt->output_offset
		     + root_plt->offset);

      ptr = splt->contents + root_plt->offset;
      if (htab->vxworks_p && bfd_link_pic (info))
	{
	  unsigned int i;
	  bfd_vma val;

	  /* Patch GOT offset and .rel.plt offset into the template.  */
	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_shared_plt_entry[i];
	      if (i == 2)
		val |= got_address - sgot->output_section->vma;
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }
	}
      else if (htab->vxworks_p)
	{
	  unsigned int i;
	  bfd_vma val;

	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
	    {
	      val = elf32_arm_vxworks_exec_plt_entry[i];
	      if (i == 2)
		val |= got_address;
	      if (i == 4)
		/* Backward branch to the start of the PLT,
		   word-scaled and PC+8 biased.  */
		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
	      if (i == 5)
		val |= plt_index * RELOC_SIZE (htab);
	      if (i == 2 || i == 5)
		bfd_put_32 (output_bfd, val, ptr);
	      else
		put_arm_insn (htab, output_bfd, val, ptr);
	    }

	  loc = (htab->srelplt2->contents
		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));

	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
	     referencing the GOT for this PLT entry.  */
	  rel.r_offset = plt_address + 8;
	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	  rel.r_addend = got_offset;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	  loc += RELOC_SIZE (htab);

	  /* Create the R_ARM_ABS32 relocation referencing the
	     beginning of the PLT for this GOT entry.  */
	  rel.r_offset = got_address;
	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	  rel.r_addend = 0;
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
      else if (htab->nacl_p)
	{
	  /* Calculate the displacement between the PLT slot and the
	     common tail that's part of the special initial PLT slot.  */
	  int32_t tail_displacement
	    = ((splt->output_section->vma + splt->output_offset
		+ ARM_NACL_PLT_TAIL_OFFSET)
	       - (plt_address + htab->plt_entry_size + 4));
	  BFD_ASSERT ((tail_displacement & 3) == 0);
	  tail_displacement >>= 2;

	  /* The displacement must fit the 24-bit branch field.  */
	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
		      || (-tail_displacement & 0xff000000) == 0);

	  /* Calculate the displacement between the PLT slot and the entry
	     in the GOT.  The offset accounts for the value produced by
	     adding to pc in the penultimate instruction of the PLT stub.  */
	  got_displacement = (got_address
			      - (plt_address + htab->plt_entry_size));

	  /* NaCl does not support interworking at all.  */
	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));

	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[0]
			| arm_movw_immediate (got_displacement),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[1]
			| arm_movt_immediate (got_displacement),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_arm_nacl_plt_entry[3]
			| (tail_displacement & 0x00ffffff),
			ptr + 12);
	}
      else if (htab->fdpic_p)
	{
	  const bfd_vma *plt_entry = using_thumb_only(htab)
	    ? elf32_arm_fdpic_thumb_plt_entry
	    : elf32_arm_fdpic_plt_entry;

	  /* Fill-up Thumb stub if needed.  */
	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }
	  /* As we are using 32 bit instructions even for the Thumb
	     version, we have to use 'put_arm_insn' instead of
	     'put_thumb_insn'.  */
	  put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
	  put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
	  put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
	  put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
	  bfd_put_32 (output_bfd, got_offset, ptr + 16);

	  if (!(info->flags & DF_BIND_NOW))
	    {
	      /* funcdesc_value_reloc_offset.  */
	      bfd_put_32 (output_bfd,
			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
			  ptr + 20);
	      put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
	      put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
	      put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
	      put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
	    }
	}
      else if (using_thumb_only (htab))
	{
	  /* PR ld/16017: Generate thumb only PLT entries.  */
	  if (!using_thumb2 (htab))
	    {
	      /* FIXME: We ought to be able to generate thumb-1 PLT
		 instructions...  */
	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
				  output_bfd);
	      return FALSE;
	    }

	  /* Calculate the displacement between the PLT slot and the entry in
	     the GOT.  The 12-byte offset accounts for the value produced by
	     adding to pc in the 3rd instruction of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 12);

	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
	     instead of 'put_thumb_insn'.  */
	  /* The scattered masks below place the displacement into the
	     split immediate fields of the Thumb-2 MOVW/MOVT pair.  */
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[0]
			| ((got_displacement & 0x000000ff) << 16)
			| ((got_displacement & 0x00000700) << 20)
			| ((got_displacement & 0x00000800) >> 1)
			| ((got_displacement & 0x0000f000) >> 12),
			ptr + 0);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[1]
			| ((got_displacement & 0x00ff0000) )
			| ((got_displacement & 0x07000000) << 4)
			| ((got_displacement & 0x08000000) >> 17)
			| ((got_displacement & 0xf0000000) >> 28),
			ptr + 4);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[2],
			ptr + 8);
	  put_arm_insn (htab, output_bfd,
			elf32_thumb2_plt_entry[3],
			ptr + 12);
	}
      else
	{
	  /* Calculate the displacement between the PLT slot and the
	     entry in the GOT.  The eight-byte offset accounts for the
	     value produced by adding to pc in the first instruction
	     of the PLT stub.  */
	  got_displacement = got_address - (plt_address + 8);

	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
	    {
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[0], ptr - 4);
	      put_thumb_insn (htab, output_bfd,
			      elf32_arm_plt_thumb_stub[1], ptr - 2);
	    }

	  if (!elf32_arm_use_long_plt_entry)
	    {
	      /* Short entries can only reach a GOT within 256MB.  */
	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);

	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[0]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[1]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_short[2]
			    | (got_displacement & 0x00000fff),
			    ptr + 8);
#ifdef FOUR_WORD_PLT
	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
#endif
	    }
	  else
	    {
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[0]
			    | ((got_displacement & 0xf0000000) >> 28),
			    ptr + 0);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[1]
			    | ((got_displacement & 0x0ff00000) >> 20),
			    ptr + 4);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[2]
			    | ((got_displacement & 0x000ff000) >> 12),
			    ptr+ 8);
	      put_arm_insn (htab, output_bfd,
			    elf32_arm_plt_entry_long[3]
			    | (got_displacement & 0x00000fff),
			    ptr + 12);
	    }
	}

      /* Fill in the entry in the .rel(a).(i)plt section.  */
      rel.r_offset = got_address;
      rel.r_addend = 0;
      if (dynindx == -1)
	{
	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
	     The dynamic linker or static executable then calls SYM_VALUE
	     to determine the correct run-time value of the .igot.plt entry.  */
	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
	  initial_got_entry = sym_value;
	}
      else
	{
	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
	     used by PLT entry.  */
	  if (htab->fdpic_p)
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
	      initial_got_entry = 0;
	    }
	  else
	    {
	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
	      initial_got_entry = (splt->output_section->vma
				   + splt->output_offset);
	    }
	}

      /* Fill in the entry in the global offset table.  */
      bfd_put_32 (output_bfd, initial_got_entry,
		  sgot->contents + got_offset);

      if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
	{
	  /* Setup initial funcdesc value.  */
	  /* FIXME: we don't support lazy binding because there is a
	     race condition between both words getting written and
	     some other thread attempting to read them.  The ARM
	     architecture does not have an atomic 64 bit load/store
	     instruction that could be used to prevent it; it is
	     recommended that threaded FDPIC applications run with the
	     LD_BIND_NOW environment variable set.  */
	  bfd_put_32(output_bfd, plt_address + 0x18,
		     sgot->contents + got_offset);
	  bfd_put_32(output_bfd, -1 /*TODO*/,
		     sgot->contents + got_offset + 4);
	}
    }

  if (dynindx == -1)
    elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
  else
    {
      if (htab->fdpic_p)
	{
	  /* For FDPIC we put PLT relocationss into .rel.got when not
	     lazy binding otherwise we put them in .rel.plt.  For now,
	     we don't support lazy binding so put it in .rel.got.  */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
	  else
	    elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
	}
      else
	{
	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
	}
    }

  return TRUE;
}
9965
9966 /* Some relocations map to different relocations depending on the
9967 target. Return the real relocation. */
9968
9969 static int
9970 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9971 int r_type)
9972 {
9973 switch (r_type)
9974 {
9975 case R_ARM_TARGET1:
9976 if (globals->target1_is_rel)
9977 return R_ARM_REL32;
9978 else
9979 return R_ARM_ABS32;
9980
9981 case R_ARM_TARGET2:
9982 return globals->target2_reloc;
9983
9984 default:
9985 return r_type;
9986 }
9987 }
9988
9989 /* Return the base VMA address which should be subtracted from real addresses
9990 when resolving @dtpoff relocation.
9991 This is PT_TLS segment p_vaddr. */
9992
9993 static bfd_vma
9994 dtpoff_base (struct bfd_link_info *info)
9995 {
9996 /* If tls_sec is NULL, we should have signalled an error already. */
9997 if (elf_hash_table (info)->tls_sec == NULL)
9998 return 0;
9999 return elf_hash_table (info)->tls_sec->vma;
10000 }
10001
10002 /* Return the relocation value for @tpoff relocation
10003 if STT_TLS virtual address is ADDRESS. */
10004
10005 static bfd_vma
10006 tpoff (struct bfd_link_info *info, bfd_vma address)
10007 {
10008 struct elf_link_hash_table *htab = elf_hash_table (info);
10009 bfd_vma base;
10010
10011 /* If tls_sec is NULL, we should have signalled an error already. */
10012 if (htab->tls_sec == NULL)
10013 return 0;
10014 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10015 return address - htab->tls_sec->vma + base;
10016 }
10017
10018 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10019 VALUE is the relocation value. */
10020
10021 static bfd_reloc_status_type
10022 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10023 {
10024 if (value > 0xfff)
10025 return bfd_reloc_overflow;
10026
10027 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10028 bfd_put_32 (abfd, value, data);
10029 return bfd_reloc_ok;
10030 }
10031
10032 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10033 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10034 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10035
10036 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10037 is to then call final_link_relocate. Return other values in the
10038 case of error.
10039
   FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10041 the pre-relaxed code. It would be nice if the relocs were updated
10042 to match the optimization. */
10043
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Adjust the literal word used by the TLS descriptor sequence.
	 For a local symbol the word is simply cleared.  Otherwise the
	 fetched value is de-biased: bit 0 of the word distinguishes a
	 Thumb (-5) from an ARM (-8) adjustment -- NOTE(review): these
	 offsets presumably compensate for the PC-relative addressing of
	 the consuming instruction; confirm against the code that
	 emitted the word.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* The caller must still apply the relocation proper.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Rewrite one 16-bit instruction of the Thumb TLS
	 descriptor sequence.  When the symbol is local the sequence
	 degenerates to nops (0x46c0 = mov r8, r8); otherwise each
	 recognized instruction is patched to its relaxed form.  Any
	 unrecognized instruction is reported and rejected.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] -- clear the immediate offset field.  */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780)  /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx -- keep the source register field (bits 3-6).  */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same scheme as the Thumb case above, but for the
	 32-bit ARM encodings; the nop used is 0xe1a00000 (mov r0, r0).  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000)	 /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry -- keep both register fields.  */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] -- clear the immediate offset field.  */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx -- keep the source register field (bits 0-3).  */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Pick the 32-bit replacement, then store it
	 as two halfwords below.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10182
10183 /* For a given value of n, calculate the value of G_n as required to
10184 deal with group relocations. We return it in the form of an
10185 encoded constant-and-rotation, together with the final residual. If n is
10186 specified as less than zero, then final_residual is filled with the
10187 input value and no further action is performed. */
10188
10189 static bfd_vma
10190 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10191 {
10192 int current_n;
10193 bfd_vma g_n;
10194 bfd_vma encoded_g_n = 0;
10195 bfd_vma residual = value; /* Also known as Y_n. */
10196
10197 for (current_n = 0; current_n <= n; current_n++)
10198 {
10199 int shift;
10200
10201 /* Calculate which part of the value to mask. */
10202 if (residual == 0)
10203 shift = 0;
10204 else
10205 {
10206 int msb;
10207
10208 /* Determine the most significant bit in the residual and
10209 align the resulting value to a 2-bit boundary. */
10210 for (msb = 30; msb >= 0; msb -= 2)
10211 if (residual & (3 << msb))
10212 break;
10213
10214 /* The desired shift is now (msb - 6), or zero, whichever
10215 is the greater. */
10216 shift = msb - 6;
10217 if (shift < 0)
10218 shift = 0;
10219 }
10220
10221 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10222 g_n = residual & (0xff << shift);
10223 encoded_g_n = (g_n >> shift)
10224 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10225
10226 /* Calculate the residual for the next time around. */
10227 residual &= ~g_n;
10228 }
10229
10230 *final_residual = residual;
10231
10232 return encoded_g_n;
10233 }
10234
10235 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10236 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10237
10238 static int
10239 identify_add_or_sub (bfd_vma insn)
10240 {
10241 int opcode = insn & 0x1e00000;
10242
10243 if (opcode == 1 << 23) /* ADD */
10244 return 1;
10245
10246 if (opcode == 1 << 22) /* SUB */
10247 return -1;
10248
10249 return 0;
10250 }
10251
10252 /* Perform a relocation as part of a final link. */
10253
10254 static bfd_reloc_status_type
10255 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10256 bfd * input_bfd,
10257 bfd * output_bfd,
10258 asection * input_section,
10259 bfd_byte * contents,
10260 Elf_Internal_Rela * rel,
10261 bfd_vma value,
10262 struct bfd_link_info * info,
10263 asection * sym_sec,
10264 const char * sym_name,
10265 unsigned char st_type,
10266 enum arm_st_branch_type branch_type,
10267 struct elf_link_hash_entry * h,
10268 bfd_boolean * unresolved_reloc_p,
10269 char ** error_message)
10270 {
10271 unsigned long r_type = howto->type;
10272 unsigned long r_symndx;
10273 bfd_byte * hit_data = contents + rel->r_offset;
10274 bfd_vma * local_got_offsets;
10275 bfd_vma * local_tlsdesc_gotents;
10276 asection * sgot;
10277 asection * splt;
10278 asection * sreloc = NULL;
10279 asection * srelgot;
10280 bfd_vma addend;
10281 bfd_signed_vma signed_addend;
10282 unsigned char dynreloc_st_type;
10283 bfd_vma dynreloc_value;
10284 struct elf32_arm_link_hash_table * globals;
10285 struct elf32_arm_link_hash_entry *eh;
10286 union gotplt_union *root_plt;
10287 struct arm_plt_info *arm_plt;
10288 bfd_vma plt_offset;
10289 bfd_vma gotplt_offset;
10290 bfd_boolean has_iplt_entry;
10291 bfd_boolean resolved_to_zero;
10292
10293 globals = elf32_arm_hash_table (info);
10294 if (globals == NULL)
10295 return bfd_reloc_notsupported;
10296
10297 BFD_ASSERT (is_arm_elf (input_bfd));
10298 BFD_ASSERT (howto != NULL);
10299
10300 /* Some relocation types map to different relocations depending on the
10301 target. We pick the right one here. */
10302 r_type = arm_real_reloc_type (globals, r_type);
10303
10304 /* It is possible to have linker relaxations on some TLS access
10305 models. Update our information here. */
10306 r_type = elf32_arm_tls_transition (info, r_type, h);
10307
10308 if (r_type != howto->type)
10309 howto = elf32_arm_howto_from_type (r_type);
10310
10311 eh = (struct elf32_arm_link_hash_entry *) h;
10312 sgot = globals->root.sgot;
10313 local_got_offsets = elf_local_got_offsets (input_bfd);
10314 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10315
10316 if (globals->root.dynamic_sections_created)
10317 srelgot = globals->root.srelgot;
10318 else
10319 srelgot = NULL;
10320
10321 r_symndx = ELF32_R_SYM (rel->r_info);
10322
10323 if (globals->use_rel)
10324 {
10325 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10326
10327 if (addend & ((howto->src_mask + 1) >> 1))
10328 {
10329 signed_addend = -1;
10330 signed_addend &= ~ howto->src_mask;
10331 signed_addend |= addend;
10332 }
10333 else
10334 signed_addend = addend;
10335 }
10336 else
10337 addend = signed_addend = rel->r_addend;
10338
10339 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10340 are resolving a function call relocation. */
10341 if (using_thumb_only (globals)
10342 && (r_type == R_ARM_THM_CALL
10343 || r_type == R_ARM_THM_JUMP24)
10344 && branch_type == ST_BRANCH_TO_ARM)
10345 branch_type = ST_BRANCH_TO_THUMB;
10346
10347 /* Record the symbol information that should be used in dynamic
10348 relocations. */
10349 dynreloc_st_type = st_type;
10350 dynreloc_value = value;
10351 if (branch_type == ST_BRANCH_TO_THUMB)
10352 dynreloc_value |= 1;
10353
10354 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10355 VALUE appropriately for relocations that we resolve at link time. */
10356 has_iplt_entry = FALSE;
10357 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10358 &arm_plt)
10359 && root_plt->offset != (bfd_vma) -1)
10360 {
10361 plt_offset = root_plt->offset;
10362 gotplt_offset = arm_plt->got_offset;
10363
10364 if (h == NULL || eh->is_iplt)
10365 {
10366 has_iplt_entry = TRUE;
10367 splt = globals->root.iplt;
10368
10369 /* Populate .iplt entries here, because not all of them will
10370 be seen by finish_dynamic_symbol. The lower bit is set if
10371 we have already populated the entry. */
10372 if (plt_offset & 1)
10373 plt_offset--;
10374 else
10375 {
10376 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10377 -1, dynreloc_value))
10378 root_plt->offset |= 1;
10379 else
10380 return bfd_reloc_notsupported;
10381 }
10382
10383 /* Static relocations always resolve to the .iplt entry. */
10384 st_type = STT_FUNC;
10385 value = (splt->output_section->vma
10386 + splt->output_offset
10387 + plt_offset);
10388 branch_type = ST_BRANCH_TO_ARM;
10389
10390 /* If there are non-call relocations that resolve to the .iplt
10391 entry, then all dynamic ones must too. */
10392 if (arm_plt->noncall_refcount != 0)
10393 {
10394 dynreloc_st_type = st_type;
10395 dynreloc_value = value;
10396 }
10397 }
10398 else
10399 /* We populate the .plt entry in finish_dynamic_symbol. */
10400 splt = globals->root.splt;
10401 }
10402 else
10403 {
10404 splt = NULL;
10405 plt_offset = (bfd_vma) -1;
10406 gotplt_offset = (bfd_vma) -1;
10407 }
10408
10409 resolved_to_zero = (h != NULL
10410 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10411
10412 switch (r_type)
10413 {
10414 case R_ARM_NONE:
10415 /* We don't need to find a value for this symbol. It's just a
10416 marker. */
10417 *unresolved_reloc_p = FALSE;
10418 return bfd_reloc_ok;
10419
10420 case R_ARM_ABS12:
10421 if (!globals->vxworks_p)
10422 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10423 /* Fall through. */
10424
10425 case R_ARM_PC24:
10426 case R_ARM_ABS32:
10427 case R_ARM_ABS32_NOI:
10428 case R_ARM_REL32:
10429 case R_ARM_REL32_NOI:
10430 case R_ARM_CALL:
10431 case R_ARM_JUMP24:
10432 case R_ARM_XPC25:
10433 case R_ARM_PREL31:
10434 case R_ARM_PLT32:
10435 /* Handle relocations which should use the PLT entry. ABS32/REL32
10436 will use the symbol's value, which may point to a PLT entry, but we
10437 don't need to handle that here. If we created a PLT entry, all
10438 branches in this object should go to it, except if the PLT is too
10439 far away, in which case a long branch stub should be inserted. */
10440 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10441 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10442 && r_type != R_ARM_CALL
10443 && r_type != R_ARM_JUMP24
10444 && r_type != R_ARM_PLT32)
10445 && plt_offset != (bfd_vma) -1)
10446 {
10447 /* If we've created a .plt section, and assigned a PLT entry
10448 to this function, it must either be a STT_GNU_IFUNC reference
10449 or not be known to bind locally. In other cases, we should
10450 have cleared the PLT entry by now. */
10451 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10452
10453 value = (splt->output_section->vma
10454 + splt->output_offset
10455 + plt_offset);
10456 *unresolved_reloc_p = FALSE;
10457 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10458 contents, rel->r_offset, value,
10459 rel->r_addend);
10460 }
10461
10462 /* When generating a shared object or relocatable executable, these
10463 relocations are copied into the output file to be resolved at
10464 run time. */
10465 if ((bfd_link_pic (info)
10466 || globals->root.is_relocatable_executable
10467 || globals->fdpic_p)
10468 && (input_section->flags & SEC_ALLOC)
10469 && !(globals->vxworks_p
10470 && strcmp (input_section->output_section->name,
10471 ".tls_vars") == 0)
10472 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10473 || !SYMBOL_CALLS_LOCAL (info, h))
10474 && !(input_bfd == globals->stub_bfd
10475 && strstr (input_section->name, STUB_SUFFIX))
10476 && (h == NULL
10477 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10478 && !resolved_to_zero)
10479 || h->root.type != bfd_link_hash_undefweak)
10480 && r_type != R_ARM_PC24
10481 && r_type != R_ARM_CALL
10482 && r_type != R_ARM_JUMP24
10483 && r_type != R_ARM_PREL31
10484 && r_type != R_ARM_PLT32)
10485 {
10486 Elf_Internal_Rela outrel;
10487 bfd_boolean skip, relocate;
10488 int isrofixup = 0;
10489
10490 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10491 && !h->def_regular)
10492 {
10493 char *v = _("shared object");
10494
10495 if (bfd_link_executable (info))
10496 v = _("PIE executable");
10497
10498 _bfd_error_handler
10499 (_("%pB: relocation %s against external or undefined symbol `%s'"
10500 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10501 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10502 return bfd_reloc_notsupported;
10503 }
10504
10505 *unresolved_reloc_p = FALSE;
10506
10507 if (sreloc == NULL && globals->root.dynamic_sections_created)
10508 {
10509 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10510 ! globals->use_rel);
10511
10512 if (sreloc == NULL)
10513 return bfd_reloc_notsupported;
10514 }
10515
10516 skip = FALSE;
10517 relocate = FALSE;
10518
10519 outrel.r_addend = addend;
10520 outrel.r_offset =
10521 _bfd_elf_section_offset (output_bfd, info, input_section,
10522 rel->r_offset);
10523 if (outrel.r_offset == (bfd_vma) -1)
10524 skip = TRUE;
10525 else if (outrel.r_offset == (bfd_vma) -2)
10526 skip = TRUE, relocate = TRUE;
10527 outrel.r_offset += (input_section->output_section->vma
10528 + input_section->output_offset);
10529
10530 if (skip)
10531 memset (&outrel, 0, sizeof outrel);
10532 else if (h != NULL
10533 && h->dynindx != -1
10534 && (!bfd_link_pic (info)
10535 || !(bfd_link_pie (info)
10536 || SYMBOLIC_BIND (info, h))
10537 || !h->def_regular))
10538 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10539 else
10540 {
10541 int symbol;
10542
10543 /* This symbol is local, or marked to become local. */
10544 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10545 || (globals->fdpic_p && !bfd_link_pic(info)));
10546 if (globals->symbian_p)
10547 {
10548 asection *osec;
10549
10550 /* On Symbian OS, the data segment and text segement
10551 can be relocated independently. Therefore, we
10552 must indicate the segment to which this
10553 relocation is relative. The BPABI allows us to
10554 use any symbol in the right segment; we just use
10555 the section symbol as it is convenient. (We
10556 cannot use the symbol given by "h" directly as it
10557 will not appear in the dynamic symbol table.)
10558
10559 Note that the dynamic linker ignores the section
10560 symbol value, so we don't subtract osec->vma
10561 from the emitted reloc addend. */
10562 if (sym_sec)
10563 osec = sym_sec->output_section;
10564 else
10565 osec = input_section->output_section;
10566 symbol = elf_section_data (osec)->dynindx;
10567 if (symbol == 0)
10568 {
10569 struct elf_link_hash_table *htab = elf_hash_table (info);
10570
10571 if ((osec->flags & SEC_READONLY) == 0
10572 && htab->data_index_section != NULL)
10573 osec = htab->data_index_section;
10574 else
10575 osec = htab->text_index_section;
10576 symbol = elf_section_data (osec)->dynindx;
10577 }
10578 BFD_ASSERT (symbol != 0);
10579 }
10580 else
10581 /* On SVR4-ish systems, the dynamic loader cannot
10582 relocate the text and data segments independently,
10583 so the symbol does not matter. */
10584 symbol = 0;
10585 if (dynreloc_st_type == STT_GNU_IFUNC)
10586 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10587 to the .iplt entry. Instead, every non-call reference
10588 must use an R_ARM_IRELATIVE relocation to obtain the
10589 correct run-time address. */
10590 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10591 else if (globals->fdpic_p && !bfd_link_pic(info))
10592 isrofixup = 1;
10593 else
10594 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10595 if (globals->use_rel)
10596 relocate = TRUE;
10597 else
10598 outrel.r_addend += dynreloc_value;
10599 }
10600
10601 if (isrofixup)
10602 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10603 else
10604 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10605
10606 /* If this reloc is against an external symbol, we do not want to
10607 fiddle with the addend. Otherwise, we need to include the symbol
10608 value so that it becomes an addend for the dynamic reloc. */
10609 if (! relocate)
10610 return bfd_reloc_ok;
10611
10612 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10613 contents, rel->r_offset,
10614 dynreloc_value, (bfd_vma) 0);
10615 }
10616 else switch (r_type)
10617 {
10618 case R_ARM_ABS12:
10619 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10620
10621 case R_ARM_XPC25: /* Arm BLX instruction. */
10622 case R_ARM_CALL:
10623 case R_ARM_JUMP24:
10624 case R_ARM_PC24: /* Arm B/BL instruction. */
10625 case R_ARM_PLT32:
10626 {
10627 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10628
10629 if (r_type == R_ARM_XPC25)
10630 {
10631 /* Check for Arm calling Arm function. */
10632 /* FIXME: Should we translate the instruction into a BL
10633 instruction instead ? */
10634 if (branch_type != ST_BRANCH_TO_THUMB)
10635 _bfd_error_handler
10636 (_("\%pB: warning: %s BLX instruction targets"
10637 " %s function '%s'"),
10638 input_bfd, "ARM",
10639 "ARM", h ? h->root.root.string : "(local)");
10640 }
10641 else if (r_type == R_ARM_PC24)
10642 {
10643 /* Check for Arm calling Thumb function. */
10644 if (branch_type == ST_BRANCH_TO_THUMB)
10645 {
10646 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10647 output_bfd, input_section,
10648 hit_data, sym_sec, rel->r_offset,
10649 signed_addend, value,
10650 error_message))
10651 return bfd_reloc_ok;
10652 else
10653 return bfd_reloc_dangerous;
10654 }
10655 }
10656
10657 /* Check if a stub has to be inserted because the
10658 destination is too far or we are changing mode. */
10659 if ( r_type == R_ARM_CALL
10660 || r_type == R_ARM_JUMP24
10661 || r_type == R_ARM_PLT32)
10662 {
10663 enum elf32_arm_stub_type stub_type = arm_stub_none;
10664 struct elf32_arm_link_hash_entry *hash;
10665
10666 hash = (struct elf32_arm_link_hash_entry *) h;
10667 stub_type = arm_type_of_stub (info, input_section, rel,
10668 st_type, &branch_type,
10669 hash, value, sym_sec,
10670 input_bfd, sym_name);
10671
10672 if (stub_type != arm_stub_none)
10673 {
10674 /* The target is out of reach, so redirect the
10675 branch to the local stub for this function. */
10676 stub_entry = elf32_arm_get_stub_entry (input_section,
10677 sym_sec, h,
10678 rel, globals,
10679 stub_type);
10680 {
10681 if (stub_entry != NULL)
10682 value = (stub_entry->stub_offset
10683 + stub_entry->stub_sec->output_offset
10684 + stub_entry->stub_sec->output_section->vma);
10685
10686 if (plt_offset != (bfd_vma) -1)
10687 *unresolved_reloc_p = FALSE;
10688 }
10689 }
10690 else
10691 {
10692 /* If the call goes through a PLT entry, make sure to
10693 check distance to the right destination address. */
10694 if (plt_offset != (bfd_vma) -1)
10695 {
10696 value = (splt->output_section->vma
10697 + splt->output_offset
10698 + plt_offset);
10699 *unresolved_reloc_p = FALSE;
10700 /* The PLT entry is in ARM mode, regardless of the
10701 target function. */
10702 branch_type = ST_BRANCH_TO_ARM;
10703 }
10704 }
10705 }
10706
10707 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10708 where:
10709 S is the address of the symbol in the relocation.
10710 P is address of the instruction being relocated.
10711 A is the addend (extracted from the instruction) in bytes.
10712
10713 S is held in 'value'.
10714 P is the base address of the section containing the
10715 instruction plus the offset of the reloc into that
10716 section, ie:
10717 (input_section->output_section->vma +
10718 input_section->output_offset +
10719 rel->r_offset).
10720 A is the addend, converted into bytes, ie:
10721 (signed_addend * 4)
10722
10723 Note: None of these operations have knowledge of the pipeline
10724 size of the processor, thus it is up to the assembler to
10725 encode this information into the addend. */
10726 value -= (input_section->output_section->vma
10727 + input_section->output_offset);
10728 value -= rel->r_offset;
10729 if (globals->use_rel)
10730 value += (signed_addend << howto->size);
10731 else
10732 /* RELA addends do not have to be adjusted by howto->size. */
10733 value += signed_addend;
10734
10735 signed_addend = value;
10736 signed_addend >>= howto->rightshift;
10737
10738 /* A branch to an undefined weak symbol is turned into a jump to
10739 the next instruction unless a PLT entry will be created.
10740 Do the same for local undefined symbols (but not for STN_UNDEF).
10741 The jump to the next instruction is optimized as a NOP depending
10742 on the architecture. */
10743 if (h ? (h->root.type == bfd_link_hash_undefweak
10744 && plt_offset == (bfd_vma) -1)
10745 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10746 {
10747 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10748
10749 if (arch_has_arm_nop (globals))
10750 value |= 0x0320f000;
10751 else
10752 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10753 }
10754 else
10755 {
10756 /* Perform a signed range check. */
10757 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10758 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10759 return bfd_reloc_overflow;
10760
10761 addend = (value & 2);
10762
10763 value = (signed_addend & howto->dst_mask)
10764 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10765
10766 if (r_type == R_ARM_CALL)
10767 {
10768 /* Set the H bit in the BLX instruction. */
10769 if (branch_type == ST_BRANCH_TO_THUMB)
10770 {
10771 if (addend)
10772 value |= (1 << 24);
10773 else
10774 value &= ~(bfd_vma)(1 << 24);
10775 }
10776
10777 /* Select the correct instruction (BL or BLX). */
10778 /* Only if we are not handling a BL to a stub. In this
10779 case, mode switching is performed by the stub. */
10780 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10781 value |= (1 << 28);
10782 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10783 {
10784 value &= ~(bfd_vma)(1 << 28);
10785 value |= (1 << 24);
10786 }
10787 }
10788 }
10789 }
10790 break;
10791
10792 case R_ARM_ABS32:
10793 value += addend;
10794 if (branch_type == ST_BRANCH_TO_THUMB)
10795 value |= 1;
10796 break;
10797
10798 case R_ARM_ABS32_NOI:
10799 value += addend;
10800 break;
10801
10802 case R_ARM_REL32:
10803 value += addend;
10804 if (branch_type == ST_BRANCH_TO_THUMB)
10805 value |= 1;
10806 value -= (input_section->output_section->vma
10807 + input_section->output_offset + rel->r_offset);
10808 break;
10809
10810 case R_ARM_REL32_NOI:
10811 value += addend;
10812 value -= (input_section->output_section->vma
10813 + input_section->output_offset + rel->r_offset);
10814 break;
10815
10816 case R_ARM_PREL31:
10817 value -= (input_section->output_section->vma
10818 + input_section->output_offset + rel->r_offset);
10819 value += signed_addend;
10820 if (! h || h->root.type != bfd_link_hash_undefweak)
10821 {
10822 /* Check for overflow. */
10823 if ((value ^ (value >> 1)) & (1 << 30))
10824 return bfd_reloc_overflow;
10825 }
10826 value &= 0x7fffffff;
10827 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10828 if (branch_type == ST_BRANCH_TO_THUMB)
10829 value |= 1;
10830 break;
10831 }
10832
10833 bfd_put_32 (input_bfd, value, hit_data);
10834 return bfd_reloc_ok;
10835
10836 case R_ARM_ABS8:
10837 /* PR 16202: Refectch the addend using the correct size. */
10838 if (globals->use_rel)
10839 addend = bfd_get_8 (input_bfd, hit_data);
10840 value += addend;
10841
10842 /* There is no way to tell whether the user intended to use a signed or
10843 unsigned addend. When checking for overflow we accept either,
10844 as specified by the AAELF. */
10845 if ((long) value > 0xff || (long) value < -0x80)
10846 return bfd_reloc_overflow;
10847
10848 bfd_put_8 (input_bfd, value, hit_data);
10849 return bfd_reloc_ok;
10850
10851 case R_ARM_ABS16:
10852 /* PR 16202: Refectch the addend using the correct size. */
10853 if (globals->use_rel)
10854 addend = bfd_get_16 (input_bfd, hit_data);
10855 value += addend;
10856
10857 /* See comment for R_ARM_ABS8. */
10858 if ((long) value > 0xffff || (long) value < -0x8000)
10859 return bfd_reloc_overflow;
10860
10861 bfd_put_16 (input_bfd, value, hit_data);
10862 return bfd_reloc_ok;
10863
10864 case R_ARM_THM_ABS5:
10865 /* Support ldr and str instructions for the thumb. */
10866 if (globals->use_rel)
10867 {
10868 /* Need to refetch addend. */
10869 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10870 /* ??? Need to determine shift amount from operand size. */
10871 addend >>= howto->rightshift;
10872 }
10873 value += addend;
10874
10875 /* ??? Isn't value unsigned? */
10876 if ((long) value > 0x1f || (long) value < -0x10)
10877 return bfd_reloc_overflow;
10878
10879 /* ??? Value needs to be properly shifted into place first. */
10880 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10881 bfd_put_16 (input_bfd, value, hit_data);
10882 return bfd_reloc_ok;
10883
10884 case R_ARM_THM_ALU_PREL_11_0:
10885 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10886 {
10887 bfd_vma insn;
10888 bfd_signed_vma relocation;
10889
10890 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10891 | bfd_get_16 (input_bfd, hit_data + 2);
10892
10893 if (globals->use_rel)
10894 {
10895 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10896 | ((insn & (1 << 26)) >> 15);
10897 if (insn & 0xf00000)
10898 signed_addend = -signed_addend;
10899 }
10900
10901 relocation = value + signed_addend;
10902 relocation -= Pa (input_section->output_section->vma
10903 + input_section->output_offset
10904 + rel->r_offset);
10905
10906 /* PR 21523: Use an absolute value. The user of this reloc will
10907 have already selected an ADD or SUB insn appropriately. */
10908 value = labs (relocation);
10909
10910 if (value >= 0x1000)
10911 return bfd_reloc_overflow;
10912
10913 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10914 if (branch_type == ST_BRANCH_TO_THUMB)
10915 value |= 1;
10916
10917 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10918 | ((value & 0x700) << 4)
10919 | ((value & 0x800) << 15);
10920 if (relocation < 0)
10921 insn |= 0xa00000;
10922
10923 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10924 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10925
10926 return bfd_reloc_ok;
10927 }
10928
10929 case R_ARM_THM_PC8:
10930 /* PR 10073: This reloc is not generated by the GNU toolchain,
10931 but it is supported for compatibility with third party libraries
10932 generated by other compilers, specifically the ARM/IAR. */
10933 {
10934 bfd_vma insn;
10935 bfd_signed_vma relocation;
10936
10937 insn = bfd_get_16 (input_bfd, hit_data);
10938
10939 if (globals->use_rel)
10940 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
10941
10942 relocation = value + addend;
10943 relocation -= Pa (input_section->output_section->vma
10944 + input_section->output_offset
10945 + rel->r_offset);
10946
10947 value = relocation;
10948
10949 /* We do not check for overflow of this reloc. Although strictly
10950 speaking this is incorrect, it appears to be necessary in order
10951 to work with IAR generated relocs. Since GCC and GAS do not
10952 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10953 a problem for them. */
10954 value &= 0x3fc;
10955
10956 insn = (insn & 0xff00) | (value >> 2);
10957
10958 bfd_put_16 (input_bfd, insn, hit_data);
10959
10960 return bfd_reloc_ok;
10961 }
10962
10963 case R_ARM_THM_PC12:
10964 /* Corresponds to: ldr.w reg, [pc, #offset]. */
10965 {
10966 bfd_vma insn;
10967 bfd_signed_vma relocation;
10968
10969 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10970 | bfd_get_16 (input_bfd, hit_data + 2);
10971
10972 if (globals->use_rel)
10973 {
10974 signed_addend = insn & 0xfff;
10975 if (!(insn & (1 << 23)))
10976 signed_addend = -signed_addend;
10977 }
10978
10979 relocation = value + signed_addend;
10980 relocation -= Pa (input_section->output_section->vma
10981 + input_section->output_offset
10982 + rel->r_offset);
10983
10984 value = relocation;
10985
10986 if (value >= 0x1000)
10987 return bfd_reloc_overflow;
10988
10989 insn = (insn & 0xff7ff000) | value;
10990 if (relocation >= 0)
10991 insn |= (1 << 23);
10992
10993 bfd_put_16 (input_bfd, insn >> 16, hit_data);
10994 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10995
10996 return bfd_reloc_ok;
10997 }
10998
    case R_ARM_THM_XPC22:
    case R_ARM_THM_CALL:
    case R_ARM_THM_JUMP24:
      /* Thumb BL (branch long instruction).  */
      {
        bfd_vma relocation;
        bfd_vma reloc_sign;
        bfd_boolean overflow = FALSE;
        bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
        bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
        bfd_signed_vma reloc_signed_max;
        bfd_signed_vma reloc_signed_min;
        bfd_vma check;
        bfd_signed_vma signed_check;
        int bitsize;
        const int thumb2 = using_thumb2 (globals);
        const int thumb2_bl = using_thumb2_bl (globals);

        /* A branch to an undefined weak symbol is turned into a jump to
           the next instruction unless a PLT entry will be created.
           The jump to the next instruction is optimized as a NOP.W for
           Thumb-2 enabled architectures.  */
        if (h && h->root.type == bfd_link_hash_undefweak
            && plt_offset == (bfd_vma) -1)
          {
            if (thumb2)
              {
                /* 0xf3af 0x8000 is the Thumb-2 NOP.W encoding.  */
                bfd_put_16 (input_bfd, 0xf3af, hit_data);
                bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
              }
            else
              {
                bfd_put_16 (input_bfd, 0xe000, hit_data);
                bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
              }
            return bfd_reloc_ok;
          }

        /* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
           with Thumb-1) involving the J1 and J2 bits.  */
        if (globals->use_rel)
          {
            bfd_vma s = (upper_insn & (1 << 10)) >> 10;
            bfd_vma upper = upper_insn & 0x3ff;
            bfd_vma lower = lower_insn & 0x7ff;
            bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
            bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
            /* I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S) per the Thumb-2
               branch encoding.  */
            bfd_vma i1 = j1 ^ s ? 0 : 1;
            bfd_vma i2 = j2 ^ s ? 0 : 1;

            addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
            /* Sign extend.  */
            addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);

            signed_addend = addend;
          }

        if (r_type == R_ARM_THM_XPC22)
          {
            /* Check for Thumb to Thumb call.  */
            /* FIXME: Should we translate the instruction into a BL
               instruction instead ?  */
            if (branch_type == ST_BRANCH_TO_THUMB)
              _bfd_error_handler
                (_("%pB: warning: %s BLX instruction targets"
                   " %s function '%s'"),
                 input_bfd, "Thumb",
                 "Thumb", h ? h->root.root.string : "(local)");
          }
        else
          {
            /* If it is not a call to Thumb, assume call to Arm.
               If it is a call relative to a section name, then it is not a
               function call at all, but rather a long jump.  Calls through
               the PLT do not require stubs.  */
            if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
              {
                if (globals->use_blx && r_type == R_ARM_THM_CALL)
                  {
                    /* Convert BL to BLX.  */
                    lower_insn = (lower_insn & ~0x1000) | 0x0800;
                  }
                else if (( r_type != R_ARM_THM_CALL)
                         && (r_type != R_ARM_THM_JUMP24))
                  {
                    /* No BLX available: fall back to a Thumb-to-ARM
                       interworking stub.  */
                    if (elf32_thumb_to_arm_stub
                        (info, sym_name, input_bfd, output_bfd, input_section,
                         hit_data, sym_sec, rel->r_offset, signed_addend, value,
                         error_message))
                      return bfd_reloc_ok;
                    else
                      return bfd_reloc_dangerous;
                  }
              }
            else if (branch_type == ST_BRANCH_TO_THUMB
                     && globals->use_blx
                     && r_type == R_ARM_THM_CALL)
              {
                /* Make sure this is a BL.  */
                lower_insn |= 0x1800;
              }
          }

        enum elf32_arm_stub_type stub_type = arm_stub_none;
        if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
          {
            /* Check if a stub has to be inserted because the destination
               is too far.  */
            struct elf32_arm_stub_hash_entry *stub_entry;
            struct elf32_arm_link_hash_entry *hash;

            hash = (struct elf32_arm_link_hash_entry *) h;

            stub_type = arm_type_of_stub (info, input_section, rel,
                                          st_type, &branch_type,
                                          hash, value, sym_sec,
                                          input_bfd, sym_name);

            if (stub_type != arm_stub_none)
              {
                /* The target is out of reach or we are changing modes, so
                   redirect the branch to the local stub for this
                   function.  */
                stub_entry = elf32_arm_get_stub_entry (input_section,
                                                       sym_sec, h,
                                                       rel, globals,
                                                       stub_type);
                if (stub_entry != NULL)
                  {
                    value = (stub_entry->stub_offset
                             + stub_entry->stub_sec->output_offset
                             + stub_entry->stub_sec->output_section->vma);

                    if (plt_offset != (bfd_vma) -1)
                      *unresolved_reloc_p = FALSE;
                  }

                /* If this call becomes a call to Arm, force BLX.  */
                if (globals->use_blx && (r_type == R_ARM_THM_CALL))
                  {
                    if ((stub_entry
                         && !arm_stub_is_thumb (stub_entry->stub_type))
                        || branch_type != ST_BRANCH_TO_THUMB)
                      lower_insn = (lower_insn & ~0x1000) | 0x0800;
                  }
              }
          }

        /* Handle calls via the PLT.  */
        if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
          {
            value = (splt->output_section->vma
                     + splt->output_offset
                     + plt_offset);

            if (globals->use_blx
                && r_type == R_ARM_THM_CALL
                && ! using_thumb_only (globals))
              {
                /* If the Thumb BLX instruction is available, convert
                   the BL to a BLX instruction to call the ARM-mode
                   PLT entry.  */
                lower_insn = (lower_insn & ~0x1000) | 0x0800;
                branch_type = ST_BRANCH_TO_ARM;
              }
            else
              {
                if (! using_thumb_only (globals))
                  /* Target the Thumb stub before the ARM PLT entry.  */
                  value -= PLT_THUMB_STUB_SIZE;
                branch_type = ST_BRANCH_TO_THUMB;
              }
            *unresolved_reloc_p = FALSE;
          }

        relocation = value + signed_addend;

        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);

        check = relocation >> howto->rightshift;

        /* If this is a signed value, the rightshift just dropped
           leading 1 bits (assuming twos complement).  */
        if ((bfd_signed_vma) relocation >= 0)
          signed_check = check;
        else
          signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);

        /* Calculate the permissable maximum and minimum values for
           this relocation according to whether we're relocating for
           Thumb-2 or not.  */
        bitsize = howto->bitsize;
        if (!thumb2_bl)
          bitsize -= 2;
        reloc_signed_max = (1 << (bitsize - 1)) - 1;
        reloc_signed_min = ~reloc_signed_max;

        /* Assumes two's complement.  */
        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          overflow = TRUE;

        if ((lower_insn & 0x5000) == 0x4000)
          /* For a BLX instruction, make sure that the relocation is rounded up
             to a word boundary.  This follows the semantics of the instruction
             which specifies that bit 1 of the target address will come from bit
             1 of the base address.  */
          relocation = (relocation + 2) & ~ 3;

        /* Put RELOCATION back into the insn.  Assumes two's complement.
           We use the Thumb-2 encoding, which is safe even if dealing with
           a Thumb-1 instruction by virtue of our overflow check above.  */
        reloc_sign = (signed_check < 0) ? 1 : 0;
        upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
                     | ((relocation >> 12) & 0x3ff)
                     | (reloc_sign << 10);
        lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
                     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
                     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
                     | ((relocation >> 1) & 0x7ff);

        /* Put the relocated value back in the object file:  */
        bfd_put_16 (input_bfd, upper_insn, hit_data);
        bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

        return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
      break;
11228
    case R_ARM_THM_JUMP19:
      /* Thumb32 conditional branch instruction.  */
      {
        bfd_vma relocation;
        bfd_boolean overflow = FALSE;
        bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
        bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
        /* 21-bit signed branch range (+/- 1 MiB, halfword aligned).  */
        bfd_signed_vma reloc_signed_max = 0xffffe;
        bfd_signed_vma reloc_signed_min = -0x100000;
        bfd_signed_vma signed_check;
        enum elf32_arm_stub_type stub_type = arm_stub_none;
        struct elf32_arm_stub_hash_entry *stub_entry;
        struct elf32_arm_link_hash_entry *hash;

        /* Need to refetch the addend, reconstruct the top three bits,
           and squish the two 11 bit pieces together.  */
        if (globals->use_rel)
          {
            bfd_vma S = (upper_insn & 0x0400) >> 10;
            bfd_vma upper = (upper_insn & 0x003f);
            bfd_vma J1 = (lower_insn & 0x2000) >> 13;
            bfd_vma J2 = (lower_insn & 0x0800) >> 11;
            bfd_vma lower = (lower_insn & 0x07ff);

            upper |= J1 << 6;
            upper |= J2 << 7;
            upper |= (!S) << 8;
            upper -= 0x0100; /* Sign extend.  */

            addend = (upper << 12) | (lower << 1);
            signed_addend = addend;
          }

        /* Handle calls via the PLT.  */
        if (plt_offset != (bfd_vma) -1)
          {
            value = (splt->output_section->vma
                     + splt->output_offset
                     + plt_offset);
            /* Target the Thumb stub before the ARM PLT entry.  */
            value -= PLT_THUMB_STUB_SIZE;
            *unresolved_reloc_p = FALSE;
          }

        hash = (struct elf32_arm_link_hash_entry *)h;

        /* Insert a range-extension/mode-change stub if required.  */
        stub_type = arm_type_of_stub (info, input_section, rel,
                                      st_type, &branch_type,
                                      hash, value, sym_sec,
                                      input_bfd, sym_name);
        if (stub_type != arm_stub_none)
          {
            stub_entry = elf32_arm_get_stub_entry (input_section,
                                                   sym_sec, h,
                                                   rel, globals,
                                                   stub_type);
            if (stub_entry != NULL)
              {
                value = (stub_entry->stub_offset
                         + stub_entry->stub_sec->output_offset
                         + stub_entry->stub_sec->output_section->vma);
              }
          }

        relocation = value + signed_addend;
        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);
        signed_check = (bfd_signed_vma) relocation;

        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          overflow = TRUE;

        /* Put RELOCATION back into the insn.  */
        {
          bfd_vma S = (relocation & 0x00100000) >> 20;
          bfd_vma J2 = (relocation & 0x00080000) >> 19;
          bfd_vma J1 = (relocation & 0x00040000) >> 18;
          bfd_vma hi = (relocation & 0x0003f000) >> 12;
          bfd_vma lo = (relocation & 0x00000ffe) >> 1;

          upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
          lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
        }

        /* Put the relocated value back in the object file:  */
        bfd_put_16 (input_bfd, upper_insn, hit_data);
        bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

        return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
      }
11320
    case R_ARM_THM_JUMP11:
    case R_ARM_THM_JUMP8:
    case R_ARM_THM_JUMP6:
      /* Thumb B (branch) instruction).  */
      {
        bfd_signed_vma relocation;
        /* Range limits derived from the howto's bit width.  */
        bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
        bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
        bfd_signed_vma signed_check;

        /* CZB cannot jump backward.  */
        if (r_type == R_ARM_THM_JUMP6)
          reloc_signed_min = 0;

        if (globals->use_rel)
          {
            /* Need to refetch addend.  */
            addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
            if (addend & ((howto->src_mask + 1) >> 1))
              {
                /* Top bit of the field set: sign-extend the addend.  */
                signed_addend = -1;
                signed_addend &= ~ howto->src_mask;
                signed_addend |= addend;
              }
            else
              signed_addend = addend;
            /* The value in the insn has been right shifted.  We need to
               undo this, so that we can perform the address calculation
               in terms of bytes.  */
            signed_addend <<= howto->rightshift;
          }
        relocation = value + signed_addend;

        relocation -= (input_section->output_section->vma
                       + input_section->output_offset
                       + rel->r_offset);

        relocation >>= howto->rightshift;
        signed_check = relocation;

        if (r_type == R_ARM_THM_JUMP6)
          /* CB{N}Z splits the offset across the i bit and imm5 field.  */
          relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
        else
          relocation &= howto->dst_mask;
        /* Merge with the untouched bits of the original insn.  */
        relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));

        bfd_put_16 (input_bfd, relocation, hit_data);

        /* Assumes two's complement.  */
        if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
          return bfd_reloc_overflow;

        return bfd_reloc_ok;
      }
11375
11376 case R_ARM_ALU_PCREL7_0:
11377 case R_ARM_ALU_PCREL15_8:
11378 case R_ARM_ALU_PCREL23_15:
11379 {
11380 bfd_vma insn;
11381 bfd_vma relocation;
11382
11383 insn = bfd_get_32 (input_bfd, hit_data);
11384 if (globals->use_rel)
11385 {
11386 /* Extract the addend. */
11387 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11388 signed_addend = addend;
11389 }
11390 relocation = value + signed_addend;
11391
11392 relocation -= (input_section->output_section->vma
11393 + input_section->output_offset
11394 + rel->r_offset);
11395 insn = (insn & ~0xfff)
11396 | ((howto->bitpos << 7) & 0xf00)
11397 | ((relocation >> howto->bitpos) & 0xff);
11398 bfd_put_32 (input_bfd, value, hit_data);
11399 }
11400 return bfd_reloc_ok;
11401
    case R_ARM_GNU_VTINHERIT:
    case R_ARM_GNU_VTENTRY:
      /* These are markers used only for garbage collection of unused
         vtable entries; they patch no bits, so there is nothing to do
         at final link time.  */
      return bfd_reloc_ok;
11405
    case R_ARM_GOTOFF32:
      /* Relocation is relative to the start of the
         global offset table.  */

      BFD_ASSERT (sgot != NULL);
      if (sgot == NULL)
        return bfd_reloc_notsupported;

      /* If we are addressing a Thumb function, we need to adjust the
         address by one, so that attempts to call the function pointer will
         correctly interpret it as Thumb code.  */
      if (branch_type == ST_BRANCH_TO_THUMB)
        value += 1;

      /* Note that sgot->output_offset is not involved in this
         calculation.  We always want the start of .got.  If we
         define _GLOBAL_OFFSET_TABLE in a different way, as is
         permitted by the ABI, we might have to change this
         calculation.  */
      value -= sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
11429
    case R_ARM_GOTPC:
      /* Use global offset table as symbol value.  */
      BFD_ASSERT (sgot != NULL);

      if (sgot == NULL)
        return bfd_reloc_notsupported;

      /* The GOT address itself is the value being relocated against;
         mark the reloc as resolved.  */
      *unresolved_reloc_p = FALSE;
      value = sgot->output_section->vma;
      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
11442
    case R_ARM_GOT32:
    case R_ARM_GOT_PREL:
      /* Relocation is to the entry for this symbol in the
         global offset table.  */
      if (sgot == NULL)
        return bfd_reloc_notsupported;

      if (dynreloc_st_type == STT_GNU_IFUNC
          && plt_offset != (bfd_vma) -1
          && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
        {
          /* We have a relocation against a locally-binding STT_GNU_IFUNC
             symbol, and the relocation resolves directly to the runtime
             target rather than to the .iplt entry.  This means that any
             .got entry would be the same value as the .igot.plt entry,
             so there's no point creating both.  */
          sgot = globals->root.igotplt;
          value = sgot->output_offset + gotplt_offset;
        }
      else if (h != NULL)
        {
          /* Global symbol: use (and possibly initialize) its GOT slot.  */
          bfd_vma off;

          off = h->got.offset;
          BFD_ASSERT (off != (bfd_vma) -1);
          if ((off & 1) != 0)
            {
              /* We have already processed one GOT relocation against
                 this symbol.  */
              off &= ~1;
              if (globals->root.dynamic_sections_created
                  && !SYMBOL_REFERENCES_LOCAL (info, h))
                *unresolved_reloc_p = FALSE;
            }
          else
            {
              Elf_Internal_Rela outrel;
              int isrofixup = 0;

              if (((h->dynindx != -1) || globals->fdpic_p)
                  && !SYMBOL_REFERENCES_LOCAL (info, h))
                {
                  /* If the symbol doesn't resolve locally in a static
                     object, we have an undefined reference.  If the
                     symbol doesn't resolve locally in a dynamic object,
                     it should be resolved by the dynamic linker.  */
                  if (globals->root.dynamic_sections_created)
                    {
                      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
                      *unresolved_reloc_p = FALSE;
                    }
                  else
                    outrel.r_info = 0;
                  outrel.r_addend = 0;
                }
              else
                {
                  /* Locally-binding symbol: emit an IRELATIVE or
                     RELATIVE dynamic reloc, or an FDPIC rofixup.  */
                  if (dynreloc_st_type == STT_GNU_IFUNC)
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
                  else if (bfd_link_pic (info)
                           && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                               || h->root.type != bfd_link_hash_undefweak))
                    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
                  else
                    {
                      outrel.r_info = 0;
                      if (globals->fdpic_p)
                        isrofixup = 1;
                    }
                  outrel.r_addend = dynreloc_value;
                }

              /* The GOT entry is initialized to zero by default.
                 See if we should install a different value.  */
              if (outrel.r_addend != 0
                  && (globals->use_rel || outrel.r_info == 0))
                {
                  bfd_put_32 (output_bfd, outrel.r_addend,
                              sgot->contents + off);
                  outrel.r_addend = 0;
                }

              if (isrofixup)
                arm_elf_add_rofixup (output_bfd,
                                     elf32_arm_hash_table(info)->srofixup,
                                     sgot->output_section->vma
                                     + sgot->output_offset + off);

              else if (outrel.r_info != 0)
                {
                  outrel.r_offset = (sgot->output_section->vma
                                     + sgot->output_offset
                                     + off);
                  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
                }

              /* Mark the slot as initialized (low bit of the offset).  */
              h->got.offset |= 1;
            }
          value = sgot->output_offset + off;
        }
      else
        {
          /* Local symbol: use the per-input-bfd local GOT offsets.  */
          bfd_vma off;

          BFD_ASSERT (local_got_offsets != NULL
                      && local_got_offsets[r_symndx] != (bfd_vma) -1);

          off = local_got_offsets[r_symndx];

          /* The offset must always be a multiple of 4.  We use the
             least significant bit to record whether we have already
             generated the necessary reloc.  */
          if ((off & 1) != 0)
            off &= ~1;
          else
            {
              Elf_Internal_Rela outrel;
              int isrofixup = 0;

              if (dynreloc_st_type == STT_GNU_IFUNC)
                outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
              else if (bfd_link_pic (info))
                outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
              else
                {
                  outrel.r_info = 0;
                  if (globals->fdpic_p)
                    isrofixup = 1;
                }

              /* The GOT entry is initialized to zero by default.
                 See if we should install a different value.  */
              if (globals->use_rel || outrel.r_info == 0)
                bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);

              if (isrofixup)
                arm_elf_add_rofixup (output_bfd,
                                     globals->srofixup,
                                     sgot->output_section->vma
                                     + sgot->output_offset + off);

              else if (outrel.r_info != 0)
                {
                  outrel.r_addend = addend + dynreloc_value;
                  outrel.r_offset = (sgot->output_section->vma
                                     + sgot->output_offset
                                     + off);
                  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
                }

              local_got_offsets[r_symndx] |= 1;
            }

          value = sgot->output_offset + off;
        }
      if (r_type != R_ARM_GOT32)
        /* R_ARM_GOT_PREL wants the absolute GOT-entry address.  */
        value += sgot->output_section->vma;

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
11604
    case R_ARM_TLS_LDO32:
      /* Local-dynamic TLS: the symbol's offset from the start of the
         module's TLS block.  */
      value = value - dtpoff_base (info);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                       contents, rel->r_offset, value,
                                       rel->r_addend);
11611
    case R_ARM_TLS_LDM32:
    case R_ARM_TLS_LDM32_FDPIC:
      /* Local-dynamic TLS: a single shared GOT entry holds the module
         ID for this link unit.  */
      {
        bfd_vma off;

        if (sgot == NULL)
          abort ();

        off = globals->tls_ldm_got.offset;

        /* Low bit records whether the entry was already emitted.  */
        if ((off & 1) != 0)
          off &= ~1;
        else
          {
            /* If we don't know the module number, create a relocation
               for it.  */
            if (bfd_link_pic (info))
              {
                Elf_Internal_Rela outrel;

                if (srelgot == NULL)
                  abort ();

                outrel.r_addend = 0;
                outrel.r_offset = (sgot->output_section->vma
                                   + sgot->output_offset + off);
                outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);

                if (globals->use_rel)
                  bfd_put_32 (output_bfd, outrel.r_addend,
                              sgot->contents + off);

                elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
              }
            else
              /* Static/executable link: the module is always 1.  */
              bfd_put_32 (output_bfd, 1, sgot->contents + off);

            globals->tls_ldm_got.offset |= 1;
          }

        if (r_type == R_ARM_TLS_LDM32_FDPIC)
          {
            /* FDPIC resolves to the GOT-relative offset of the entry.  */
            bfd_put_32(output_bfd,
                       globals->root.sgot->output_offset + off,
                       contents + rel->r_offset);

            return bfd_reloc_ok;
          }
        else
          {
            /* Otherwise the value is PC-relative to the GOT entry.  */
            value = sgot->output_section->vma + sgot->output_offset + off
                    - (input_section->output_section->vma
                       + input_section->output_offset + rel->r_offset);

            return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                             contents, rel->r_offset, value,
                                             rel->r_addend);
          }
      }
11671
    case R_ARM_TLS_CALL:
    case R_ARM_THM_TLS_CALL:
    case R_ARM_TLS_GD32:
    case R_ARM_TLS_GD32_FDPIC:
    case R_ARM_TLS_IE32:
    case R_ARM_TLS_IE32_FDPIC:
    case R_ARM_TLS_GOTDESC:
    case R_ARM_TLS_DESCSEQ:
    case R_ARM_THM_TLS_DESCSEQ:
      /* General-dynamic / initial-exec / TLS-descriptor relocations:
         allocate and initialize the GOT (and .got.plt for descriptors)
         entries for the symbol, emitting dynamic relocations where the
         values are not known at link time, then resolve the reloc to
         the appropriate GOT-relative or PC-relative value.  */
      {
        bfd_vma off, offplt;
        int indx = 0;
        char tls_type;

        BFD_ASSERT (sgot != NULL);

        if (h != NULL)
          {
            /* Global symbol: GOT/tlsdesc offsets live in the hash
               entry; INDX is its dynamic symbol index if it does not
               bind locally.  */
            bfd_boolean dyn;
            dyn = globals->root.dynamic_sections_created;
            if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
                                                 bfd_link_pic (info),
                                                 h)
                && (!bfd_link_pic (info)
                    || !SYMBOL_REFERENCES_LOCAL (info, h)))
              {
                *unresolved_reloc_p = FALSE;
                indx = h->dynindx;
              }
            off = h->got.offset;
            offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
            tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
          }
        else
          {
            /* Local symbol: offsets come from the per-bfd arrays.  */
            BFD_ASSERT (local_got_offsets != NULL);
            off = local_got_offsets[r_symndx];
            offplt = local_tlsdesc_gotents[r_symndx];
            tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
          }

        /* Linker relaxations happens from one of the
           R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
        if (ELF32_R_TYPE(rel->r_info) != r_type)
          tls_type = GOT_TLS_IE;

        BFD_ASSERT (tls_type != GOT_UNKNOWN);

        /* Low bit of OFF records whether the GOT entries have already
           been written by a previous reloc against this symbol.  */
        if ((off & 1) != 0)
          off &= ~1;
        else
          {
            bfd_boolean need_relocs = FALSE;
            Elf_Internal_Rela outrel;
            int cur_off = off;

            /* The GOT entries have not been initialized yet.  Do it
               now, and emit any relocations.  If both an IE GOT and a
               GD GOT are necessary, we emit the GD first.  */

            if ((bfd_link_pic (info) || indx != 0)
                && (h == NULL
                    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
                        && !resolved_to_zero)
                    || h->root.type != bfd_link_hash_undefweak))
              {
                need_relocs = TRUE;
                BFD_ASSERT (srelgot != NULL);
              }

            if (tls_type & GOT_TLS_GDESC)
              {
                bfd_byte *loc;

                /* We should have relaxed, unless this is an undefined
                   weak symbol.  */
                BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
                            || bfd_link_pic (info));
                BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
                            <= globals->root.sgotplt->size);

                outrel.r_addend = 0;
                outrel.r_offset = (globals->root.sgotplt->output_section->vma
                                   + globals->root.sgotplt->output_offset
                                   + offplt
                                   + globals->sgotplt_jump_table_size);

                outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
                sreloc = globals->root.srelplt;
                loc = sreloc->contents;
                loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
                BFD_ASSERT (loc + RELOC_SIZE (globals)
                            <= sreloc->contents + sreloc->size);

                SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);

                /* For globals, the first word in the relocation gets
                   the relocation index and the top bit set, or zero,
                   if we're binding now.  For locals, it gets the
                   symbol's offset in the tls section.  */
                bfd_put_32 (output_bfd,
                            !h ? value - elf_hash_table (info)->tls_sec->vma
                            : info->flags & DF_BIND_NOW ? 0
                            : 0x80000000 | ELF32_R_SYM (outrel.r_info),
                            globals->root.sgotplt->contents + offplt
                            + globals->sgotplt_jump_table_size);

                /* Second word in the relocation is always zero.  */
                bfd_put_32 (output_bfd, 0,
                            globals->root.sgotplt->contents + offplt
                            + globals->sgotplt_jump_table_size + 4);
              }
            if (tls_type & GOT_TLS_GD)
              {
                /* GD needs a pair of GOT words: module ID and offset
                   within the module's TLS block.  */
                if (need_relocs)
                  {
                    outrel.r_addend = 0;
                    outrel.r_offset = (sgot->output_section->vma
                                       + sgot->output_offset
                                       + cur_off);
                    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);

                    if (globals->use_rel)
                      bfd_put_32 (output_bfd, outrel.r_addend,
                                  sgot->contents + cur_off);

                    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);

                    if (indx == 0)
                      bfd_put_32 (output_bfd, value - dtpoff_base (info),
                                  sgot->contents + cur_off + 4);
                    else
                      {
                        outrel.r_addend = 0;
                        outrel.r_info = ELF32_R_INFO (indx,
                                                      R_ARM_TLS_DTPOFF32);
                        outrel.r_offset += 4;

                        if (globals->use_rel)
                          bfd_put_32 (output_bfd, outrel.r_addend,
                                      sgot->contents + cur_off + 4);

                        elf32_arm_add_dynreloc (output_bfd, info,
                                                srelgot, &outrel);
                      }
                  }
                else
                  {
                    /* If we are not emitting relocations for a
                       general dynamic reference, then we must be in a
                       static link or an executable link with the
                       symbol binding locally.  Mark it as belonging
                       to module 1, the executable.  */
                    bfd_put_32 (output_bfd, 1,
                                sgot->contents + cur_off);
                    bfd_put_32 (output_bfd, value - dtpoff_base (info),
                                sgot->contents + cur_off + 4);
                  }

                cur_off += 8;
              }

            if (tls_type & GOT_TLS_IE)
              {
                /* IE needs a single GOT word: the TP-relative offset.  */
                if (need_relocs)
                  {
                    if (indx == 0)
                      outrel.r_addend = value - dtpoff_base (info);
                    else
                      outrel.r_addend = 0;
                    outrel.r_offset = (sgot->output_section->vma
                                       + sgot->output_offset
                                       + cur_off);
                    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);

                    if (globals->use_rel)
                      bfd_put_32 (output_bfd, outrel.r_addend,
                                  sgot->contents + cur_off);

                    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
                  }
                else
                  bfd_put_32 (output_bfd, tpoff (info, value),
                              sgot->contents + cur_off);
                cur_off += 4;
              }

            if (h != NULL)
              h->got.offset |= 1;
            else
              local_got_offsets[r_symndx] |= 1;
          }

        /* An IE reloc against a symbol that also has a GD entry must
           skip the GD pair to reach the IE word.  */
        if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
          off += 8;
        else if (tls_type & GOT_TLS_GDESC)
          off = offplt;

        if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
            || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
          {
            bfd_signed_vma offset;
            /* TLS stubs are arm mode.  The original symbol is a
               data object, so branch_type is bogus.  */
            branch_type = ST_BRANCH_TO_ARM;
            enum elf32_arm_stub_type stub_type
              = arm_type_of_stub (info, input_section, rel,
                                  st_type, &branch_type,
                                  (struct elf32_arm_link_hash_entry *)h,
                                  globals->tls_trampoline, globals->root.splt,
                                  input_bfd, sym_name);

            if (stub_type != arm_stub_none)
              {
                struct elf32_arm_stub_hash_entry *stub_entry
                  = elf32_arm_get_stub_entry
                  (input_section, globals->root.splt, 0, rel,
                   globals, stub_type);
                offset = (stub_entry->stub_offset
                          + stub_entry->stub_sec->output_offset
                          + stub_entry->stub_sec->output_section->vma);
              }
            else
              offset = (globals->root.splt->output_section->vma
                        + globals->root.splt->output_offset
                        + globals->tls_trampoline);

            if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
              {
                /* ARM-mode BL/BLX to the TLS trampoline; PC bias is 8.  */
                unsigned long inst;

                offset -= (input_section->output_section->vma
                           + input_section->output_offset
                           + rel->r_offset + 8);

                inst = offset >> 2;
                inst &= 0x00ffffff;
                value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
              }
            else
              {
                /* Thumb blx encodes the offset in a complicated
                   fashion.  */
                unsigned upper_insn, lower_insn;
                unsigned neg;

                offset -= (input_section->output_section->vma
                           + input_section->output_offset
                           + rel->r_offset + 4);

                if (stub_type != arm_stub_none
                    && arm_stub_is_thumb (stub_type))
                  {
                    lower_insn = 0xd000;
                  }
                else
                  {
                    lower_insn = 0xc000;
                    /* Round up the offset to a word boundary.  */
                    offset = (offset + 2) & ~2;
                  }

                neg = offset < 0;
                upper_insn = (0xf000
                              | ((offset >> 12) & 0x3ff)
                              | (neg << 10));
                lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
                              | (((!((offset >> 22) & 1)) ^ neg) << 11)
                              | ((offset >> 1) & 0x7ff);
                bfd_put_16 (input_bfd, upper_insn, hit_data);
                bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
                return bfd_reloc_ok;
              }
          }
        /* These relocations needs special care, as besides the fact
           they point somewhere in .gotplt, the addend must be
           adjusted accordingly depending on the type of instruction
           we refer to.  */
        else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
          {
            unsigned long data, insn;
            unsigned thumb;

            /* The word at the reloc holds (offset back to the consuming
               instruction | Thumb bit).  */
            data = bfd_get_32 (input_bfd, hit_data);
            thumb = data & 1;
            data &= ~1u;

            if (thumb)
              {
                insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
                /* 32-bit Thumb-2 encodings need the second halfword.  */
                if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
                  insn = (insn << 16)
                    | bfd_get_16 (input_bfd,
                                  contents + rel->r_offset - data + 2);
                if ((insn & 0xf800c000) == 0xf000c000)
                  /* bl/blx */
                  value = -6;
                else if ((insn & 0xffffff00) == 0x4400)
                  /* add */
                  value = -5;
                else
                  {
                    _bfd_error_handler
                      /* xgettext:c-format */
                      (_("%pB(%pA+%#" PRIx64 "): "
                         "unexpected %s instruction '%#lx' "
                         "referenced by TLS_GOTDESC"),
                       input_bfd, input_section, (uint64_t) rel->r_offset,
                       "Thumb", insn);
                    return bfd_reloc_notsupported;
                  }
              }
            else
              {
                insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);

                switch (insn >> 24)
                  {
                  case 0xeb:  /* bl */
                  case 0xfa:  /* blx */
                    value = -4;
                    break;

                  case 0xe0:  /* add */
                    value = -8;
                    break;

                  default:
                    _bfd_error_handler
                      /* xgettext:c-format */
                      (_("%pB(%pA+%#" PRIx64 "): "
                         "unexpected %s instruction '%#lx' "
                         "referenced by TLS_GOTDESC"),
                       input_bfd, input_section, (uint64_t) rel->r_offset,
                       "ARM", insn);
                    return bfd_reloc_notsupported;
                  }
              }

            value += ((globals->root.sgotplt->output_section->vma
                       + globals->root.sgotplt->output_offset + off)
                      - (input_section->output_section->vma
                         + input_section->output_offset
                         + rel->r_offset)
                      + globals->sgotplt_jump_table_size);
          }
        else
          /* Plain GD32/IE32: PC-relative offset to the GOT entry.  */
          value = ((globals->root.sgot->output_section->vma
                    + globals->root.sgot->output_offset + off)
                   - (input_section->output_section->vma
                      + input_section->output_offset + rel->r_offset));

        if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
                                 r_type == R_ARM_TLS_IE32_FDPIC))
          {
            /* For FDPIC relocations, resolve to the offset of the GOT
               entry from the start of GOT.  */
            bfd_put_32(output_bfd,
                       globals->root.sgot->output_offset + off,
                       contents + rel->r_offset);

            return bfd_reloc_ok;
          }
        else
          {
            return _bfd_final_link_relocate (howto, input_bfd, input_section,
                                             contents, rel->r_offset, value,
                                             rel->r_addend);
          }
      }
12042
12043 case R_ARM_TLS_LE32:
12044 if (bfd_link_dll (info))
12045 {
12046 _bfd_error_handler
12047 /* xgettext:c-format */
12048 (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12049 "in shared object"),
12050 input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12051 return bfd_reloc_notsupported;
12052 }
12053 else
12054 value = tpoff (info, value);
12055
12056 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12057 contents, rel->r_offset, value,
12058 rel->r_addend);
12059
12060 case R_ARM_V4BX:
12061 if (globals->fix_v4bx)
12062 {
12063 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12064
12065 /* Ensure that we have a BX instruction. */
12066 BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12067
12068 if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12069 {
12070 /* Branch to veneer. */
12071 bfd_vma glue_addr;
12072 glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12073 glue_addr -= input_section->output_section->vma
12074 + input_section->output_offset
12075 + rel->r_offset + 8;
12076 insn = (insn & 0xf0000000) | 0x0a000000
12077 | ((glue_addr >> 2) & 0x00ffffff);
12078 }
12079 else
12080 {
12081 /* Preserve Rm (lowest four bits) and the condition code
12082 (highest four bits). Other bits encode MOV PC,Rm. */
12083 insn = (insn & 0xf000000f) | 0x01a0f000;
12084 }
12085
12086 bfd_put_32 (input_bfd, insn, hit_data);
12087 }
12088 return bfd_reloc_ok;
12089
12090 case R_ARM_MOVW_ABS_NC:
12091 case R_ARM_MOVT_ABS:
12092 case R_ARM_MOVW_PREL_NC:
12093 case R_ARM_MOVT_PREL:
12094 /* Until we properly support segment-base-relative addressing then
12095 we assume the segment base to be zero, as for the group relocations.
12096 Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12097 and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS. */
12098 case R_ARM_MOVW_BREL_NC:
12099 case R_ARM_MOVW_BREL:
12100 case R_ARM_MOVT_BREL:
12101 {
12102 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12103
12104 if (globals->use_rel)
12105 {
12106 addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12107 signed_addend = (addend ^ 0x8000) - 0x8000;
12108 }
12109
12110 value += signed_addend;
12111
12112 if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12113 value -= (input_section->output_section->vma
12114 + input_section->output_offset + rel->r_offset);
12115
12116 if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12117 return bfd_reloc_overflow;
12118
12119 if (branch_type == ST_BRANCH_TO_THUMB)
12120 value |= 1;
12121
12122 if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12123 || r_type == R_ARM_MOVT_BREL)
12124 value >>= 16;
12125
12126 insn &= 0xfff0f000;
12127 insn |= value & 0xfff;
12128 insn |= (value & 0xf000) << 4;
12129 bfd_put_32 (input_bfd, insn, hit_data);
12130 }
12131 return bfd_reloc_ok;
12132
12133 case R_ARM_THM_MOVW_ABS_NC:
12134 case R_ARM_THM_MOVT_ABS:
12135 case R_ARM_THM_MOVW_PREL_NC:
12136 case R_ARM_THM_MOVT_PREL:
12137 /* Until we properly support segment-base-relative addressing then
12138 we assume the segment base to be zero, as for the above relocations.
12139 Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12140 R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12141 as R_ARM_THM_MOVT_ABS. */
12142 case R_ARM_THM_MOVW_BREL_NC:
12143 case R_ARM_THM_MOVW_BREL:
12144 case R_ARM_THM_MOVT_BREL:
12145 {
12146 bfd_vma insn;
12147
12148 insn = bfd_get_16 (input_bfd, hit_data) << 16;
12149 insn |= bfd_get_16 (input_bfd, hit_data + 2);
12150
12151 if (globals->use_rel)
12152 {
12153 addend = ((insn >> 4) & 0xf000)
12154 | ((insn >> 15) & 0x0800)
12155 | ((insn >> 4) & 0x0700)
12156 | (insn & 0x00ff);
12157 signed_addend = (addend ^ 0x8000) - 0x8000;
12158 }
12159
12160 value += signed_addend;
12161
12162 if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12163 value -= (input_section->output_section->vma
12164 + input_section->output_offset + rel->r_offset);
12165
12166 if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12167 return bfd_reloc_overflow;
12168
12169 if (branch_type == ST_BRANCH_TO_THUMB)
12170 value |= 1;
12171
12172 if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12173 || r_type == R_ARM_THM_MOVT_BREL)
12174 value >>= 16;
12175
12176 insn &= 0xfbf08f00;
12177 insn |= (value & 0xf000) << 4;
12178 insn |= (value & 0x0800) << 15;
12179 insn |= (value & 0x0700) << 4;
12180 insn |= (value & 0x00ff);
12181
12182 bfd_put_16 (input_bfd, insn >> 16, hit_data);
12183 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12184 }
12185 return bfd_reloc_ok;
12186
12187 case R_ARM_ALU_PC_G0_NC:
12188 case R_ARM_ALU_PC_G1_NC:
12189 case R_ARM_ALU_PC_G0:
12190 case R_ARM_ALU_PC_G1:
12191 case R_ARM_ALU_PC_G2:
12192 case R_ARM_ALU_SB_G0_NC:
12193 case R_ARM_ALU_SB_G1_NC:
12194 case R_ARM_ALU_SB_G0:
12195 case R_ARM_ALU_SB_G1:
12196 case R_ARM_ALU_SB_G2:
12197 {
12198 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12199 bfd_vma pc = input_section->output_section->vma
12200 + input_section->output_offset + rel->r_offset;
12201 /* sb is the origin of the *segment* containing the symbol. */
12202 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12203 bfd_vma residual;
12204 bfd_vma g_n;
12205 bfd_signed_vma signed_value;
12206 int group = 0;
12207
12208 /* Determine which group of bits to select. */
12209 switch (r_type)
12210 {
12211 case R_ARM_ALU_PC_G0_NC:
12212 case R_ARM_ALU_PC_G0:
12213 case R_ARM_ALU_SB_G0_NC:
12214 case R_ARM_ALU_SB_G0:
12215 group = 0;
12216 break;
12217
12218 case R_ARM_ALU_PC_G1_NC:
12219 case R_ARM_ALU_PC_G1:
12220 case R_ARM_ALU_SB_G1_NC:
12221 case R_ARM_ALU_SB_G1:
12222 group = 1;
12223 break;
12224
12225 case R_ARM_ALU_PC_G2:
12226 case R_ARM_ALU_SB_G2:
12227 group = 2;
12228 break;
12229
12230 default:
12231 abort ();
12232 }
12233
12234 /* If REL, extract the addend from the insn. If RELA, it will
12235 have already been fetched for us. */
12236 if (globals->use_rel)
12237 {
12238 int negative;
12239 bfd_vma constant = insn & 0xff;
12240 bfd_vma rotation = (insn & 0xf00) >> 8;
12241
12242 if (rotation == 0)
12243 signed_addend = constant;
12244 else
12245 {
12246 /* Compensate for the fact that in the instruction, the
12247 rotation is stored in multiples of 2 bits. */
12248 rotation *= 2;
12249
12250 /* Rotate "constant" right by "rotation" bits. */
12251 signed_addend = (constant >> rotation) |
12252 (constant << (8 * sizeof (bfd_vma) - rotation));
12253 }
12254
12255 /* Determine if the instruction is an ADD or a SUB.
12256 (For REL, this determines the sign of the addend.) */
12257 negative = identify_add_or_sub (insn);
12258 if (negative == 0)
12259 {
12260 _bfd_error_handler
12261 /* xgettext:c-format */
12262 (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12263 "are allowed for ALU group relocations"),
12264 input_bfd, input_section, (uint64_t) rel->r_offset);
12265 return bfd_reloc_overflow;
12266 }
12267
12268 signed_addend *= negative;
12269 }
12270
12271 /* Compute the value (X) to go in the place. */
12272 if (r_type == R_ARM_ALU_PC_G0_NC
12273 || r_type == R_ARM_ALU_PC_G1_NC
12274 || r_type == R_ARM_ALU_PC_G0
12275 || r_type == R_ARM_ALU_PC_G1
12276 || r_type == R_ARM_ALU_PC_G2)
12277 /* PC relative. */
12278 signed_value = value - pc + signed_addend;
12279 else
12280 /* Section base relative. */
12281 signed_value = value - sb + signed_addend;
12282
12283 /* If the target symbol is a Thumb function, then set the
12284 Thumb bit in the address. */
12285 if (branch_type == ST_BRANCH_TO_THUMB)
12286 signed_value |= 1;
12287
12288 /* Calculate the value of the relevant G_n, in encoded
12289 constant-with-rotation format. */
12290 g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12291 group, &residual);
12292
12293 /* Check for overflow if required. */
12294 if ((r_type == R_ARM_ALU_PC_G0
12295 || r_type == R_ARM_ALU_PC_G1
12296 || r_type == R_ARM_ALU_PC_G2
12297 || r_type == R_ARM_ALU_SB_G0
12298 || r_type == R_ARM_ALU_SB_G1
12299 || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12300 {
12301 _bfd_error_handler
12302 /* xgettext:c-format */
12303 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12304 "splitting %#" PRIx64 " for group relocation %s"),
12305 input_bfd, input_section, (uint64_t) rel->r_offset,
12306 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12307 howto->name);
12308 return bfd_reloc_overflow;
12309 }
12310
12311 /* Mask out the value and the ADD/SUB part of the opcode; take care
12312 not to destroy the S bit. */
12313 insn &= 0xff1ff000;
12314
12315 /* Set the opcode according to whether the value to go in the
12316 place is negative. */
12317 if (signed_value < 0)
12318 insn |= 1 << 22;
12319 else
12320 insn |= 1 << 23;
12321
12322 /* Encode the offset. */
12323 insn |= g_n;
12324
12325 bfd_put_32 (input_bfd, insn, hit_data);
12326 }
12327 return bfd_reloc_ok;
12328
12329 case R_ARM_LDR_PC_G0:
12330 case R_ARM_LDR_PC_G1:
12331 case R_ARM_LDR_PC_G2:
12332 case R_ARM_LDR_SB_G0:
12333 case R_ARM_LDR_SB_G1:
12334 case R_ARM_LDR_SB_G2:
12335 {
12336 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12337 bfd_vma pc = input_section->output_section->vma
12338 + input_section->output_offset + rel->r_offset;
12339 /* sb is the origin of the *segment* containing the symbol. */
12340 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12341 bfd_vma residual;
12342 bfd_signed_vma signed_value;
12343 int group = 0;
12344
12345 /* Determine which groups of bits to calculate. */
12346 switch (r_type)
12347 {
12348 case R_ARM_LDR_PC_G0:
12349 case R_ARM_LDR_SB_G0:
12350 group = 0;
12351 break;
12352
12353 case R_ARM_LDR_PC_G1:
12354 case R_ARM_LDR_SB_G1:
12355 group = 1;
12356 break;
12357
12358 case R_ARM_LDR_PC_G2:
12359 case R_ARM_LDR_SB_G2:
12360 group = 2;
12361 break;
12362
12363 default:
12364 abort ();
12365 }
12366
12367 /* If REL, extract the addend from the insn. If RELA, it will
12368 have already been fetched for us. */
12369 if (globals->use_rel)
12370 {
12371 int negative = (insn & (1 << 23)) ? 1 : -1;
12372 signed_addend = negative * (insn & 0xfff);
12373 }
12374
12375 /* Compute the value (X) to go in the place. */
12376 if (r_type == R_ARM_LDR_PC_G0
12377 || r_type == R_ARM_LDR_PC_G1
12378 || r_type == R_ARM_LDR_PC_G2)
12379 /* PC relative. */
12380 signed_value = value - pc + signed_addend;
12381 else
12382 /* Section base relative. */
12383 signed_value = value - sb + signed_addend;
12384
12385 /* Calculate the value of the relevant G_{n-1} to obtain
12386 the residual at that stage. */
12387 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12388 group - 1, &residual);
12389
12390 /* Check for overflow. */
12391 if (residual >= 0x1000)
12392 {
12393 _bfd_error_handler
12394 /* xgettext:c-format */
12395 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12396 "splitting %#" PRIx64 " for group relocation %s"),
12397 input_bfd, input_section, (uint64_t) rel->r_offset,
12398 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12399 howto->name);
12400 return bfd_reloc_overflow;
12401 }
12402
12403 /* Mask out the value and U bit. */
12404 insn &= 0xff7ff000;
12405
12406 /* Set the U bit if the value to go in the place is non-negative. */
12407 if (signed_value >= 0)
12408 insn |= 1 << 23;
12409
12410 /* Encode the offset. */
12411 insn |= residual;
12412
12413 bfd_put_32 (input_bfd, insn, hit_data);
12414 }
12415 return bfd_reloc_ok;
12416
12417 case R_ARM_LDRS_PC_G0:
12418 case R_ARM_LDRS_PC_G1:
12419 case R_ARM_LDRS_PC_G2:
12420 case R_ARM_LDRS_SB_G0:
12421 case R_ARM_LDRS_SB_G1:
12422 case R_ARM_LDRS_SB_G2:
12423 {
12424 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12425 bfd_vma pc = input_section->output_section->vma
12426 + input_section->output_offset + rel->r_offset;
12427 /* sb is the origin of the *segment* containing the symbol. */
12428 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12429 bfd_vma residual;
12430 bfd_signed_vma signed_value;
12431 int group = 0;
12432
12433 /* Determine which groups of bits to calculate. */
12434 switch (r_type)
12435 {
12436 case R_ARM_LDRS_PC_G0:
12437 case R_ARM_LDRS_SB_G0:
12438 group = 0;
12439 break;
12440
12441 case R_ARM_LDRS_PC_G1:
12442 case R_ARM_LDRS_SB_G1:
12443 group = 1;
12444 break;
12445
12446 case R_ARM_LDRS_PC_G2:
12447 case R_ARM_LDRS_SB_G2:
12448 group = 2;
12449 break;
12450
12451 default:
12452 abort ();
12453 }
12454
12455 /* If REL, extract the addend from the insn. If RELA, it will
12456 have already been fetched for us. */
12457 if (globals->use_rel)
12458 {
12459 int negative = (insn & (1 << 23)) ? 1 : -1;
12460 signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12461 }
12462
12463 /* Compute the value (X) to go in the place. */
12464 if (r_type == R_ARM_LDRS_PC_G0
12465 || r_type == R_ARM_LDRS_PC_G1
12466 || r_type == R_ARM_LDRS_PC_G2)
12467 /* PC relative. */
12468 signed_value = value - pc + signed_addend;
12469 else
12470 /* Section base relative. */
12471 signed_value = value - sb + signed_addend;
12472
12473 /* Calculate the value of the relevant G_{n-1} to obtain
12474 the residual at that stage. */
12475 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12476 group - 1, &residual);
12477
12478 /* Check for overflow. */
12479 if (residual >= 0x100)
12480 {
12481 _bfd_error_handler
12482 /* xgettext:c-format */
12483 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12484 "splitting %#" PRIx64 " for group relocation %s"),
12485 input_bfd, input_section, (uint64_t) rel->r_offset,
12486 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12487 howto->name);
12488 return bfd_reloc_overflow;
12489 }
12490
12491 /* Mask out the value and U bit. */
12492 insn &= 0xff7ff0f0;
12493
12494 /* Set the U bit if the value to go in the place is non-negative. */
12495 if (signed_value >= 0)
12496 insn |= 1 << 23;
12497
12498 /* Encode the offset. */
12499 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12500
12501 bfd_put_32 (input_bfd, insn, hit_data);
12502 }
12503 return bfd_reloc_ok;
12504
12505 case R_ARM_LDC_PC_G0:
12506 case R_ARM_LDC_PC_G1:
12507 case R_ARM_LDC_PC_G2:
12508 case R_ARM_LDC_SB_G0:
12509 case R_ARM_LDC_SB_G1:
12510 case R_ARM_LDC_SB_G2:
12511 {
12512 bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12513 bfd_vma pc = input_section->output_section->vma
12514 + input_section->output_offset + rel->r_offset;
12515 /* sb is the origin of the *segment* containing the symbol. */
12516 bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12517 bfd_vma residual;
12518 bfd_signed_vma signed_value;
12519 int group = 0;
12520
12521 /* Determine which groups of bits to calculate. */
12522 switch (r_type)
12523 {
12524 case R_ARM_LDC_PC_G0:
12525 case R_ARM_LDC_SB_G0:
12526 group = 0;
12527 break;
12528
12529 case R_ARM_LDC_PC_G1:
12530 case R_ARM_LDC_SB_G1:
12531 group = 1;
12532 break;
12533
12534 case R_ARM_LDC_PC_G2:
12535 case R_ARM_LDC_SB_G2:
12536 group = 2;
12537 break;
12538
12539 default:
12540 abort ();
12541 }
12542
12543 /* If REL, extract the addend from the insn. If RELA, it will
12544 have already been fetched for us. */
12545 if (globals->use_rel)
12546 {
12547 int negative = (insn & (1 << 23)) ? 1 : -1;
12548 signed_addend = negative * ((insn & 0xff) << 2);
12549 }
12550
12551 /* Compute the value (X) to go in the place. */
12552 if (r_type == R_ARM_LDC_PC_G0
12553 || r_type == R_ARM_LDC_PC_G1
12554 || r_type == R_ARM_LDC_PC_G2)
12555 /* PC relative. */
12556 signed_value = value - pc + signed_addend;
12557 else
12558 /* Section base relative. */
12559 signed_value = value - sb + signed_addend;
12560
12561 /* Calculate the value of the relevant G_{n-1} to obtain
12562 the residual at that stage. */
12563 calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12564 group - 1, &residual);
12565
12566 /* Check for overflow. (The absolute value to go in the place must be
12567 divisible by four and, after having been divided by four, must
12568 fit in eight bits.) */
12569 if ((residual & 0x3) != 0 || residual >= 0x400)
12570 {
12571 _bfd_error_handler
12572 /* xgettext:c-format */
12573 (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12574 "splitting %#" PRIx64 " for group relocation %s"),
12575 input_bfd, input_section, (uint64_t) rel->r_offset,
12576 (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12577 howto->name);
12578 return bfd_reloc_overflow;
12579 }
12580
12581 /* Mask out the value and U bit. */
12582 insn &= 0xff7fff00;
12583
12584 /* Set the U bit if the value to go in the place is non-negative. */
12585 if (signed_value >= 0)
12586 insn |= 1 << 23;
12587
12588 /* Encode the offset. */
12589 insn |= residual >> 2;
12590
12591 bfd_put_32 (input_bfd, insn, hit_data);
12592 }
12593 return bfd_reloc_ok;
12594
12595 case R_ARM_THM_ALU_ABS_G0_NC:
12596 case R_ARM_THM_ALU_ABS_G1_NC:
12597 case R_ARM_THM_ALU_ABS_G2_NC:
12598 case R_ARM_THM_ALU_ABS_G3_NC:
12599 {
12600 const int shift_array[4] = {0, 8, 16, 24};
12601 bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12602 bfd_vma addr = value;
12603 int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12604
12605 /* Compute address. */
12606 if (globals->use_rel)
12607 signed_addend = insn & 0xff;
12608 addr += signed_addend;
12609 if (branch_type == ST_BRANCH_TO_THUMB)
12610 addr |= 1;
12611 /* Clean imm8 insn. */
12612 insn &= 0xff00;
12613 /* And update with correct part of address. */
12614 insn |= (addr >> shift) & 0xff;
12615 /* Update insn. */
12616 bfd_put_16 (input_bfd, insn, hit_data);
12617 }
12618
12619 *unresolved_reloc_p = FALSE;
12620 return bfd_reloc_ok;
12621
12622 case R_ARM_GOTOFFFUNCDESC:
12623 {
12624 if (h == NULL)
12625 {
12626 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12627 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12628 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12629 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12630 bfd_vma seg = -1;
12631
12632 if (bfd_link_pic(info) && dynindx == 0)
12633 abort();
12634
12635 /* Resolve relocation. */
12636 bfd_put_32(output_bfd, (offset + sgot->output_offset)
12637 , contents + rel->r_offset);
12638 /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12639 not done yet. */
12640 arm_elf_fill_funcdesc(output_bfd, info,
12641 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12642 dynindx, offset, addr, dynreloc_value, seg);
12643 }
12644 else
12645 {
12646 int dynindx;
12647 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12648 bfd_vma addr;
12649 bfd_vma seg = -1;
12650
12651 /* For static binaries, sym_sec can be null. */
12652 if (sym_sec)
12653 {
12654 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12655 addr = dynreloc_value - sym_sec->output_section->vma;
12656 }
12657 else
12658 {
12659 dynindx = 0;
12660 addr = 0;
12661 }
12662
12663 if (bfd_link_pic(info) && dynindx == 0)
12664 abort();
12665
12666 /* This case cannot occur since funcdesc is allocated by
12667 the dynamic loader so we cannot resolve the relocation. */
12668 if (h->dynindx != -1)
12669 abort();
12670
12671 /* Resolve relocation. */
12672 bfd_put_32(output_bfd, (offset + sgot->output_offset),
12673 contents + rel->r_offset);
12674 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12675 arm_elf_fill_funcdesc(output_bfd, info,
12676 &eh->fdpic_cnts.funcdesc_offset,
12677 dynindx, offset, addr, dynreloc_value, seg);
12678 }
12679 }
12680 *unresolved_reloc_p = FALSE;
12681 return bfd_reloc_ok;
12682
12683 case R_ARM_GOTFUNCDESC:
12684 {
12685 if (h != NULL)
12686 {
12687 Elf_Internal_Rela outrel;
12688
12689 /* Resolve relocation. */
12690 bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12691 + sgot->output_offset),
12692 contents + rel->r_offset);
12693 /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE. */
12694 if(h->dynindx == -1)
12695 {
12696 int dynindx;
12697 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12698 bfd_vma addr;
12699 bfd_vma seg = -1;
12700
12701 /* For static binaries sym_sec can be null. */
12702 if (sym_sec)
12703 {
12704 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12705 addr = dynreloc_value - sym_sec->output_section->vma;
12706 }
12707 else
12708 {
12709 dynindx = 0;
12710 addr = 0;
12711 }
12712
12713 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12714 arm_elf_fill_funcdesc(output_bfd, info,
12715 &eh->fdpic_cnts.funcdesc_offset,
12716 dynindx, offset, addr, dynreloc_value, seg);
12717 }
12718
12719 /* Add a dynamic relocation on GOT entry if not already done. */
12720 if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12721 {
12722 if (h->dynindx == -1)
12723 {
12724 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12725 if (h->root.type == bfd_link_hash_undefweak)
12726 bfd_put_32(output_bfd, 0, sgot->contents
12727 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12728 else
12729 bfd_put_32(output_bfd, sgot->output_section->vma
12730 + sgot->output_offset
12731 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12732 sgot->contents
12733 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12734 }
12735 else
12736 {
12737 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12738 }
12739 outrel.r_offset = sgot->output_section->vma
12740 + sgot->output_offset
12741 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12742 outrel.r_addend = 0;
12743 if (h->dynindx == -1 && !bfd_link_pic(info))
12744 if (h->root.type == bfd_link_hash_undefweak)
12745 arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12746 else
12747 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12748 else
12749 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12750 eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12751 }
12752 }
12753 else
12754 {
12755 /* Such relocation on static function should not have been
12756 emitted by the compiler. */
12757 abort();
12758 }
12759 }
12760 *unresolved_reloc_p = FALSE;
12761 return bfd_reloc_ok;
12762
12763 case R_ARM_FUNCDESC:
12764 {
12765 if (h == NULL)
12766 {
12767 struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12768 Elf_Internal_Rela outrel;
12769 int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12770 int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12771 bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12772 bfd_vma seg = -1;
12773
12774 if (bfd_link_pic(info) && dynindx == 0)
12775 abort();
12776
12777 /* Replace static FUNCDESC relocation with a
12778 R_ARM_RELATIVE dynamic relocation or with a rofixup for
12779 executable. */
12780 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12781 outrel.r_offset = input_section->output_section->vma
12782 + input_section->output_offset + rel->r_offset;
12783 outrel.r_addend = 0;
12784 if (bfd_link_pic(info))
12785 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12786 else
12787 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12788
12789 bfd_put_32 (input_bfd, sgot->output_section->vma
12790 + sgot->output_offset + offset, hit_data);
12791
12792 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12793 arm_elf_fill_funcdesc(output_bfd, info,
12794 &local_fdpic_cnts[r_symndx].funcdesc_offset,
12795 dynindx, offset, addr, dynreloc_value, seg);
12796 }
12797 else
12798 {
12799 if (h->dynindx == -1)
12800 {
12801 int dynindx;
12802 int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12803 bfd_vma addr;
12804 bfd_vma seg = -1;
12805 Elf_Internal_Rela outrel;
12806
12807 /* For static binaries sym_sec can be null. */
12808 if (sym_sec)
12809 {
12810 dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12811 addr = dynreloc_value - sym_sec->output_section->vma;
12812 }
12813 else
12814 {
12815 dynindx = 0;
12816 addr = 0;
12817 }
12818
12819 if (bfd_link_pic(info) && dynindx == 0)
12820 abort();
12821
12822 /* Replace static FUNCDESC relocation with a
12823 R_ARM_RELATIVE dynamic relocation. */
12824 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12825 outrel.r_offset = input_section->output_section->vma
12826 + input_section->output_offset + rel->r_offset;
12827 outrel.r_addend = 0;
12828 if (bfd_link_pic(info))
12829 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12830 else
12831 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12832
12833 bfd_put_32 (input_bfd, sgot->output_section->vma
12834 + sgot->output_offset + offset, hit_data);
12835
12836 /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet. */
12837 arm_elf_fill_funcdesc(output_bfd, info,
12838 &eh->fdpic_cnts.funcdesc_offset,
12839 dynindx, offset, addr, dynreloc_value, seg);
12840 }
12841 else
12842 {
12843 Elf_Internal_Rela outrel;
12844
12845 /* Add a dynamic relocation. */
12846 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12847 outrel.r_offset = input_section->output_section->vma
12848 + input_section->output_offset + rel->r_offset;
12849 outrel.r_addend = 0;
12850 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12851 }
12852 }
12853 }
12854 *unresolved_reloc_p = FALSE;
12855 return bfd_reloc_ok;
12856
12857 default:
12858 return bfd_reloc_notsupported;
12859 }
12860 }
12861
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used when
   producing REL-style output, where the addend is stored in the
   instruction bits themselves rather than in the reloc entry, so it
   must be read out, adjusted and written back.  */
static void
arm_add_to_rel (bfd * abfd,
		bfd_byte * address,
		reloc_howto_type * howto,
		bfd_signed_vma increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      /* Thumb BL/B.W: the branch offset is split across two 16-bit
	 halfwords (11 bits in each), with the lowest bit implicit
	 because targets are halfword-aligned.  */
      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* NOTE(review): the reassembled addend is not sign-extended
	 before the increment is applied; presumably the result stays
	 in range for the inputs this is called with -- confirm if
	 large negative displacements can reach here.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      /* Re-split the adjusted offset into the two halfwords.  */
      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  /* The top bit of the field is set: sign-extend by setting
	     every bit above the source mask.  */
	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* ARM branch offsets are stored pre-shifted; scale the field
	     value up to bytes before adding the byte increment, then
	     shift back down for re-insertion.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
12935
/* Nonzero if R_TYPE is any of the TLS relocations handled by this
   backend, including the FDPIC variants and the descriptor-based
   (GNU TLS dialect) forms matched by IS_ARM_TLS_GNU_RELOC.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12957
12958 /* Relocate an ARM ELF section. */
12959
12960 static bfd_boolean
12961 elf32_arm_relocate_section (bfd * output_bfd,
12962 struct bfd_link_info * info,
12963 bfd * input_bfd,
12964 asection * input_section,
12965 bfd_byte * contents,
12966 Elf_Internal_Rela * relocs,
12967 Elf_Internal_Sym * local_syms,
12968 asection ** local_sections)
12969 {
12970 Elf_Internal_Shdr *symtab_hdr;
12971 struct elf_link_hash_entry **sym_hashes;
12972 Elf_Internal_Rela *rel;
12973 Elf_Internal_Rela *relend;
12974 const char *name;
12975 struct elf32_arm_link_hash_table * globals;
12976
12977 globals = elf32_arm_hash_table (info);
12978 if (globals == NULL)
12979 return FALSE;
12980
12981 symtab_hdr = & elf_symtab_hdr (input_bfd);
12982 sym_hashes = elf_sym_hashes (input_bfd);
12983
12984 rel = relocs;
12985 relend = relocs + input_section->reloc_count;
12986 for (; rel < relend; rel++)
12987 {
12988 int r_type;
12989 reloc_howto_type * howto;
12990 unsigned long r_symndx;
12991 Elf_Internal_Sym * sym;
12992 asection * sec;
12993 struct elf_link_hash_entry * h;
12994 bfd_vma relocation;
12995 bfd_reloc_status_type r;
12996 arelent bfd_reloc;
12997 char sym_type;
12998 bfd_boolean unresolved_reloc = FALSE;
12999 char *error_message = NULL;
13000
13001 r_symndx = ELF32_R_SYM (rel->r_info);
13002 r_type = ELF32_R_TYPE (rel->r_info);
13003 r_type = arm_real_reloc_type (globals, r_type);
13004
13005 if ( r_type == R_ARM_GNU_VTENTRY
13006 || r_type == R_ARM_GNU_VTINHERIT)
13007 continue;
13008
13009 howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13010
13011 if (howto == NULL)
13012 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13013
13014 h = NULL;
13015 sym = NULL;
13016 sec = NULL;
13017
13018 if (r_symndx < symtab_hdr->sh_info)
13019 {
13020 sym = local_syms + r_symndx;
13021 sym_type = ELF32_ST_TYPE (sym->st_info);
13022 sec = local_sections[r_symndx];
13023
13024 /* An object file might have a reference to a local
13025 undefined symbol. This is a daft object file, but we
13026 should at least do something about it. V4BX & NONE
13027 relocations do not use the symbol and are explicitly
13028 allowed to use the undefined symbol, so allow those.
13029 Likewise for relocations against STN_UNDEF. */
13030 if (r_type != R_ARM_V4BX
13031 && r_type != R_ARM_NONE
13032 && r_symndx != STN_UNDEF
13033 && bfd_is_und_section (sec)
13034 && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13035 (*info->callbacks->undefined_symbol)
13036 (info, bfd_elf_string_from_elf_section
13037 (input_bfd, symtab_hdr->sh_link, sym->st_name),
13038 input_bfd, input_section,
13039 rel->r_offset, TRUE);
13040
13041 if (globals->use_rel)
13042 {
13043 relocation = (sec->output_section->vma
13044 + sec->output_offset
13045 + sym->st_value);
13046 if (!bfd_link_relocatable (info)
13047 && (sec->flags & SEC_MERGE)
13048 && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13049 {
13050 asection *msec;
13051 bfd_vma addend, value;
13052
13053 switch (r_type)
13054 {
13055 case R_ARM_MOVW_ABS_NC:
13056 case R_ARM_MOVT_ABS:
13057 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13058 addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13059 addend = (addend ^ 0x8000) - 0x8000;
13060 break;
13061
13062 case R_ARM_THM_MOVW_ABS_NC:
13063 case R_ARM_THM_MOVT_ABS:
13064 value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13065 << 16;
13066 value |= bfd_get_16 (input_bfd,
13067 contents + rel->r_offset + 2);
13068 addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13069 | ((value & 0x04000000) >> 15);
13070 addend = (addend ^ 0x8000) - 0x8000;
13071 break;
13072
13073 default:
13074 if (howto->rightshift
13075 || (howto->src_mask & (howto->src_mask + 1)))
13076 {
13077 _bfd_error_handler
13078 /* xgettext:c-format */
13079 (_("%pB(%pA+%#" PRIx64 "): "
13080 "%s relocation against SEC_MERGE section"),
13081 input_bfd, input_section,
13082 (uint64_t) rel->r_offset, howto->name);
13083 return FALSE;
13084 }
13085
13086 value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13087
13088 /* Get the (signed) value from the instruction. */
13089 addend = value & howto->src_mask;
13090 if (addend & ((howto->src_mask + 1) >> 1))
13091 {
13092 bfd_signed_vma mask;
13093
13094 mask = -1;
13095 mask &= ~ howto->src_mask;
13096 addend |= mask;
13097 }
13098 break;
13099 }
13100
13101 msec = sec;
13102 addend =
13103 _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13104 - relocation;
13105 addend += msec->output_section->vma + msec->output_offset;
13106
13107 /* Cases here must match those in the preceding
13108 switch statement. */
13109 switch (r_type)
13110 {
13111 case R_ARM_MOVW_ABS_NC:
13112 case R_ARM_MOVT_ABS:
13113 value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13114 | (addend & 0xfff);
13115 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13116 break;
13117
13118 case R_ARM_THM_MOVW_ABS_NC:
13119 case R_ARM_THM_MOVT_ABS:
13120 value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13121 | (addend & 0xff) | ((addend & 0x0800) << 15);
13122 bfd_put_16 (input_bfd, value >> 16,
13123 contents + rel->r_offset);
13124 bfd_put_16 (input_bfd, value,
13125 contents + rel->r_offset + 2);
13126 break;
13127
13128 default:
13129 value = (value & ~ howto->dst_mask)
13130 | (addend & howto->dst_mask);
13131 bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13132 break;
13133 }
13134 }
13135 }
13136 else
13137 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13138 }
13139 else
13140 {
13141 bfd_boolean warned, ignored;
13142
13143 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13144 r_symndx, symtab_hdr, sym_hashes,
13145 h, sec, relocation,
13146 unresolved_reloc, warned, ignored);
13147
13148 sym_type = h->type;
13149 }
13150
13151 if (sec != NULL && discarded_section (sec))
13152 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13153 rel, 1, relend, howto, 0, contents);
13154
13155 if (bfd_link_relocatable (info))
13156 {
13157 /* This is a relocatable link. We don't have to change
13158 anything, unless the reloc is against a section symbol,
13159 in which case we have to adjust according to where the
13160 section symbol winds up in the output section. */
13161 if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13162 {
13163 if (globals->use_rel)
13164 arm_add_to_rel (input_bfd, contents + rel->r_offset,
13165 howto, (bfd_signed_vma) sec->output_offset);
13166 else
13167 rel->r_addend += sec->output_offset;
13168 }
13169 continue;
13170 }
13171
13172 if (h != NULL)
13173 name = h->root.root.string;
13174 else
13175 {
13176 name = (bfd_elf_string_from_elf_section
13177 (input_bfd, symtab_hdr->sh_link, sym->st_name));
13178 if (name == NULL || *name == '\0')
13179 name = bfd_section_name (input_bfd, sec);
13180 }
13181
13182 if (r_symndx != STN_UNDEF
13183 && r_type != R_ARM_NONE
13184 && (h == NULL
13185 || h->root.type == bfd_link_hash_defined
13186 || h->root.type == bfd_link_hash_defweak)
13187 && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13188 {
13189 _bfd_error_handler
13190 ((sym_type == STT_TLS
13191 /* xgettext:c-format */
13192 ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13193 /* xgettext:c-format */
13194 : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13195 input_bfd,
13196 input_section,
13197 (uint64_t) rel->r_offset,
13198 howto->name,
13199 name);
13200 }
13201
13202 /* We call elf32_arm_final_link_relocate unless we're completely
13203 done, i.e., the relaxation produced the final output we want,
13204 and we won't let anybody mess with it. Also, we have to do
13205 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13206 both in relaxed and non-relaxed cases. */
13207 if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13208 || (IS_ARM_TLS_GNU_RELOC (r_type)
13209 && !((h ? elf32_arm_hash_entry (h)->tls_type :
13210 elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13211 & GOT_TLS_GDESC)))
13212 {
13213 r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13214 contents, rel, h == NULL);
13215 /* This may have been marked unresolved because it came from
13216 a shared library. But we've just dealt with that. */
13217 unresolved_reloc = 0;
13218 }
13219 else
13220 r = bfd_reloc_continue;
13221
13222 if (r == bfd_reloc_continue)
13223 {
13224 unsigned char branch_type =
13225 h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13226 : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13227
13228 r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13229 input_section, contents, rel,
13230 relocation, info, sec, name,
13231 sym_type, branch_type, h,
13232 &unresolved_reloc,
13233 &error_message);
13234 }
13235
13236 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13237 because such sections are not SEC_ALLOC and thus ld.so will
13238 not process them. */
13239 if (unresolved_reloc
13240 && !((input_section->flags & SEC_DEBUGGING) != 0
13241 && h->def_dynamic)
13242 && _bfd_elf_section_offset (output_bfd, info, input_section,
13243 rel->r_offset) != (bfd_vma) -1)
13244 {
13245 _bfd_error_handler
13246 /* xgettext:c-format */
13247 (_("%pB(%pA+%#" PRIx64 "): "
13248 "unresolvable %s relocation against symbol `%s'"),
13249 input_bfd,
13250 input_section,
13251 (uint64_t) rel->r_offset,
13252 howto->name,
13253 h->root.root.string);
13254 return FALSE;
13255 }
13256
13257 if (r != bfd_reloc_ok)
13258 {
13259 switch (r)
13260 {
13261 case bfd_reloc_overflow:
13262 /* If the overflowing reloc was to an undefined symbol,
13263 we have already printed one error message and there
13264 is no point complaining again. */
13265 if (!h || h->root.type != bfd_link_hash_undefined)
13266 (*info->callbacks->reloc_overflow)
13267 (info, (h ? &h->root : NULL), name, howto->name,
13268 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13269 break;
13270
13271 case bfd_reloc_undefined:
13272 (*info->callbacks->undefined_symbol)
13273 (info, name, input_bfd, input_section, rel->r_offset, TRUE);
13274 break;
13275
13276 case bfd_reloc_outofrange:
13277 error_message = _("out of range");
13278 goto common_error;
13279
13280 case bfd_reloc_notsupported:
13281 error_message = _("unsupported relocation");
13282 goto common_error;
13283
13284 case bfd_reloc_dangerous:
13285 /* error_message should already be set. */
13286 goto common_error;
13287
13288 default:
13289 error_message = _("unknown error");
13290 /* Fall through. */
13291
13292 common_error:
13293 BFD_ASSERT (error_message != NULL);
13294 (*info->callbacks->reloc_dangerous)
13295 (info, error_message, input_bfd, input_section, rel->r_offset);
13296 break;
13297 }
13298 }
13299 }
13300
13301 return TRUE;
13302 }
13303
13304 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13305 adds the edit to the start of the list. (The list must be built in order of
13306 ascending TINDEX: the function's callers are primarily responsible for
13307 maintaining that condition). */
13308
13309 static void
13310 add_unwind_table_edit (arm_unwind_table_edit **head,
13311 arm_unwind_table_edit **tail,
13312 arm_unwind_edit_type type,
13313 asection *linked_section,
13314 unsigned int tindex)
13315 {
13316 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13317 xmalloc (sizeof (arm_unwind_table_edit));
13318
13319 new_edit->type = type;
13320 new_edit->linked_section = linked_section;
13321 new_edit->index = tindex;
13322
13323 if (tindex > 0)
13324 {
13325 new_edit->next = NULL;
13326
13327 if (*tail)
13328 (*tail)->next = new_edit;
13329
13330 (*tail) = new_edit;
13331
13332 if (!*head)
13333 (*head) = new_edit;
13334 }
13335 else
13336 {
13337 new_edit->next = *head;
13338
13339 if (!*tail)
13340 *tail = new_edit;
13341
13342 *head = new_edit;
13343 }
13344 }
13345
13346 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13347
13348 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13349 static void
13350 adjust_exidx_size(asection *exidx_sec, int adjust)
13351 {
13352 asection *out_sec;
13353
13354 if (!exidx_sec->rawsize)
13355 exidx_sec->rawsize = exidx_sec->size;
13356
13357 bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13358 out_sec = exidx_sec->output_section;
13359 /* Adjust size of output section. */
13360 bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
13361 }
13362
13363 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13364 static void
13365 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13366 {
13367 struct _arm_elf_section_data *exidx_arm_data;
13368
13369 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13370 add_unwind_table_edit (
13371 &exidx_arm_data->u.exidx.unwind_edit_list,
13372 &exidx_arm_data->u.exidx.unwind_edit_tail,
13373 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13374
13375 exidx_arm_data->additional_reloc_count++;
13376
13377 adjust_exidx_size(exidx_sec, 8);
13378 }
13379
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

   1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
   2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
      codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* -1: no entry seen yet; 0: last entry was EXIDX_CANTUNWIND;
     1: inlined unwind opcodes; 2: a normal table entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corresponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  /* Mark the end of the previous region as not-unwindable, since
	     this section has no table of its own.  */
	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Classify each 8-byte table entry by its second word and record
	 deletions for entries that duplicate their predecessor.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after(last_text_sec, last_exidx_sec);

  return TRUE;
}
13564
13565 static bfd_boolean
13566 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13567 bfd *ibfd, const char *name)
13568 {
13569 asection *sec, *osec;
13570
13571 sec = bfd_get_linker_section (ibfd, name);
13572 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13573 return TRUE;
13574
13575 osec = sec->output_section;
13576 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13577 return TRUE;
13578
13579 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13580 sec->output_offset, sec->size))
13581 return FALSE;
13582
13583 return TRUE;
13584 }
13585
13586 static bfd_boolean
13587 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13588 {
13589 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13590 asection *sec, *osec;
13591
13592 if (globals == NULL)
13593 return FALSE;
13594
13595 /* Invoke the regular ELF backend linker to do all the work. */
13596 if (!bfd_elf_final_link (abfd, info))
13597 return FALSE;
13598
13599 /* Process stub sections (eg BE8 encoding, ...). */
13600 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13601 unsigned int i;
13602 for (i=0; i<htab->top_id; i++)
13603 {
13604 sec = htab->stub_group[i].stub_sec;
13605 /* Only process it once, in its link_sec slot. */
13606 if (sec && i == htab->stub_group[i].link_sec->id)
13607 {
13608 osec = sec->output_section;
13609 elf32_arm_write_section (abfd, info, sec, sec->contents);
13610 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13611 sec->output_offset, sec->size))
13612 return FALSE;
13613 }
13614 }
13615
13616 /* Write out any glue sections now that we have created all the
13617 stubs. */
13618 if (globals->bfd_of_glue_owner != NULL)
13619 {
13620 if (! elf32_arm_output_glue_section (info, abfd,
13621 globals->bfd_of_glue_owner,
13622 ARM2THUMB_GLUE_SECTION_NAME))
13623 return FALSE;
13624
13625 if (! elf32_arm_output_glue_section (info, abfd,
13626 globals->bfd_of_glue_owner,
13627 THUMB2ARM_GLUE_SECTION_NAME))
13628 return FALSE;
13629
13630 if (! elf32_arm_output_glue_section (info, abfd,
13631 globals->bfd_of_glue_owner,
13632 VFP11_ERRATUM_VENEER_SECTION_NAME))
13633 return FALSE;
13634
13635 if (! elf32_arm_output_glue_section (info, abfd,
13636 globals->bfd_of_glue_owner,
13637 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13638 return FALSE;
13639
13640 if (! elf32_arm_output_glue_section (info, abfd,
13641 globals->bfd_of_glue_owner,
13642 ARM_BX_GLUE_SECTION_NAME))
13643 return FALSE;
13644 }
13645
13646 return TRUE;
13647 }
13648
/* Return a best guess for the machine number based on the attributes.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  /* Tag_CPU_arch from the object's build attributes drives the mapping.  */
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	char * name;

	/* v5TE covers several distinct machines (iWMMXt, XScale); use the
	   Tag_CPU_name string to disambiguate.  */
	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		/* An XScale may still carry WMMX extensions; check
		   Tag_WMMX_arch for the specific variant.  */
		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	/* No recognised CPU name: plain v5TE.  */
	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
	return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
	return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
	return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
	return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
	return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
	return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
	return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
	return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
	return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
	return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
	return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
	return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
	return bfd_mach_arm_8M_MAIN;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
13731
13732 /* Set the right machine number. */
13733
13734 static bfd_boolean
13735 elf32_arm_object_p (bfd *abfd)
13736 {
13737 unsigned int mach;
13738
13739 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13740
13741 if (mach == bfd_mach_arm_unknown)
13742 {
13743 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13744 mach = bfd_mach_arm_ep9312;
13745 else
13746 mach = bfd_arm_get_mach_from_attributes (abfd);
13747 }
13748
13749 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13750 return TRUE;
13751 }
13752
13753 /* Function to keep ARM specific flags in the ELF header. */
13754
13755 static bfd_boolean
13756 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13757 {
13758 if (elf_flags_init (abfd)
13759 && elf_elfheader (abfd)->e_flags != flags)
13760 {
13761 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13762 {
13763 if (flags & EF_ARM_INTERWORK)
13764 _bfd_error_handler
13765 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13766 abfd);
13767 else
13768 _bfd_error_handler
13769 (_("warning: clearing the interworking flag of %pB due to outside request"),
13770 abfd);
13771 }
13772 }
13773 else
13774 {
13775 elf_elfheader (abfd)->e_flags = flags;
13776 elf_flags_init (abfd) = TRUE;
13777 }
13778
13779 return TRUE;
13780 }
13781
/* Copy backend specific data from one object module to another.
   Returns FALSE when IBFD's flags are incompatible with OBFD's
   (mixed APCS26/APCS32 or float/non-float APCS code).  */

static bfd_boolean
elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
{
  flagword in_flags;
  flagword out_flags;

  /* Only meaningful when both sides are ARM ELF.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* Compatibility checks only apply to legacy (pre-EABI) objects whose
     flags have already been initialised and actually differ.  */
  if (elf_flags_init (obfd)
      && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
      && in_flags != out_flags)
    {
      /* Cannot mix APCS26 and APCS32 code.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	return FALSE;

      /* Cannot mix float APCS and non-float APCS code.  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	return FALSE;

      /* If the src and dest have different interworking flags
	 then turn off the interworking bit.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (out_flags & EF_ARM_INTERWORK)
	    _bfd_error_handler
	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
	       obfd, ibfd);

	  in_flags &= ~EF_ARM_INTERWORK;
	}

      /* Likewise for PIC, though don't warn for this case.  */
      if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
	in_flags &= ~EF_ARM_PIC;
    }

  /* The (possibly adjusted) input flags become the output's flags.  */
  elf_elfheader (obfd)->e_flags = in_flags;
  elf_flags_init (obfd) = TRUE;

  /* Also copy the generic ELF private data.  */
  return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
}
13830
/* Values for Tag_ABI_PCS_R9_use.
   Enumerator order follows the ARM EABI attribute encoding — do not
   reorder.  */
enum
{
  AEABI_R9_V6,
  AEABI_R9_SB,
  AEABI_R9_TLS,
  AEABI_R9_unused
};

/* Values for Tag_ABI_PCS_RW_data.
   Enumerator order follows the ARM EABI attribute encoding — do not
   reorder.  */
enum
{
  AEABI_PCS_RW_data_absolute,
  AEABI_PCS_RW_data_PCrel,
  AEABI_PCS_RW_data_SBrel,
  AEABI_PCS_RW_data_unused
};

/* Values for Tag_ABI_enum_size.
   Enumerator order follows the ARM EABI attribute encoding — do not
   reorder.  */
enum
{
  AEABI_enum_unused,
  AEABI_enum_short,
  AEABI_enum_wide,
  AEABI_enum_forced_wide
};
13857
13858 /* Determine whether an object attribute tag takes an integer, a
13859 string or both. */
13860
13861 static int
13862 elf32_arm_obj_attrs_arg_type (int tag)
13863 {
13864 if (tag == Tag_compatibility)
13865 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13866 else if (tag == Tag_nodefaults)
13867 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13868 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13869 return ATTR_TYPE_FLAG_STR_VAL;
13870 else if (tag < 32)
13871 return ATTR_TYPE_FLAG_INT_VAL;
13872 else
13873 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13874 }
13875
13876 /* The ABI defines that Tag_conformance should be emitted first, and that
13877 Tag_nodefaults should be second (if either is defined). This sets those
13878 two positions, and bumps up the position of all the remaining tags to
13879 compensate. */
13880 static int
13881 elf32_arm_obj_attrs_order (int num)
13882 {
13883 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13884 return Tag_conformance;
13885 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13886 return Tag_nodefaults;
13887 if ((num - 2) < Tag_nodefaults)
13888 return num - 2;
13889 if ((num - 1) < Tag_conformance)
13890 return num - 1;
13891 return num;
13892 }
13893
13894 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
13895 static bfd_boolean
13896 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13897 {
13898 if ((tag & 127) < 64)
13899 {
13900 _bfd_error_handler
13901 (_("%pB: unknown mandatory EABI object attribute %d"),
13902 abfd, tag);
13903 bfd_set_error (bfd_error_bad_value);
13904 return FALSE;
13905 }
13906 else
13907 {
13908 _bfd_error_handler
13909 (_("warning: %pB: unknown EABI object attribute %d"),
13910 abfd, tag);
13911 return TRUE;
13912 }
13913 }
13914
13915 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13916 Returns -1 if no architecture could be read. */
13917
13918 static int
13919 get_secondary_compatible_arch (bfd *abfd)
13920 {
13921 obj_attribute *attr =
13922 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13923
13924 /* Note: the tag and its argument below are uleb128 values, though
13925 currently-defined values fit in one byte for each. */
13926 if (attr->s
13927 && attr->s[0] == Tag_CPU_arch
13928 && (attr->s[1] & 128) != 128
13929 && attr->s[2] == 0)
13930 return attr->s[1];
13931
13932 /* This tag is "safely ignorable", so don't complain if it looks funny. */
13933 return -1;
13934 }
13935
13936 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13937 The tag is removed if ARCH is -1. */
13938
13939 static void
13940 set_secondary_compatible_arch (bfd *abfd, int arch)
13941 {
13942 obj_attribute *attr =
13943 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13944
13945 if (arch == -1)
13946 {
13947 attr->s = NULL;
13948 return;
13949 }
13950
13951 /* Note: the tag and its argument below are uleb128 values, though
13952 currently-defined values fit in one byte for each. */
13953 if (!attr->s)
13954 attr->s = (char *) bfd_alloc (abfd, 3);
13955 attr->s[0] = Tag_CPU_arch;
13956 attr->s[1] = arch;
13957 attr->s[2] = '\0';
13958 }
13959
/* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
   into account.
   OLDTAG/SECONDARY_COMPAT_OUT describe the output BFD so far; NEWTAG and
   SECONDARY_COMPAT describe input bfd IBFD.  Returns the merged tag, or -1
   (after reporting an error) if the architectures conflict.
   *SECONDARY_COMPAT_OUT is updated with the merged secondary tag.  */

static int
tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
		      int newtag, int secondary_compat)
{
#define T(X) TAG_CPU_ARCH_##X
  int tagl, tagh, result;
  /* Each table below is indexed by the *lower* of the two tags being
     merged and gives the merge result when the *higher* tag is the one
     the table is named after.  -1 marks an incompatible pair.  */
  const int v6t2[] =
    {
      T(V6T2),   /* PRE_V4.  */
      T(V6T2),   /* V4.  */
      T(V6T2),   /* V4T.  */
      T(V6T2),   /* V5T.  */
      T(V6T2),   /* V5TE.  */
      T(V6T2),   /* V5TEJ.  */
      T(V6T2),   /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V6T2)    /* V6T2.  */
    };
  const int v6k[] =
    {
      T(V6K),    /* PRE_V4.  */
      T(V6K),    /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K)     /* V6K.  */
    };
  const int v7[] =
    {
      T(V7),     /* PRE_V4.  */
      T(V7),     /* V4.  */
      T(V7),     /* V4T.  */
      T(V7),     /* V5T.  */
      T(V7),     /* V5TE.  */
      T(V7),     /* V5TEJ.  */
      T(V7),     /* V6.  */
      T(V7),     /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V7),     /* V6K.  */
      T(V7)      /* V7.  */
    };
  const int v6_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6_M)    /* V6_M.  */
    };
  const int v6s_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V6K),    /* V4T.  */
      T(V6K),    /* V5T.  */
      T(V6K),    /* V5TE.  */
      T(V6K),    /* V5TEJ.  */
      T(V6K),    /* V6.  */
      T(V6KZ),   /* V6KZ.  */
      T(V7),     /* V6T2.  */
      T(V6K),    /* V6K.  */
      T(V7),     /* V7.  */
      T(V6S_M),  /* V6_M.  */
      T(V6S_M)   /* V6S_M.  */
    };
  const int v7e_m[] =
    {
      -1,        /* PRE_V4.  */
      -1,        /* V4.  */
      T(V7E_M),  /* V4T.  */
      T(V7E_M),  /* V5T.  */
      T(V7E_M),  /* V5TE.  */
      T(V7E_M),  /* V5TEJ.  */
      T(V7E_M),  /* V6.  */
      T(V7E_M),  /* V6KZ.  */
      T(V7E_M),  /* V6T2.  */
      T(V7E_M),  /* V6K.  */
      T(V7E_M),  /* V7.  */
      T(V7E_M),  /* V6_M.  */
      T(V7E_M),  /* V6S_M.  */
      T(V7E_M)   /* V7E_M.  */
    };
  const int v8[] =
    {
      T(V8),     /* PRE_V4.  */
      T(V8),     /* V4.  */
      T(V8),     /* V4T.  */
      T(V8),     /* V5T.  */
      T(V8),     /* V5TE.  */
      T(V8),     /* V5TEJ.  */
      T(V8),     /* V6.  */
      T(V8),     /* V6KZ.  */
      T(V8),     /* V6T2.  */
      T(V8),     /* V6K.  */
      T(V8),     /* V7.  */
      T(V8),     /* V6_M.  */
      T(V8),     /* V6S_M.  */
      T(V8),     /* V7E_M.  */
      T(V8)      /* V8.  */
    };
  const int v8r[] =
    {
      T(V8R),    /* PRE_V4.  */
      T(V8R),    /* V4.  */
      T(V8R),    /* V4T.  */
      T(V8R),    /* V5T.  */
      T(V8R),    /* V5TE.  */
      T(V8R),    /* V5TEJ.  */
      T(V8R),    /* V6.  */
      T(V8R),    /* V6KZ.  */
      T(V8R),    /* V6T2.  */
      T(V8R),    /* V6K.  */
      T(V8R),    /* V7.  */
      T(V8R),    /* V6_M.  */
      T(V8R),    /* V6S_M.  */
      T(V8R),    /* V7E_M.  */
      T(V8),     /* V8.  */
      T(V8R),    /* V8R.  */
    };
  const int v8m_baseline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      -1,		/* V7.  */
      T(V8M_BASE),	/* V6_M.  */
      T(V8M_BASE),	/* V6S_M.  */
      -1,		/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE)	/* V8-M BASELINE.  */
    };
  const int v8m_mainline[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      -1,		/* V4T.  */
      -1,		/* V5T.  */
      -1,		/* V5TE.  */
      -1,		/* V5TEJ.  */
      -1,		/* V6.  */
      -1,		/* V6KZ.  */
      -1,		/* V6T2.  */
      -1,		/* V6K.  */
      T(V8M_MAIN),	/* V7.  */
      T(V8M_MAIN),	/* V6_M.  */
      T(V8M_MAIN),	/* V6S_M.  */
      T(V8M_MAIN),	/* V7E_M.  */
      -1,		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_MAIN),	/* V8-M BASELINE.  */
      T(V8M_MAIN)	/* V8-M MAINLINE.  */
    };
  const int v4t_plus_v6_m[] =
    {
      -1,		/* PRE_V4.  */
      -1,		/* V4.  */
      T(V4T),		/* V4T.  */
      T(V5T),		/* V5T.  */
      T(V5TE),		/* V5TE.  */
      T(V5TEJ),		/* V5TEJ.  */
      T(V6),		/* V6.  */
      T(V6KZ),		/* V6KZ.  */
      T(V6T2),		/* V6T2.  */
      T(V6K),		/* V6K.  */
      T(V7),		/* V7.  */
      T(V6_M),		/* V6_M.  */
      T(V6S_M),		/* V6S_M.  */
      T(V7E_M),		/* V7E_M.  */
      T(V8),		/* V8.  */
      -1,		/* V8R.  */
      T(V8M_BASE),	/* V8-M BASELINE.  */
      T(V8M_MAIN),	/* V8-M MAINLINE.  */
      T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
    };
  /* Table of tables, indexed by the higher tag minus T(V6T2).  */
  const int *comb[] =
    {
      v6t2,
      v6k,
      v7,
      v6_m,
      v6s_m,
      v7e_m,
      v8,
      v8r,
      v8m_baseline,
      v8m_mainline,
      /* Pseudo-architecture.  */
      v4t_plus_v6_m
    };

  /* Check we've not got a higher architecture than we know about.  */

  if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
    {
      _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
      return -1;
    }

  /* Override old tag if we have a Tag_also_compatible_with on the output.  */

  if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
      || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
    oldtag = T(V4T_PLUS_V6_M);

  /* And override the new tag if we have a Tag_also_compatible_with on the
     input.  */

  if ((newtag == T(V6_M) && secondary_compat == T(V4T))
      || (newtag == T(V4T) && secondary_compat == T(V6_M)))
    newtag = T(V4T_PLUS_V6_M);

  tagl = (oldtag < newtag) ? oldtag : newtag;
  result = tagh = (oldtag > newtag) ? oldtag : newtag;

  /* Architectures before V6KZ add features monotonically.  */
  if (tagh <= TAG_CPU_ARCH_V6KZ)
    return result;

  result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;

  /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
     as the canonical version.  */
  if (result == T(V4T_PLUS_V6_M))
    {
      result = T(V4T);
      *secondary_compat_out = T(V6_M);
    }
  else
    *secondary_compat_out = -1;

  if (result == -1)
    {
      _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
			  ibfd, oldtag, newtag);
      return -1;
    }

  return result;
#undef T
}
14222
14223 /* Query attributes object to see if integer divide instructions may be
14224 present in an object. */
14225 static bfd_boolean
14226 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14227 {
14228 int arch = attr[Tag_CPU_arch].i;
14229 int profile = attr[Tag_CPU_arch_profile].i;
14230
14231 switch (attr[Tag_DIV_use].i)
14232 {
14233 case 0:
14234 /* Integer divide allowed if instruction contained in archetecture. */
14235 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14236 return TRUE;
14237 else if (arch >= TAG_CPU_ARCH_V7E_M)
14238 return TRUE;
14239 else
14240 return FALSE;
14241
14242 case 1:
14243 /* Integer divide explicitly prohibited. */
14244 return FALSE;
14245
14246 default:
14247 /* Unrecognised case - treat as allowing divide everywhere. */
14248 case 2:
14249 /* Integer divide allowed in ARM state. */
14250 return TRUE;
14251 }
14252 }
14253
14254 /* Query attributes object to see if integer divide instructions are
14255 forbidden to be in the object. This is not the inverse of
14256 elf32_arm_attributes_accept_div. */
14257 static bfd_boolean
14258 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14259 {
14260 return attr[Tag_DIV_use].i == 1;
14261 }
14262
14263 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14264 are conflicting attributes. */
14265
14266 static bfd_boolean
14267 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14268 {
14269 bfd *obfd = info->output_bfd;
14270 obj_attribute *in_attr;
14271 obj_attribute *out_attr;
14272 /* Some tags have 0 = don't care, 1 = strong requirement,
14273 2 = weak requirement. */
14274 static const int order_021[3] = {0, 2, 1};
14275 int i;
14276 bfd_boolean result = TRUE;
14277 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14278
14279 /* Skip the linker stubs file. This preserves previous behavior
14280 of accepting unknown attributes in the first input file - but
14281 is that a bug? */
14282 if (ibfd->flags & BFD_LINKER_CREATED)
14283 return TRUE;
14284
14285 /* Skip any input that hasn't attribute section.
14286 This enables to link object files without attribute section with
14287 any others. */
14288 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14289 return TRUE;
14290
14291 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14292 {
14293 /* This is the first object. Copy the attributes. */
14294 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14295
14296 out_attr = elf_known_obj_attributes_proc (obfd);
14297
14298 /* Use the Tag_null value to indicate the attributes have been
14299 initialized. */
14300 out_attr[0].i = 1;
14301
14302 /* We do not output objects with Tag_MPextension_use_legacy - we move
14303 the attribute's value to Tag_MPextension_use. */
14304 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14305 {
14306 if (out_attr[Tag_MPextension_use].i != 0
14307 && out_attr[Tag_MPextension_use_legacy].i
14308 != out_attr[Tag_MPextension_use].i)
14309 {
14310 _bfd_error_handler
14311 (_("Error: %pB has both the current and legacy "
14312 "Tag_MPextension_use attributes"), ibfd);
14313 result = FALSE;
14314 }
14315
14316 out_attr[Tag_MPextension_use] =
14317 out_attr[Tag_MPextension_use_legacy];
14318 out_attr[Tag_MPextension_use_legacy].type = 0;
14319 out_attr[Tag_MPextension_use_legacy].i = 0;
14320 }
14321
14322 return result;
14323 }
14324
14325 in_attr = elf_known_obj_attributes_proc (ibfd);
14326 out_attr = elf_known_obj_attributes_proc (obfd);
14327 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14328 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14329 {
14330 /* Ignore mismatches if the object doesn't use floating point or is
14331 floating point ABI independent. */
14332 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14333 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14334 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14335 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14336 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14337 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14338 {
14339 _bfd_error_handler
14340 (_("error: %pB uses VFP register arguments, %pB does not"),
14341 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14342 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14343 result = FALSE;
14344 }
14345 }
14346
14347 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14348 {
14349 /* Merge this attribute with existing attributes. */
14350 switch (i)
14351 {
14352 case Tag_CPU_raw_name:
14353 case Tag_CPU_name:
14354 /* These are merged after Tag_CPU_arch. */
14355 break;
14356
14357 case Tag_ABI_optimization_goals:
14358 case Tag_ABI_FP_optimization_goals:
14359 /* Use the first value seen. */
14360 break;
14361
14362 case Tag_CPU_arch:
14363 {
14364 int secondary_compat = -1, secondary_compat_out = -1;
14365 unsigned int saved_out_attr = out_attr[i].i;
14366 int arch_attr;
14367 static const char *name_table[] =
14368 {
14369 /* These aren't real CPU names, but we can't guess
14370 that from the architecture version alone. */
14371 "Pre v4",
14372 "ARM v4",
14373 "ARM v4T",
14374 "ARM v5T",
14375 "ARM v5TE",
14376 "ARM v5TEJ",
14377 "ARM v6",
14378 "ARM v6KZ",
14379 "ARM v6T2",
14380 "ARM v6K",
14381 "ARM v7",
14382 "ARM v6-M",
14383 "ARM v6S-M",
14384 "ARM v8",
14385 "",
14386 "ARM v8-M.baseline",
14387 "ARM v8-M.mainline",
14388 };
14389
14390 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14391 secondary_compat = get_secondary_compatible_arch (ibfd);
14392 secondary_compat_out = get_secondary_compatible_arch (obfd);
14393 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14394 &secondary_compat_out,
14395 in_attr[i].i,
14396 secondary_compat);
14397
14398 /* Return with error if failed to merge. */
14399 if (arch_attr == -1)
14400 return FALSE;
14401
14402 out_attr[i].i = arch_attr;
14403
14404 set_secondary_compatible_arch (obfd, secondary_compat_out);
14405
14406 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14407 if (out_attr[i].i == saved_out_attr)
14408 ; /* Leave the names alone. */
14409 else if (out_attr[i].i == in_attr[i].i)
14410 {
14411 /* The output architecture has been changed to match the
14412 input architecture. Use the input names. */
14413 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14414 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14415 : NULL;
14416 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14417 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14418 : NULL;
14419 }
14420 else
14421 {
14422 out_attr[Tag_CPU_name].s = NULL;
14423 out_attr[Tag_CPU_raw_name].s = NULL;
14424 }
14425
14426 /* If we still don't have a value for Tag_CPU_name,
14427 make one up now. Tag_CPU_raw_name remains blank. */
14428 if (out_attr[Tag_CPU_name].s == NULL
14429 && out_attr[i].i < ARRAY_SIZE (name_table))
14430 out_attr[Tag_CPU_name].s =
14431 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14432 }
14433 break;
14434
14435 case Tag_ARM_ISA_use:
14436 case Tag_THUMB_ISA_use:
14437 case Tag_WMMX_arch:
14438 case Tag_Advanced_SIMD_arch:
14439 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14440 case Tag_ABI_FP_rounding:
14441 case Tag_ABI_FP_exceptions:
14442 case Tag_ABI_FP_user_exceptions:
14443 case Tag_ABI_FP_number_model:
14444 case Tag_FP_HP_extension:
14445 case Tag_CPU_unaligned_access:
14446 case Tag_T2EE_use:
14447 case Tag_MPextension_use:
14448 /* Use the largest value specified. */
14449 if (in_attr[i].i > out_attr[i].i)
14450 out_attr[i].i = in_attr[i].i;
14451 break;
14452
14453 case Tag_ABI_align_preserved:
14454 case Tag_ABI_PCS_RO_data:
14455 /* Use the smallest value specified. */
14456 if (in_attr[i].i < out_attr[i].i)
14457 out_attr[i].i = in_attr[i].i;
14458 break;
14459
14460 case Tag_ABI_align_needed:
14461 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14462 && (in_attr[Tag_ABI_align_preserved].i == 0
14463 || out_attr[Tag_ABI_align_preserved].i == 0))
14464 {
14465 /* This error message should be enabled once all non-conformant
14466 binaries in the toolchain have had the attributes set
14467 properly.
14468 _bfd_error_handler
14469 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14470 obfd, ibfd);
14471 result = FALSE; */
14472 }
14473 /* Fall through. */
14474 case Tag_ABI_FP_denormal:
14475 case Tag_ABI_PCS_GOT_use:
14476 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14477 value if greater than 2 (for future-proofing). */
14478 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14479 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14480 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14481 out_attr[i].i = in_attr[i].i;
14482 break;
14483
14484 case Tag_Virtualization_use:
14485 /* The virtualization tag effectively stores two bits of
14486 information: the intended use of TrustZone (in bit 0), and the
14487 intended use of Virtualization (in bit 1). */
14488 if (out_attr[i].i == 0)
14489 out_attr[i].i = in_attr[i].i;
14490 else if (in_attr[i].i != 0
14491 && in_attr[i].i != out_attr[i].i)
14492 {
14493 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14494 out_attr[i].i = 3;
14495 else
14496 {
14497 _bfd_error_handler
14498 (_("error: %pB: unable to merge virtualization attributes "
14499 "with %pB"),
14500 obfd, ibfd);
14501 result = FALSE;
14502 }
14503 }
14504 break;
14505
14506 case Tag_CPU_arch_profile:
14507 if (out_attr[i].i != in_attr[i].i)
14508 {
14509 /* 0 will merge with anything.
14510 'A' and 'S' merge to 'A'.
14511 'R' and 'S' merge to 'R'.
14512 'M' and 'A|R|S' is an error. */
14513 if (out_attr[i].i == 0
14514 || (out_attr[i].i == 'S'
14515 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14516 out_attr[i].i = in_attr[i].i;
14517 else if (in_attr[i].i == 0
14518 || (in_attr[i].i == 'S'
14519 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14520 ; /* Do nothing. */
14521 else
14522 {
14523 _bfd_error_handler
14524 (_("error: %pB: conflicting architecture profiles %c/%c"),
14525 ibfd,
14526 in_attr[i].i ? in_attr[i].i : '0',
14527 out_attr[i].i ? out_attr[i].i : '0');
14528 result = FALSE;
14529 }
14530 }
14531 break;
14532
14533 case Tag_DSP_extension:
14534 /* No need to change output value if any of:
14535 - pre (<=) ARMv5T input architecture (do not have DSP)
14536 - M input profile not ARMv7E-M and do not have DSP. */
14537 if (in_attr[Tag_CPU_arch].i <= 3
14538 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14539 && in_attr[Tag_CPU_arch].i != 13
14540 && in_attr[i].i == 0))
14541 ; /* Do nothing. */
14542 /* Output value should be 0 if DSP part of architecture, ie.
14543 - post (>=) ARMv5te architecture output
14544 - A, R or S profile output or ARMv7E-M output architecture. */
14545 else if (out_attr[Tag_CPU_arch].i >= 4
14546 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14547 || out_attr[Tag_CPU_arch_profile].i == 'R'
14548 || out_attr[Tag_CPU_arch_profile].i == 'S'
14549 || out_attr[Tag_CPU_arch].i == 13))
14550 out_attr[i].i = 0;
14551 /* Otherwise, DSP instructions are added and not part of output
14552 architecture. */
14553 else
14554 out_attr[i].i = 1;
14555 break;
14556
14557 case Tag_FP_arch:
14558 {
14559 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14560 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14561 when it's 0. It might mean absence of FP hardware if
14562 Tag_FP_arch is zero. */
14563
14564 #define VFP_VERSION_COUNT 9
14565 static const struct
14566 {
14567 int ver;
14568 int regs;
14569 } vfp_versions[VFP_VERSION_COUNT] =
14570 {
14571 {0, 0},
14572 {1, 16},
14573 {2, 16},
14574 {3, 32},
14575 {3, 16},
14576 {4, 32},
14577 {4, 16},
14578 {8, 32},
14579 {8, 16}
14580 };
14581 int ver;
14582 int regs;
14583 int newval;
14584
14585 /* If the output has no requirement about FP hardware,
14586 follow the requirement of the input. */
14587 if (out_attr[i].i == 0)
14588 {
14589 /* This assert is still reasonable, we shouldn't
14590 produce the suspicious build attribute
14591 combination (See below for in_attr). */
14592 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14593 out_attr[i].i = in_attr[i].i;
14594 out_attr[Tag_ABI_HardFP_use].i
14595 = in_attr[Tag_ABI_HardFP_use].i;
14596 break;
14597 }
14598 /* If the input has no requirement about FP hardware, do
14599 nothing. */
14600 else if (in_attr[i].i == 0)
14601 {
14602 /* We used to assert that Tag_ABI_HardFP_use was
14603 zero here, but we should never assert when
14604 consuming an object file that has suspicious
14605 build attributes. The single precision variant
14606 of 'no FP architecture' is still 'no FP
14607 architecture', so we just ignore the tag in this
14608 case. */
14609 break;
14610 }
14611
14612 /* Both the input and the output have nonzero Tag_FP_arch.
14613 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14614
14615 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14616 do nothing. */
14617 if (in_attr[Tag_ABI_HardFP_use].i == 0
14618 && out_attr[Tag_ABI_HardFP_use].i == 0)
14619 ;
14620 /* If the input and the output have different Tag_ABI_HardFP_use,
14621 the combination of them is 0 (implied by Tag_FP_arch). */
14622 else if (in_attr[Tag_ABI_HardFP_use].i
14623 != out_attr[Tag_ABI_HardFP_use].i)
14624 out_attr[Tag_ABI_HardFP_use].i = 0;
14625
14626 /* Now we can handle Tag_FP_arch. */
14627
14628 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14629 pick the biggest. */
14630 if (in_attr[i].i >= VFP_VERSION_COUNT
14631 && in_attr[i].i > out_attr[i].i)
14632 {
14633 out_attr[i] = in_attr[i];
14634 break;
14635 }
14636 /* The output uses the superset of input features
14637 (ISA version) and registers. */
14638 ver = vfp_versions[in_attr[i].i].ver;
14639 if (ver < vfp_versions[out_attr[i].i].ver)
14640 ver = vfp_versions[out_attr[i].i].ver;
14641 regs = vfp_versions[in_attr[i].i].regs;
14642 if (regs < vfp_versions[out_attr[i].i].regs)
14643 regs = vfp_versions[out_attr[i].i].regs;
14644 /* This assumes all possible supersets are also a valid
14645 options. */
14646 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14647 {
14648 if (regs == vfp_versions[newval].regs
14649 && ver == vfp_versions[newval].ver)
14650 break;
14651 }
14652 out_attr[i].i = newval;
14653 }
14654 break;
14655 case Tag_PCS_config:
14656 if (out_attr[i].i == 0)
14657 out_attr[i].i = in_attr[i].i;
14658 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14659 {
14660 /* It's sometimes ok to mix different configs, so this is only
14661 a warning. */
14662 _bfd_error_handler
14663 (_("warning: %pB: conflicting platform configuration"), ibfd);
14664 }
14665 break;
14666 case Tag_ABI_PCS_R9_use:
14667 if (in_attr[i].i != out_attr[i].i
14668 && out_attr[i].i != AEABI_R9_unused
14669 && in_attr[i].i != AEABI_R9_unused)
14670 {
14671 _bfd_error_handler
14672 (_("error: %pB: conflicting use of R9"), ibfd);
14673 result = FALSE;
14674 }
14675 if (out_attr[i].i == AEABI_R9_unused)
14676 out_attr[i].i = in_attr[i].i;
14677 break;
14678 case Tag_ABI_PCS_RW_data:
14679 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14680 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14681 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14682 {
14683 _bfd_error_handler
14684 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14685 ibfd);
14686 result = FALSE;
14687 }
14688 /* Use the smallest value specified. */
14689 if (in_attr[i].i < out_attr[i].i)
14690 out_attr[i].i = in_attr[i].i;
14691 break;
14692 case Tag_ABI_PCS_wchar_t:
14693 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14694 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14695 {
14696 _bfd_error_handler
14697 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14698 ibfd, in_attr[i].i, out_attr[i].i);
14699 }
14700 else if (in_attr[i].i && !out_attr[i].i)
14701 out_attr[i].i = in_attr[i].i;
14702 break;
14703 case Tag_ABI_enum_size:
14704 if (in_attr[i].i != AEABI_enum_unused)
14705 {
14706 if (out_attr[i].i == AEABI_enum_unused
14707 || out_attr[i].i == AEABI_enum_forced_wide)
14708 {
14709 /* The existing object is compatible with anything.
14710 Use whatever requirements the new object has. */
14711 out_attr[i].i = in_attr[i].i;
14712 }
14713 else if (in_attr[i].i != AEABI_enum_forced_wide
14714 && out_attr[i].i != in_attr[i].i
14715 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14716 {
14717 static const char *aeabi_enum_names[] =
14718 { "", "variable-size", "32-bit", "" };
14719 const char *in_name =
14720 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14721 ? aeabi_enum_names[in_attr[i].i]
14722 : "<unknown>";
14723 const char *out_name =
14724 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14725 ? aeabi_enum_names[out_attr[i].i]
14726 : "<unknown>";
14727 _bfd_error_handler
14728 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14729 ibfd, in_name, out_name);
14730 }
14731 }
14732 break;
14733 case Tag_ABI_VFP_args:
14734 /* Aready done. */
14735 break;
14736 case Tag_ABI_WMMX_args:
14737 if (in_attr[i].i != out_attr[i].i)
14738 {
14739 _bfd_error_handler
14740 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14741 ibfd, obfd);
14742 result = FALSE;
14743 }
14744 break;
14745 case Tag_compatibility:
14746 /* Merged in target-independent code. */
14747 break;
14748 case Tag_ABI_HardFP_use:
14749 /* This is handled along with Tag_FP_arch. */
14750 break;
14751 case Tag_ABI_FP_16bit_format:
14752 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14753 {
14754 if (in_attr[i].i != out_attr[i].i)
14755 {
14756 _bfd_error_handler
14757 (_("error: fp16 format mismatch between %pB and %pB"),
14758 ibfd, obfd);
14759 result = FALSE;
14760 }
14761 }
14762 if (in_attr[i].i != 0)
14763 out_attr[i].i = in_attr[i].i;
14764 break;
14765
14766 case Tag_DIV_use:
14767 /* A value of zero on input means that the divide instruction may
14768 be used if available in the base architecture as specified via
14769 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
14770 the user did not want divide instructions. A value of 2
14771 explicitly means that divide instructions were allowed in ARM
14772 and Thumb state. */
14773 if (in_attr[i].i == out_attr[i].i)
14774 /* Do nothing. */ ;
14775 else if (elf32_arm_attributes_forbid_div (in_attr)
14776 && !elf32_arm_attributes_accept_div (out_attr))
14777 out_attr[i].i = 1;
14778 else if (elf32_arm_attributes_forbid_div (out_attr)
14779 && elf32_arm_attributes_accept_div (in_attr))
14780 out_attr[i].i = in_attr[i].i;
14781 else if (in_attr[i].i == 2)
14782 out_attr[i].i = in_attr[i].i;
14783 break;
14784
14785 case Tag_MPextension_use_legacy:
14786 /* We don't output objects with Tag_MPextension_use_legacy - we
14787 move the value to Tag_MPextension_use. */
14788 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14789 {
14790 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14791 {
14792 _bfd_error_handler
14793 (_("%pB has both the current and legacy "
14794 "Tag_MPextension_use attributes"),
14795 ibfd);
14796 result = FALSE;
14797 }
14798 }
14799
14800 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14801 out_attr[Tag_MPextension_use] = in_attr[i];
14802
14803 break;
14804
14805 case Tag_nodefaults:
14806 /* This tag is set if it exists, but the value is unused (and is
14807 typically zero). We don't actually need to do anything here -
14808 the merge happens automatically when the type flags are merged
14809 below. */
14810 break;
14811 case Tag_also_compatible_with:
14812 /* Already done in Tag_CPU_arch. */
14813 break;
14814 case Tag_conformance:
14815 /* Keep the attribute if it matches. Throw it away otherwise.
14816 No attribute means no claim to conform. */
14817 if (!in_attr[i].s || !out_attr[i].s
14818 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14819 out_attr[i].s = NULL;
14820 break;
14821
14822 default:
14823 result
14824 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14825 }
14826
14827 /* If out_attr was copied from in_attr then it won't have a type yet. */
14828 if (in_attr[i].type && !out_attr[i].type)
14829 out_attr[i].type = in_attr[i].type;
14830 }
14831
14832 /* Merge Tag_compatibility attributes and any common GNU ones. */
14833 if (!_bfd_elf_merge_object_attributes (ibfd, info))
14834 return FALSE;
14835
14836 /* Check for any attributes not known on ARM. */
14837 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14838
14839 return result;
14840 }
14841
14842
14843 /* Return TRUE if the two EABI versions are incompatible. */
14844
14845 static bfd_boolean
14846 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14847 {
14848 /* v4 and v5 are the same spec before and after it was released,
14849 so allow mixing them. */
14850 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14851 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14852 return TRUE;
14853
14854 return (iver == over);
14855 }
14856
14857 /* Merge backend specific data from an object file to the output
14858 object file when linking. */
14859
14860 static bfd_boolean
14861 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14862
/* Display the flags field.  Decodes e_flags according to the EABI
   version encoded in them and prints a human-readable description to
   PTR (a FILE *).  Always returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Each case below clears the flag bits it has decoded so that any
     leftover (unrecognised) bits can be reported at the end.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    /* The BE8/LE8 bits are common to v4 and v5, so v4 jumps here.  */
    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* These two bits are decoded regardless of EABI version.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything left over was not recognised by the cases above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
15007
15008 static int
15009 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15010 {
15011 switch (ELF_ST_TYPE (elf_sym->st_info))
15012 {
15013 case STT_ARM_TFUNC:
15014 return ELF_ST_TYPE (elf_sym->st_info);
15015
15016 case STT_ARM_16BIT:
15017 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15018 This allows us to distinguish between data used by Thumb instructions
15019 and non-data (which is probably code) inside Thumb regions of an
15020 executable. */
15021 if (type != STT_OBJECT && type != STT_TLS)
15022 return ELF_ST_TYPE (elf_sym->st_info);
15023 break;
15024
15025 default:
15026 break;
15027 }
15028
15029 return type;
15030 }
15031
15032 static asection *
15033 elf32_arm_gc_mark_hook (asection *sec,
15034 struct bfd_link_info *info,
15035 Elf_Internal_Rela *rel,
15036 struct elf_link_hash_entry *h,
15037 Elf_Internal_Sym *sym)
15038 {
15039 if (h != NULL)
15040 switch (ELF32_R_TYPE (rel->r_info))
15041 {
15042 case R_ARM_GNU_VTINHERIT:
15043 case R_ARM_GNU_VTENTRY:
15044 return NULL;
15045 }
15046
15047 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15048 }
15049
15050 /* Look through the relocs for a section during the first phase. */
15051
15052 static bfd_boolean
15053 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15054 asection *sec, const Elf_Internal_Rela *relocs)
15055 {
15056 Elf_Internal_Shdr *symtab_hdr;
15057 struct elf_link_hash_entry **sym_hashes;
15058 const Elf_Internal_Rela *rel;
15059 const Elf_Internal_Rela *rel_end;
15060 bfd *dynobj;
15061 asection *sreloc;
15062 struct elf32_arm_link_hash_table *htab;
15063 bfd_boolean call_reloc_p;
15064 bfd_boolean may_become_dynamic_p;
15065 bfd_boolean may_need_local_target_p;
15066 unsigned long nsyms;
15067
15068 if (bfd_link_relocatable (info))
15069 return TRUE;
15070
15071 BFD_ASSERT (is_arm_elf (abfd));
15072
15073 htab = elf32_arm_hash_table (info);
15074 if (htab == NULL)
15075 return FALSE;
15076
15077 sreloc = NULL;
15078
15079 /* Create dynamic sections for relocatable executables so that we can
15080 copy relocations. */
15081 if (htab->root.is_relocatable_executable
15082 && ! htab->root.dynamic_sections_created)
15083 {
15084 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15085 return FALSE;
15086 }
15087
15088 if (htab->root.dynobj == NULL)
15089 htab->root.dynobj = abfd;
15090 if (!create_ifunc_sections (info))
15091 return FALSE;
15092
15093 dynobj = htab->root.dynobj;
15094
15095 symtab_hdr = & elf_symtab_hdr (abfd);
15096 sym_hashes = elf_sym_hashes (abfd);
15097 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15098
15099 rel_end = relocs + sec->reloc_count;
15100 for (rel = relocs; rel < rel_end; rel++)
15101 {
15102 Elf_Internal_Sym *isym;
15103 struct elf_link_hash_entry *h;
15104 struct elf32_arm_link_hash_entry *eh;
15105 unsigned int r_symndx;
15106 int r_type;
15107
15108 r_symndx = ELF32_R_SYM (rel->r_info);
15109 r_type = ELF32_R_TYPE (rel->r_info);
15110 r_type = arm_real_reloc_type (htab, r_type);
15111
15112 if (r_symndx >= nsyms
15113 /* PR 9934: It is possible to have relocations that do not
15114 refer to symbols, thus it is also possible to have an
15115 object file containing relocations but no symbol table. */
15116 && (r_symndx > STN_UNDEF || nsyms > 0))
15117 {
15118 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15119 r_symndx);
15120 return FALSE;
15121 }
15122
15123 h = NULL;
15124 isym = NULL;
15125 if (nsyms > 0)
15126 {
15127 if (r_symndx < symtab_hdr->sh_info)
15128 {
15129 /* A local symbol. */
15130 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15131 abfd, r_symndx);
15132 if (isym == NULL)
15133 return FALSE;
15134 }
15135 else
15136 {
15137 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15138 while (h->root.type == bfd_link_hash_indirect
15139 || h->root.type == bfd_link_hash_warning)
15140 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15141 }
15142 }
15143
15144 eh = (struct elf32_arm_link_hash_entry *) h;
15145
15146 call_reloc_p = FALSE;
15147 may_become_dynamic_p = FALSE;
15148 may_need_local_target_p = FALSE;
15149
15150 /* Could be done earlier, if h were already available. */
15151 r_type = elf32_arm_tls_transition (info, r_type, h);
15152 switch (r_type)
15153 {
15154 case R_ARM_GOTOFFFUNCDESC:
15155 {
15156 if (h == NULL)
15157 {
15158 if (!elf32_arm_allocate_local_sym_info (abfd))
15159 return FALSE;
15160 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15161 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15162 }
15163 else
15164 {
15165 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15166 }
15167 }
15168 break;
15169
15170 case R_ARM_GOTFUNCDESC:
15171 {
15172 if (h == NULL)
15173 {
15174 /* Such a relocation is not supposed to be generated
15175 by gcc on a static function. */
15176 /* Anyway if needed it could be handled. */
15177 abort();
15178 }
15179 else
15180 {
15181 eh->fdpic_cnts.gotfuncdesc_cnt++;
15182 }
15183 }
15184 break;
15185
15186 case R_ARM_FUNCDESC:
15187 {
15188 if (h == NULL)
15189 {
15190 if (!elf32_arm_allocate_local_sym_info (abfd))
15191 return FALSE;
15192 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15193 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15194 }
15195 else
15196 {
15197 eh->fdpic_cnts.funcdesc_cnt++;
15198 }
15199 }
15200 break;
15201
15202 case R_ARM_GOT32:
15203 case R_ARM_GOT_PREL:
15204 case R_ARM_TLS_GD32:
15205 case R_ARM_TLS_GD32_FDPIC:
15206 case R_ARM_TLS_IE32:
15207 case R_ARM_TLS_IE32_FDPIC:
15208 case R_ARM_TLS_GOTDESC:
15209 case R_ARM_TLS_DESCSEQ:
15210 case R_ARM_THM_TLS_DESCSEQ:
15211 case R_ARM_TLS_CALL:
15212 case R_ARM_THM_TLS_CALL:
15213 /* This symbol requires a global offset table entry. */
15214 {
15215 int tls_type, old_tls_type;
15216
15217 switch (r_type)
15218 {
15219 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15220 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15221
15222 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15223 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15224
15225 case R_ARM_TLS_GOTDESC:
15226 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15227 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15228 tls_type = GOT_TLS_GDESC; break;
15229
15230 default: tls_type = GOT_NORMAL; break;
15231 }
15232
15233 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15234 info->flags |= DF_STATIC_TLS;
15235
15236 if (h != NULL)
15237 {
15238 h->got.refcount++;
15239 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15240 }
15241 else
15242 {
15243 /* This is a global offset table entry for a local symbol. */
15244 if (!elf32_arm_allocate_local_sym_info (abfd))
15245 return FALSE;
15246 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15247 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15248 }
15249
15250 /* If a variable is accessed with both tls methods, two
15251 slots may be created. */
15252 if (GOT_TLS_GD_ANY_P (old_tls_type)
15253 && GOT_TLS_GD_ANY_P (tls_type))
15254 tls_type |= old_tls_type;
15255
15256 /* We will already have issued an error message if there
15257 is a TLS/non-TLS mismatch, based on the symbol
15258 type. So just combine any TLS types needed. */
15259 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15260 && tls_type != GOT_NORMAL)
15261 tls_type |= old_tls_type;
15262
15263 /* If the symbol is accessed in both IE and GDESC
15264 method, we're able to relax. Turn off the GDESC flag,
15265 without messing up with any other kind of tls types
15266 that may be involved. */
15267 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15268 tls_type &= ~GOT_TLS_GDESC;
15269
15270 if (old_tls_type != tls_type)
15271 {
15272 if (h != NULL)
15273 elf32_arm_hash_entry (h)->tls_type = tls_type;
15274 else
15275 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15276 }
15277 }
15278 /* Fall through. */
15279
15280 case R_ARM_TLS_LDM32:
15281 case R_ARM_TLS_LDM32_FDPIC:
15282 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15283 htab->tls_ldm_got.refcount++;
15284 /* Fall through. */
15285
15286 case R_ARM_GOTOFF32:
15287 case R_ARM_GOTPC:
15288 if (htab->root.sgot == NULL
15289 && !create_got_section (htab->root.dynobj, info))
15290 return FALSE;
15291 break;
15292
15293 case R_ARM_PC24:
15294 case R_ARM_PLT32:
15295 case R_ARM_CALL:
15296 case R_ARM_JUMP24:
15297 case R_ARM_PREL31:
15298 case R_ARM_THM_CALL:
15299 case R_ARM_THM_JUMP24:
15300 case R_ARM_THM_JUMP19:
15301 call_reloc_p = TRUE;
15302 may_need_local_target_p = TRUE;
15303 break;
15304
15305 case R_ARM_ABS12:
15306 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15307 ldr __GOTT_INDEX__ offsets. */
15308 if (!htab->vxworks_p)
15309 {
15310 may_need_local_target_p = TRUE;
15311 break;
15312 }
15313 else goto jump_over;
15314
15315 /* Fall through. */
15316
15317 case R_ARM_MOVW_ABS_NC:
15318 case R_ARM_MOVT_ABS:
15319 case R_ARM_THM_MOVW_ABS_NC:
15320 case R_ARM_THM_MOVT_ABS:
15321 if (bfd_link_pic (info))
15322 {
15323 _bfd_error_handler
15324 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15325 abfd, elf32_arm_howto_table_1[r_type].name,
15326 (h) ? h->root.root.string : "a local symbol");
15327 bfd_set_error (bfd_error_bad_value);
15328 return FALSE;
15329 }
15330
15331 /* Fall through. */
15332 case R_ARM_ABS32:
15333 case R_ARM_ABS32_NOI:
15334 jump_over:
15335 if (h != NULL && bfd_link_executable (info))
15336 {
15337 h->pointer_equality_needed = 1;
15338 }
15339 /* Fall through. */
15340 case R_ARM_REL32:
15341 case R_ARM_REL32_NOI:
15342 case R_ARM_MOVW_PREL_NC:
15343 case R_ARM_MOVT_PREL:
15344 case R_ARM_THM_MOVW_PREL_NC:
15345 case R_ARM_THM_MOVT_PREL:
15346
15347 /* Should the interworking branches be listed here? */
15348 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15349 || htab->fdpic_p)
15350 && (sec->flags & SEC_ALLOC) != 0)
15351 {
15352 if (h == NULL
15353 && elf32_arm_howto_from_type (r_type)->pc_relative)
15354 {
15355 /* In shared libraries and relocatable executables,
15356 we treat local relative references as calls;
15357 see the related SYMBOL_CALLS_LOCAL code in
15358 allocate_dynrelocs. */
15359 call_reloc_p = TRUE;
15360 may_need_local_target_p = TRUE;
15361 }
15362 else
15363 /* We are creating a shared library or relocatable
15364 executable, and this is a reloc against a global symbol,
15365 or a non-PC-relative reloc against a local symbol.
15366 We may need to copy the reloc into the output. */
15367 may_become_dynamic_p = TRUE;
15368 }
15369 else
15370 may_need_local_target_p = TRUE;
15371 break;
15372
15373 /* This relocation describes the C++ object vtable hierarchy.
15374 Reconstruct it for later use during GC. */
15375 case R_ARM_GNU_VTINHERIT:
15376 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15377 return FALSE;
15378 break;
15379
15380 /* This relocation describes which C++ vtable entries are actually
15381 used. Record for later use during GC. */
15382 case R_ARM_GNU_VTENTRY:
15383 BFD_ASSERT (h != NULL);
15384 if (h != NULL
15385 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15386 return FALSE;
15387 break;
15388 }
15389
15390 if (h != NULL)
15391 {
15392 if (call_reloc_p)
15393 /* We may need a .plt entry if the function this reloc
15394 refers to is in a different object, regardless of the
15395 symbol's type. We can't tell for sure yet, because
15396 something later might force the symbol local. */
15397 h->needs_plt = 1;
15398 else if (may_need_local_target_p)
15399 /* If this reloc is in a read-only section, we might
15400 need a copy reloc. We can't check reliably at this
15401 stage whether the section is read-only, as input
15402 sections have not yet been mapped to output sections.
15403 Tentatively set the flag for now, and correct in
15404 adjust_dynamic_symbol. */
15405 h->non_got_ref = 1;
15406 }
15407
15408 if (may_need_local_target_p
15409 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15410 {
15411 union gotplt_union *root_plt;
15412 struct arm_plt_info *arm_plt;
15413 struct arm_local_iplt_info *local_iplt;
15414
15415 if (h != NULL)
15416 {
15417 root_plt = &h->plt;
15418 arm_plt = &eh->plt;
15419 }
15420 else
15421 {
15422 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15423 if (local_iplt == NULL)
15424 return FALSE;
15425 root_plt = &local_iplt->root;
15426 arm_plt = &local_iplt->arm;
15427 }
15428
15429 /* If the symbol is a function that doesn't bind locally,
15430 this relocation will need a PLT entry. */
15431 if (root_plt->refcount != -1)
15432 root_plt->refcount += 1;
15433
15434 if (!call_reloc_p)
15435 arm_plt->noncall_refcount++;
15436
15437 /* It's too early to use htab->use_blx here, so we have to
15438 record possible blx references separately from
15439 relocs that definitely need a thumb stub. */
15440
15441 if (r_type == R_ARM_THM_CALL)
15442 arm_plt->maybe_thumb_refcount += 1;
15443
15444 if (r_type == R_ARM_THM_JUMP24
15445 || r_type == R_ARM_THM_JUMP19)
15446 arm_plt->thumb_refcount += 1;
15447 }
15448
15449 if (may_become_dynamic_p)
15450 {
15451 struct elf_dyn_relocs *p, **head;
15452
15453 /* Create a reloc section in dynobj. */
15454 if (sreloc == NULL)
15455 {
15456 sreloc = _bfd_elf_make_dynamic_reloc_section
15457 (sec, dynobj, 2, abfd, ! htab->use_rel);
15458
15459 if (sreloc == NULL)
15460 return FALSE;
15461
15462 /* BPABI objects never have dynamic relocations mapped. */
15463 if (htab->symbian_p)
15464 {
15465 flagword flags;
15466
15467 flags = bfd_get_section_flags (dynobj, sreloc);
15468 flags &= ~(SEC_LOAD | SEC_ALLOC);
15469 bfd_set_section_flags (dynobj, sreloc, flags);
15470 }
15471 }
15472
15473 /* If this is a global symbol, count the number of
15474 relocations we need for this symbol. */
15475 if (h != NULL)
15476 head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15477 else
15478 {
15479 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15480 if (head == NULL)
15481 return FALSE;
15482 }
15483
15484 p = *head;
15485 if (p == NULL || p->sec != sec)
15486 {
15487 bfd_size_type amt = sizeof *p;
15488
15489 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15490 if (p == NULL)
15491 return FALSE;
15492 p->next = *head;
15493 *head = p;
15494 p->sec = sec;
15495 p->count = 0;
15496 p->pc_count = 0;
15497 }
15498
15499 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15500 p->pc_count += 1;
15501 p->count += 1;
15502 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15503 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15504 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15505 that will become rofixup. */
15506 /* This is due to the fact that we suppose all will become rofixup. */
15507 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15508 _bfd_error_handler
15509 (_("FDPIC does not yet support %s relocation"
15510 " to become dynamic for executable"),
15511 elf32_arm_howto_table_1[r_type].name);
15512 abort();
15513 }
15514 }
15515 }
15516
15517 return TRUE;
15518 }
15519
/* Rewrite the relocations of the output SHT_ARM_EXIDX section O so that
   they match the unwind-table edits (deleted entries and an optional
   CANTUNWIND terminator) recorded on O's input sections.  RELDATA
   describes O's relocation section.  Relocations against deleted EXIDX
   entries are dropped, the offsets of surviving relocations are rebiased
   downwards, and an R_ARM_PREL31 relocation is appended when an
   INSERT_EXIDX_CANTUNWIND_AT_END edit is present.  No-op for sections
   that are not SHT_ARM_EXIDX.  */
static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  /* swap_in/swap_out convert between the external (on-disk) and internal
     relocation representations; selected below by entry size (REL/RELA).  */
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;		/* External reloc buffer cursor.  */
  Elf_Internal_Rela *irela_head, *irela;	/* Internal reloc buffer cursor.  */
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only ARM exception-index sections carry unwind edits.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Read every reloc into an internal scratch buffer, filtering and
     rebiasing as we go; the "+ 1" leaves room for the CANTUNWIND reloc
     that may be appended below.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
  /* NOTE(review): the bfd_zmalloc result is not checked for NULL; an
     allocation failure would be dereferenced below — confirm whether an
     OOM path is worth handling here.  */

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Linker-generated relocs are copied through unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  /* Start of this input section's contribution within the output.  */
	  offset = o->vma + i->output_offset;

	  /* Pick whichever input reloc section (rel or rela) matches the
	     output's entry size.  */
	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* EXIDX entries are 8 bytes each, so the entry index is
		     the offset into the section divided by 8.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* BIAS counts how many edit nodes (deleted entries) lie
		     at or before this reloc's entry; EDIT_NODE ends up on
		     the last such node.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the reloc unless it targets an entry that was
		     itself deleted; shift it down past the deletions.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  /* The CANTUNWIND terminator occupies the final 8-byte
		     entry of this input contribution.  */
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy all relocs through.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  /* Shrink the reloc section to the surviving count and write the
     filtered relocs back out over the original contents.  */
  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15680
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;

  /* Run the generic extra-section marking first; the loop below only adds
     the ARM-specific EXIDX and CMSE marking on top of it.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* CMSE entry-function marking only applies to M-profile v8-M outputs.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark each unmarked EXIDX section whose linked text section
	     (via sh_link) has been marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  /* Newly marked sections may in turn keep further EXIDX
		     sections alive, so request another pass.  */
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      /* sh_info is the index of the first global symbol;
		 sym_hashes is indexed from that point, hence the
		 "i - ext_start" below.  */
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		    }
		}
	    }
	}
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
15772
15773 /* Treat mapping symbols as special target symbols. */
15774
15775 static bfd_boolean
15776 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15777 {
15778 return bfd_is_arm_special_symbol_name (sym->name,
15779 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15780 }
15781
15782 /* This is a copy of elf_find_function() from elf.c except that
15783 ARM mapping symbols are ignored when looking for function names
15784 and STT_ARM_TFUNC is considered to a function type. */
15785
15786 static bfd_boolean
15787 arm_elf_find_function (bfd * abfd ATTRIBUTE_UNUSED,
15788 asymbol ** symbols,
15789 asection * section,
15790 bfd_vma offset,
15791 const char ** filename_ptr,
15792 const char ** functionname_ptr)
15793 {
15794 const char * filename = NULL;
15795 asymbol * func = NULL;
15796 bfd_vma low_func = 0;
15797 asymbol ** p;
15798
15799 for (p = symbols; *p != NULL; p++)
15800 {
15801 elf_symbol_type *q;
15802
15803 q = (elf_symbol_type *) *p;
15804
15805 switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15806 {
15807 default:
15808 break;
15809 case STT_FILE:
15810 filename = bfd_asymbol_name (&q->symbol);
15811 break;
15812 case STT_FUNC:
15813 case STT_ARM_TFUNC:
15814 case STT_NOTYPE:
15815 /* Skip mapping symbols. */
15816 if ((q->symbol.flags & BSF_LOCAL)
15817 && bfd_is_arm_special_symbol_name (q->symbol.name,
15818 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15819 continue;
15820 /* Fall through. */
15821 if (bfd_get_section (&q->symbol) == section
15822 && q->symbol.value >= low_func
15823 && q->symbol.value <= offset)
15824 {
15825 func = (asymbol *) q;
15826 low_func = q->symbol.value;
15827 }
15828 break;
15829 }
15830 }
15831
15832 if (func == NULL)
15833 return FALSE;
15834
15835 if (filename_ptr)
15836 *filename_ptr = filename;
15837 if (functionname_ptr)
15838 *functionname_ptr = bfd_asymbol_name (func);
15839
15840 return TRUE;
15841 }
15842
15843
/* Find the nearest line to a particular section and offset, for error
   reporting.  This code is a duplicate of the code in elf.c, except
   that it uses arm_elf_find_function.  */

static bfd_boolean
elf32_arm_find_nearest_line (bfd * abfd,
			     asymbol ** symbols,
			     asection * section,
			     bfd_vma offset,
			     const char ** filename_ptr,
			     const char ** functionname_ptr,
			     unsigned int * line_ptr,
			     unsigned int * discriminator_ptr)
{
  bfd_boolean found = FALSE;

  /* First choice: DWARF2 line info.  */
  if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
				     filename_ptr, functionname_ptr,
				     line_ptr, discriminator_ptr,
				     dwarf_debug_sections, 0,
				     & elf_tdata (abfd)->dwarf2_find_line_info))
    {
      /* DWARF2 may know the file/line but not the function name; fill
	 that (and a missing file name) in from the symbol table, taking
	 care not to overwrite a file name DWARF2 already supplied.  */
      if (!*functionname_ptr)
	arm_elf_find_function (abfd, symbols, section, offset,
			       *filename_ptr ? NULL : filename_ptr,
			       functionname_ptr);

      return TRUE;
    }

  /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
     uses DWARF1.  */

  /* Second choice: stabs debug info.  */
  if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
					     & found, filename_ptr,
					     functionname_ptr, line_ptr,
					     & elf_tdata (abfd)->line_info))
    return FALSE;

  if (found && (*functionname_ptr || *line_ptr))
    return TRUE;

  if (symbols == NULL)
    return FALSE;

  /* Last resort: nearest preceding function symbol, with no line info.  */
  if (! arm_elf_find_function (abfd, symbols, section, offset,
			       filename_ptr, functionname_ptr))
    return FALSE;

  *line_ptr = 0;
  return TRUE;
}
15896
15897 static bfd_boolean
15898 elf32_arm_find_inliner_info (bfd * abfd,
15899 const char ** filename_ptr,
15900 const char ** functionname_ptr,
15901 unsigned int * line_ptr)
15902 {
15903 bfd_boolean found;
15904 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15905 functionname_ptr, line_ptr,
15906 & elf_tdata (abfd)->dwarf2_find_line_info);
15907 return found;
15908 }
15909
15910 /* Find dynamic relocs for H that apply to read-only sections. */
15911
15912 static asection *
15913 readonly_dynrelocs (struct elf_link_hash_entry *h)
15914 {
15915 struct elf_dyn_relocs *p;
15916
15917 for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15918 {
15919 asection *s = p->sec->output_section;
15920
15921 if (s != NULL && (s->flags & SEC_READONLY) != 0)
15922 return p->sec;
15923 }
15924 return NULL;
15925 }
15926
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Function symbols never need a copy reloc; done either way.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      /* Read-only data goes in the relro copy section.  */
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      /* Reserve room for the R_ARM_COPY reloc itself.  */
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  /* Carve out space for the copied variable in S.  */
  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16061
16062 /* Allocate space in .plt, .got and associated reloc sections for
16063 dynamic relocs. */
16064
16065 static bfd_boolean
16066 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16067 {
16068 struct bfd_link_info *info;
16069 struct elf32_arm_link_hash_table *htab;
16070 struct elf32_arm_link_hash_entry *eh;
16071 struct elf_dyn_relocs *p;
16072
16073 if (h->root.type == bfd_link_hash_indirect)
16074 return TRUE;
16075
16076 eh = (struct elf32_arm_link_hash_entry *) h;
16077
16078 info = (struct bfd_link_info *) inf;
16079 htab = elf32_arm_hash_table (info);
16080 if (htab == NULL)
16081 return FALSE;
16082
16083 if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16084 && h->plt.refcount > 0)
16085 {
16086 /* Make sure this symbol is output as a dynamic symbol.
16087 Undefined weak syms won't yet be marked as dynamic. */
16088 if (h->dynindx == -1 && !h->forced_local
16089 && h->root.type == bfd_link_hash_undefweak)
16090 {
16091 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16092 return FALSE;
16093 }
16094
16095 /* If the call in the PLT entry binds locally, the associated
16096 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16097 the usual R_ARM_JUMP_SLOT. Put it in the .iplt section rather
16098 than the .plt section. */
16099 if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16100 {
16101 eh->is_iplt = 1;
16102 if (eh->plt.noncall_refcount == 0
16103 && SYMBOL_REFERENCES_LOCAL (info, h))
16104 /* All non-call references can be resolved directly.
16105 This means that they can (and in some cases, must)
16106 resolve directly to the run-time target, rather than
16107 to the PLT. That in turns means that any .got entry
16108 would be equal to the .igot.plt entry, so there's
16109 no point having both. */
16110 h->got.refcount = 0;
16111 }
16112
16113 if (bfd_link_pic (info)
16114 || eh->is_iplt
16115 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16116 {
16117 elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16118
16119 /* If this symbol is not defined in a regular file, and we are
16120 not generating a shared library, then set the symbol to this
16121 location in the .plt. This is required to make function
16122 pointers compare as equal between the normal executable and
16123 the shared library. */
16124 if (! bfd_link_pic (info)
16125 && !h->def_regular)
16126 {
16127 h->root.u.def.section = htab->root.splt;
16128 h->root.u.def.value = h->plt.offset;
16129
16130 /* Make sure the function is not marked as Thumb, in case
16131 it is the target of an ABS32 relocation, which will
16132 point to the PLT entry. */
16133 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16134 }
16135
16136 /* VxWorks executables have a second set of relocations for
16137 each PLT entry. They go in a separate relocation section,
16138 which is processed by the kernel loader. */
16139 if (htab->vxworks_p && !bfd_link_pic (info))
16140 {
16141 /* There is a relocation for the initial PLT entry:
16142 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_. */
16143 if (h->plt.offset == htab->plt_header_size)
16144 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16145
16146 /* There are two extra relocations for each subsequent
16147 PLT entry: an R_ARM_32 relocation for the GOT entry,
16148 and an R_ARM_32 relocation for the PLT entry. */
16149 elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16150 }
16151 }
16152 else
16153 {
16154 h->plt.offset = (bfd_vma) -1;
16155 h->needs_plt = 0;
16156 }
16157 }
16158 else
16159 {
16160 h->plt.offset = (bfd_vma) -1;
16161 h->needs_plt = 0;
16162 }
16163
16164 eh = (struct elf32_arm_link_hash_entry *) h;
16165 eh->tlsdesc_got = (bfd_vma) -1;
16166
16167 if (h->got.refcount > 0)
16168 {
16169 asection *s;
16170 bfd_boolean dyn;
16171 int tls_type = elf32_arm_hash_entry (h)->tls_type;
16172 int indx;
16173
16174 /* Make sure this symbol is output as a dynamic symbol.
16175 Undefined weak syms won't yet be marked as dynamic. */
16176 if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16177 && h->root.type == bfd_link_hash_undefweak)
16178 {
16179 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16180 return FALSE;
16181 }
16182
16183 if (!htab->symbian_p)
16184 {
16185 s = htab->root.sgot;
16186 h->got.offset = s->size;
16187
16188 if (tls_type == GOT_UNKNOWN)
16189 abort ();
16190
16191 if (tls_type == GOT_NORMAL)
16192 /* Non-TLS symbols need one GOT slot. */
16193 s->size += 4;
16194 else
16195 {
16196 if (tls_type & GOT_TLS_GDESC)
16197 {
16198 /* R_ARM_TLS_DESC needs 2 GOT slots. */
16199 eh->tlsdesc_got
16200 = (htab->root.sgotplt->size
16201 - elf32_arm_compute_jump_table_size (htab));
16202 htab->root.sgotplt->size += 8;
16203 h->got.offset = (bfd_vma) -2;
16204 /* plt.got_offset needs to know there's a TLS_DESC
16205 reloc in the middle of .got.plt. */
16206 htab->num_tls_desc++;
16207 }
16208
16209 if (tls_type & GOT_TLS_GD)
16210 {
16211 /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16212 consecutive GOT slots. If the symbol is both GD
16213 and GDESC, got.offset may have been
16214 overwritten. */
16215 h->got.offset = s->size;
16216 s->size += 8;
16217 }
16218
16219 if (tls_type & GOT_TLS_IE)
16220 /* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16221 slot. */
16222 s->size += 4;
16223 }
16224
16225 dyn = htab->root.dynamic_sections_created;
16226
16227 indx = 0;
16228 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16229 bfd_link_pic (info),
16230 h)
16231 && (!bfd_link_pic (info)
16232 || !SYMBOL_REFERENCES_LOCAL (info, h)))
16233 indx = h->dynindx;
16234
16235 if (tls_type != GOT_NORMAL
16236 && (bfd_link_pic (info) || indx != 0)
16237 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16238 || h->root.type != bfd_link_hash_undefweak))
16239 {
16240 if (tls_type & GOT_TLS_IE)
16241 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16242
16243 if (tls_type & GOT_TLS_GD)
16244 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16245
16246 if (tls_type & GOT_TLS_GDESC)
16247 {
16248 elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16249 /* GDESC needs a trampoline to jump to. */
16250 htab->tls_trampoline = -1;
16251 }
16252
16253 /* Only GD needs it. GDESC just emits one relocation per
16254 2 entries. */
16255 if ((tls_type & GOT_TLS_GD) && indx != 0)
16256 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16257 }
16258 else if (((indx != -1) || htab->fdpic_p)
16259 && !SYMBOL_REFERENCES_LOCAL (info, h))
16260 {
16261 if (htab->root.dynamic_sections_created)
16262 /* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation. */
16263 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16264 }
16265 else if (h->type == STT_GNU_IFUNC
16266 && eh->plt.noncall_refcount == 0)
16267 /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16268 they all resolve dynamically instead. Reserve room for the
16269 GOT entry's R_ARM_IRELATIVE relocation. */
16270 elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16271 else if (bfd_link_pic (info)
16272 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16273 || h->root.type != bfd_link_hash_undefweak))
16274 /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation. */
16275 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16276 else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16277 /* Reserve room for rofixup for FDPIC executable. */
16278 /* TLS relocs do not need space since they are completely
16279 resolved. */
16280 htab->srofixup->size += 4;
16281 }
16282 }
16283 else
16284 h->got.offset = (bfd_vma) -1;
16285
16286 /* FDPIC support. */
16287 if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16288 {
/* Symbol mustn't be exported.  */
16290 if (h->dynindx != -1)
16291 abort();
16292
16293 /* We only allocate one function descriptor with its associated relocation. */
16294 if (eh->fdpic_cnts.funcdesc_offset == -1)
16295 {
16296 asection *s = htab->root.sgot;
16297
16298 eh->fdpic_cnts.funcdesc_offset = s->size;
16299 s->size += 8;
16300 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16301 if (bfd_link_pic(info))
16302 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16303 else
16304 htab->srofixup->size += 8;
16305 }
16306 }
16307
16308 if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16309 {
16310 asection *s = htab->root.sgot;
16311
16312 if (htab->root.dynamic_sections_created && h->dynindx == -1
16313 && !h->forced_local)
16314 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16315 return FALSE;
16316
16317 if (h->dynindx == -1)
16318 {
/* We only allocate one function descriptor with its associated relocation.  */
16320 if (eh->fdpic_cnts.funcdesc_offset == -1)
16321 {
16322
16323 eh->fdpic_cnts.funcdesc_offset = s->size;
16324 s->size += 8;
16325 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16326 if (bfd_link_pic(info))
16327 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16328 else
16329 htab->srofixup->size += 8;
16330 }
16331 }
16332
16333 /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16334 R_ARM_RELATIVE/rofixup relocation on it. */
16335 eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16336 s->size += 4;
16337 if (h->dynindx == -1 && !bfd_link_pic(info))
16338 htab->srofixup->size += 4;
16339 else
16340 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16341 }
16342
16343 if (eh->fdpic_cnts.funcdesc_cnt > 0)
16344 {
16345 if (htab->root.dynamic_sections_created && h->dynindx == -1
16346 && !h->forced_local)
16347 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16348 return FALSE;
16349
16350 if (h->dynindx == -1)
16351 {
16352 /* We only allocate one function descriptor with its associated relocation. */
16353 if (eh->fdpic_cnts.funcdesc_offset == -1)
16354 {
16355 asection *s = htab->root.sgot;
16356
16357 eh->fdpic_cnts.funcdesc_offset = s->size;
16358 s->size += 8;
16359 /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups. */
16360 if (bfd_link_pic(info))
16361 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16362 else
16363 htab->srofixup->size += 8;
16364 }
16365 }
16366 if (h->dynindx == -1 && !bfd_link_pic(info))
16367 {
16368 /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup. */
16369 htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16370 }
16371 else
16372 {
16373 /* Will need one dynamic reloc per reference. will be either
16374 R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols. */
16375 elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16376 eh->fdpic_cnts.funcdesc_cnt);
16377 }
16378 }
16379
16380 /* Allocate stubs for exported Thumb functions on v4t. */
16381 if (!htab->use_blx && h->dynindx != -1
16382 && h->def_regular
16383 && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16384 && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16385 {
16386 struct elf_link_hash_entry * th;
16387 struct bfd_link_hash_entry * bh;
16388 struct elf_link_hash_entry * myh;
16389 char name[1024];
16390 asection *s;
16391 bh = NULL;
/* Create a new symbol to register the real location of the function.  */
16393 s = h->root.u.def.section;
16394 sprintf (name, "__real_%s", h->root.root.string);
16395 _bfd_generic_link_add_one_symbol (info, s->owner,
16396 name, BSF_GLOBAL, s,
16397 h->root.u.def.value,
16398 NULL, TRUE, FALSE, &bh);
16399
16400 myh = (struct elf_link_hash_entry *) bh;
16401 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16402 myh->forced_local = 1;
16403 ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16404 eh->export_glue = myh;
16405 th = record_arm_to_thumb_glue (info, h);
16406 /* Point the symbol at the stub. */
16407 h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16408 ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16409 h->root.u.def.section = th->root.u.def.section;
16410 h->root.u.def.value = th->root.u.def.value & ~1;
16411 }
16412
16413 if (eh->dyn_relocs == NULL)
16414 return TRUE;
16415
16416 /* In the shared -Bsymbolic case, discard space allocated for
16417 dynamic pc-relative relocs against symbols which turn out to be
16418 defined in regular objects. For the normal shared case, discard
16419 space for pc-relative relocs that have become local due to symbol
16420 visibility changes. */
16421
16422 if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16423 {
16424 /* Relocs that use pc_count are PC-relative forms, which will appear
16425 on something like ".long foo - ." or "movw REG, foo - .". We want
16426 calls to protected symbols to resolve directly to the function
16427 rather than going via the plt. If people want function pointer
16428 comparisons to work as expected then they should avoid writing
16429 assembly like ".long foo - .". */
16430 if (SYMBOL_CALLS_LOCAL (info, h))
16431 {
16432 struct elf_dyn_relocs **pp;
16433
16434 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16435 {
16436 p->count -= p->pc_count;
16437 p->pc_count = 0;
16438 if (p->count == 0)
16439 *pp = p->next;
16440 else
16441 pp = &p->next;
16442 }
16443 }
16444
16445 if (htab->vxworks_p)
16446 {
16447 struct elf_dyn_relocs **pp;
16448
16449 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16450 {
16451 if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16452 *pp = p->next;
16453 else
16454 pp = &p->next;
16455 }
16456 }
16457
16458 /* Also discard relocs on undefined weak syms with non-default
16459 visibility. */
16460 if (eh->dyn_relocs != NULL
16461 && h->root.type == bfd_link_hash_undefweak)
16462 {
16463 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16464 || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16465 eh->dyn_relocs = NULL;
16466
16467 /* Make sure undefined weak symbols are output as a dynamic
16468 symbol in PIEs. */
16469 else if (htab->root.dynamic_sections_created && h->dynindx == -1
16470 && !h->forced_local)
16471 {
16472 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16473 return FALSE;
16474 }
16475 }
16476
16477 else if (htab->root.is_relocatable_executable && h->dynindx == -1
16478 && h->root.type == bfd_link_hash_new)
16479 {
16480 /* Output absolute symbols so that we can create relocations
16481 against them. For normal symbols we output a relocation
16482 against the section that contains them. */
16483 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16484 return FALSE;
16485 }
16486
16487 }
16488 else
16489 {
16490 /* For the non-shared case, discard space for relocs against
16491 symbols which turn out to need copy relocs or are not
16492 dynamic. */
16493
16494 if (!h->non_got_ref
16495 && ((h->def_dynamic
16496 && !h->def_regular)
16497 || (htab->root.dynamic_sections_created
16498 && (h->root.type == bfd_link_hash_undefweak
16499 || h->root.type == bfd_link_hash_undefined))))
16500 {
16501 /* Make sure this symbol is output as a dynamic symbol.
16502 Undefined weak syms won't yet be marked as dynamic. */
16503 if (h->dynindx == -1 && !h->forced_local
16504 && h->root.type == bfd_link_hash_undefweak)
16505 {
16506 if (! bfd_elf_link_record_dynamic_symbol (info, h))
16507 return FALSE;
16508 }
16509
16510 /* If that succeeded, we know we'll be keeping all the
16511 relocs. */
16512 if (h->dynindx != -1)
16513 goto keep;
16514 }
16515
16516 eh->dyn_relocs = NULL;
16517
16518 keep: ;
16519 }
16520
16521 /* Finally, allocate space. */
16522 for (p = eh->dyn_relocs; p != NULL; p = p->next)
16523 {
16524 asection *sreloc = elf_section_data (p->sec)->sreloc;
16525
16526 if (h->type == STT_GNU_IFUNC
16527 && eh->plt.noncall_refcount == 0
16528 && SYMBOL_REFERENCES_LOCAL (info, h))
16529 elf32_arm_allocate_irelocs (info, sreloc, p->count);
16530 else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16531 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16532 else if (htab->fdpic_p && !bfd_link_pic(info))
16533 htab->srofixup->size += 4 * p->count;
16534 else
16535 elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16536 }
16537
16538 return TRUE;
16539 }
16540
16541 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16542 read-only sections. */
16543
16544 static bfd_boolean
16545 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16546 {
16547 asection *sec;
16548
16549 if (h->root.type == bfd_link_hash_indirect)
16550 return TRUE;
16551
16552 sec = readonly_dynrelocs (h);
16553 if (sec != NULL)
16554 {
16555 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16556
16557 info->flags |= DF_TEXTREL;
16558 info->callbacks->minfo
16559 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16560 sec->owner, h->root.root.string, sec);
16561
16562 /* Not an error, just cut short the traversal. */
16563 return FALSE;
16564 }
16565
16566 return TRUE;
16567 }
16568
16569 void
16570 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16571 int byteswap_code)
16572 {
16573 struct elf32_arm_link_hash_table *globals;
16574
16575 globals = elf32_arm_hash_table (info);
16576 if (globals == NULL)
16577 return;
16578
16579 globals->byteswap_code = byteswap_code;
16580 }
16581
/* Set the sizes of the dynamic sections.

   Called by the generic ELF linker after all input sections have been
   mapped.  Sizes .interp, .got/.got.plt, .plt/.iplt, the dynamic
   relocation sections and (for FDPIC) .rofixup; allocates their
   contents; and adds the required DT_* entries to .dynamic.
   Returns FALSE on allocation failure.

   NOTE(review): OUTPUT_BFD is marked ATTRIBUTE_UNUSED but is in fact
   referenced below (DYNAMIC flag test and the VxWorks dynamic
   entries); the attribute merely suppresses a warning on
   configurations where those uses compile away — confirm.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;		/* Seen a non-empty .plt?  */
  bfd_boolean relocs;		/* Seen non-PLT dynamic reloc sections?  */
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      /* First pass over this bfd's sections: reserve space for the
	 dynamic relocs (or FDPIC rofixups) recorded during
	 check_relocs against local symbols.  */
      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  if (htab->fdpic_p && !bfd_link_pic(info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      /* Walk the per-local-symbol arrays in parallel: GOT refcounts,
	 iplt info, TLS type flags, TLSDESC GOT offsets and FDPIC
	 counters all share the local-symbol index.  */
      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      /* Only one function descriptor per symbol; -1 means not
		 yet allocated.  */
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      /* Space for this local iplt entry's dynamic relocs.  */
	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      /* Reserve GOT slot(s) according to the TLS access
		 model(s) recorded for this symbol.  */
	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      /* Scan failures are reported but deliberately not fatal here.  */
      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_get_section_name (dynobj, s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
17027
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.

   Also validates the FDPIC stack-segment size via __stacksize.
   Returns FALSE on failure, TRUE otherwise (including for
   relocatable links, which are skipped entirely).  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  /* Only define _TLS_MODULE_BASE_ when there is TLS data.  */
  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* NOTE(review): the type is set through the looked-up entry
	     before TLSBASE is re-pointed at BH; presumably both refer
	     to the same hash entry — confirm.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  /* Keep the symbol out of the dynamic symbol table.  */
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC, reserve/validate the stack segment size (overridable
     through the __stacksize symbol).  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17079
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.

   Called once per dynamic symbol H at final-link time.  Fills in the
   symbol's PLT entry (if any), emits its R_ARM_COPY relocation (if it
   needs one), and adjusts the output symbol SYM accordingly.
   Returns FALSE if the hash table is missing or the PLT entry cannot
   be populated.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  if (h->plt.offset != (bfd_vma) -1)
    {
      /* Ordinary PLT entries require a dynamic symbol index; .iplt
	 entries (is_iplt) are filled in elsewhere.  */
      if (!eh->is_iplt)
	{
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Copy relocs against read-only data live in .rel(a).dyn.relro;
	 everything else goes in .rel(a).bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17169
17170 static void
17171 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17172 void *contents,
17173 const unsigned long *template, unsigned count)
17174 {
17175 unsigned ix;
17176
17177 for (ix = 0; ix != count; ix++)
17178 {
17179 unsigned long insn = template[ix];
17180
17181 /* Emit mov pc,rx if bx is not permitted. */
17182 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17183 insn = (insn & 0xf000000f) | 0x01a0f000;
17184 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17185 }
17186 }
17187
17188 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17189 other variants, NaCl needs this entry in a static executable's
17190 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17191 zero. For .iplt really only the last bundle is useful, and .iplt
17192 could have a shorter first entry, with each individual PLT entry's
17193 relative branch calculated differently so it targets the last
17194 bundle instead of the instruction before it (labelled .Lplt_tail
17195 above). But it's simpler to keep the size and layout of PLT0
17196 consistent with the dynamic case, at the cost of some dead code at
17197 the start of .iplt and the one dead store to the stack at the start
17198 of .Lplt_tail. */
17199 static void
17200 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17201 asection *plt, bfd_vma got_displacement)
17202 {
17203 unsigned int i;
17204
17205 put_arm_insn (htab, output_bfd,
17206 elf32_arm_nacl_plt0_entry[0]
17207 | arm_movw_immediate (got_displacement),
17208 plt->contents + 0);
17209 put_arm_insn (htab, output_bfd,
17210 elf32_arm_nacl_plt0_entry[1]
17211 | arm_movt_immediate (got_displacement),
17212 plt->contents + 4);
17213
17214 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17215 put_arm_insn (htab, output_bfd,
17216 elf32_arm_nacl_plt0_entry[i],
17217 plt->contents + (i * 4));
17218 }
17219
/* Finish up the dynamic sections.  Patches the .dynamic entries whose
   values depend on final layout, writes the PLT header and TLS
   trampolines, fills in the reserved GOT words, and (for FDPIC)
   terminates the .rofixup section.  Returns FALSE on error.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk every .dynamic entry, filling in the values that could
	 not be known until final layout was decided.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      /* Only the BPABI (Symbian) post-linker needs these tags
		 rewritten; otherwise leave the generic value alone.  */
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	      /* Set the bottom bit of DT_INIT/FINI if the
		 corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  /* Emit the lazy TLS descriptor trampoline, then patch its two
	     trailing relative-offset words (slots 24 and 28).  */
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry owns a pair of unloaded relocs: one against
	     _GLOBAL_OFFSET_TABLE_ and one against the PLT symbol.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (0 when static).  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17588
17589 static void
17590 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17591 {
17592 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17593 struct elf32_arm_link_hash_table *globals;
17594 struct elf_segment_map *m;
17595
17596 i_ehdrp = elf_elfheader (abfd);
17597
17598 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17599 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17600 else
17601 _bfd_elf_post_process_headers (abfd, link_info);
17602 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17603
17604 if (link_info)
17605 {
17606 globals = elf32_arm_hash_table (link_info);
17607 if (globals != NULL && globals->byteswap_code)
17608 i_ehdrp->e_flags |= EF_ARM_BE8;
17609
17610 if (globals->fdpic_p)
17611 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17612 }
17613
17614 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17615 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17616 {
17617 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17618 if (abi == AEABI_VFP_args_vfp)
17619 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17620 else
17621 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17622 }
17623
17624 /* Scan segment to set p_flags attribute if it contains only sections with
17625 SHF_ARM_PURECODE flag. */
17626 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17627 {
17628 unsigned int j;
17629
17630 if (m->count == 0)
17631 continue;
17632 for (j = 0; j < m->count; j++)
17633 {
17634 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17635 break;
17636 }
17637 if (j == m->count)
17638 {
17639 m->p_flags = PF_X;
17640 m->p_flags_valid = 1;
17641 }
17642 }
17643 }
17644
17645 static enum elf_reloc_type_class
17646 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17647 const asection *rel_sec ATTRIBUTE_UNUSED,
17648 const Elf_Internal_Rela *rela)
17649 {
17650 switch ((int) ELF32_R_TYPE (rela->r_info))
17651 {
17652 case R_ARM_RELATIVE:
17653 return reloc_class_relative;
17654 case R_ARM_JUMP_SLOT:
17655 return reloc_class_plt;
17656 case R_ARM_COPY:
17657 return reloc_class_copy;
17658 case R_ARM_IRELATIVE:
17659 return reloc_class_ifunc;
17660 default:
17661 return reloc_class_normal;
17662 }
17663 }
17664
/* Final write hook: refresh any ARM note (ARM_NOTE_SECTION) sections
   in ABFD before the file is written out.  */

static void
elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17670
17671 /* Return TRUE if this is an unwinding table entry. */
17672
17673 static bfd_boolean
17674 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17675 {
17676 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17677 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17678 }
17679
17680
17681 /* Set the type and flags for an ARM section. We do this by
17682 the section name, which is a hack, but ought to work. */
17683
17684 static bfd_boolean
17685 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17686 {
17687 const char * name;
17688
17689 name = bfd_get_section_name (abfd, sec);
17690
17691 if (is_arm_elf_unwind_section_name (abfd, name))
17692 {
17693 hdr->sh_type = SHT_ARM_EXIDX;
17694 hdr->sh_flags |= SHF_LINK_ORDER;
17695 }
17696
17697 if (sec->flags & SEC_ELF_PURECODE)
17698 hdr->sh_flags |= SHF_ARM_PURECODE;
17699
17700 return TRUE;
17701 }
17702
17703 /* Handle an ARM specific section when reading an object file. This is
17704 called when bfd_section_from_shdr finds a section with an unknown
17705 type. */
17706
17707 static bfd_boolean
17708 elf32_arm_section_from_shdr (bfd *abfd,
17709 Elf_Internal_Shdr * hdr,
17710 const char *name,
17711 int shindex)
17712 {
17713 /* There ought to be a place to keep ELF backend specific flags, but
17714 at the moment there isn't one. We just keep track of the
17715 sections by their name, instead. Fortunately, the ABI gives
17716 names for all the ARM specific sections, so we will probably get
17717 away with this. */
17718 switch (hdr->sh_type)
17719 {
17720 case SHT_ARM_EXIDX:
17721 case SHT_ARM_PREEMPTMAP:
17722 case SHT_ARM_ATTRIBUTES:
17723 break;
17724
17725 default:
17726 return FALSE;
17727 }
17728
17729 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17730 return FALSE;
17731
17732 return TRUE;
17733 }
17734
17735 static _arm_elf_section_data *
17736 get_arm_elf_section_data (asection * sec)
17737 {
17738 if (sec && sec->owner && is_arm_elf (sec->owner))
17739 return elf32_arm_section_data (sec);
17740 else
17741 return NULL;
17742 }
17743
/* State threaded through the emission of linker-generated local
   symbols (mapping symbols and stub symbols).  */
typedef struct
{
  void *flaginfo;		/* Opaque argument forwarded to FUNC.  */
  struct bfd_link_info *info;	/* The link currently in progress.  */
  asection *sec;		/* Section being processed.  */
  int sec_shndx;		/* Output section index of SEC.  */
  /* Callback used to emit each generated symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;

/* The three kinds of ARM mapping symbol: ARM code, Thumb code and
   literal data ($a, $t, $d respectively).  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
17760
17761
17762 /* Output a single mapping symbol. */
17763
17764 static bfd_boolean
17765 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17766 enum map_symbol_type type,
17767 bfd_vma offset)
17768 {
17769 static const char *names[3] = {"$a", "$t", "$d"};
17770 Elf_Internal_Sym sym;
17771
17772 sym.st_value = osi->sec->output_section->vma
17773 + osi->sec->output_offset
17774 + offset;
17775 sym.st_size = 0;
17776 sym.st_other = 0;
17777 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17778 sym.st_shndx = osi->sec_shndx;
17779 sym.st_target_internal = 0;
17780 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17781 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17782 }
17783
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.
   The offsets used below mirror the per-target PLT entry layouts
   emitted elsewhere in this file.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  if (is_iplt_entry_p)
    {
      /* .iplt has no header.  */
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Mask off the low bit of the recorded offset.  */
  addr = root_plt->offset & -2;
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      /* FDPIC PLT entries may be ARM or Thumb depending on the
	 target.  */
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* The Thumb entry stub, when present, precedes the entry
	 proper by 4 bytes.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The long FDPIC entry variant has a second code region at
	 offset 24.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
17891
17892 /* Output mapping symbols for PLT entries associated with H. */
17893
17894 static bfd_boolean
17895 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17896 {
17897 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17898 struct elf32_arm_link_hash_entry *eh;
17899
17900 if (h->root.type == bfd_link_hash_indirect)
17901 return TRUE;
17902
17903 if (h->root.type == bfd_link_hash_warning)
17904 /* When warning symbols are created, they **replace** the "real"
17905 entry in the hash table, thus we never get to see the real
17906 symbol in a hash traversal. So look at it now. */
17907 h = (struct elf_link_hash_entry *) h->root.u.i.link;
17908
17909 eh = (struct elf32_arm_link_hash_entry *) h;
17910 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17911 &h->plt, &eh->plt);
17912 }
17913
17914 /* Bind a veneered symbol to its veneer identified by its hash entry
17915 STUB_ENTRY. The veneered location thus loose its symbol. */
17916
17917 static void
17918 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17919 {
17920 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17921
17922 BFD_ASSERT (hash);
17923 hash->root.root.u.def.section = stub_entry->stub_sec;
17924 hash->root.root.u.def.value = stub_entry->stub_offset;
17925 hash->root.size = stub_entry->stub_size;
17926 }
17927
17928 /* Output a single local symbol for a generated stub. */
17929
17930 static bfd_boolean
17931 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17932 bfd_vma offset, bfd_vma size)
17933 {
17934 Elf_Internal_Sym sym;
17935
17936 sym.st_value = osi->sec->output_section->vma
17937 + osi->sec->output_offset
17938 + offset;
17939 sym.st_size = size;
17940 sym.st_other = 0;
17941 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17942 sym.st_shndx = osi->sec_shndx;
17943 sym.st_target_internal = 0;
17944 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17945 }
17946
/* Emit the local symbol and the mapping symbols for one stub.  Called
   via bfd_hash_traverse over the stub hash table; IN_ARG is the
   output_arch_syminfo for the section currently being processed.  */

static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  if (arm_stub_sym_claimed (stub_entry->stub_type))
    /* The veneered symbol itself was redirected at the stub; no
       separate stub symbol is needed.  */
    arm_stub_claim_sym (stub_entry);
  else
    {
      /* Emit a local symbol naming the stub; Thumb entry points get
	 the low bit of the address set.  */
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the stub template, emitting a mapping symbol wherever the
     element type ($a/$t/$d) changes.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance past this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
18054
18055 /* Output mapping symbols for linker generated sections,
18056 and for those data-only sections that do not have a
18057 $d. */
18058
18059 static bfd_boolean
18060 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18061 struct bfd_link_info *info,
18062 void *flaginfo,
18063 int (*func) (void *, const char *,
18064 Elf_Internal_Sym *,
18065 asection *,
18066 struct elf_link_hash_entry *))
18067 {
18068 output_arch_syminfo osi;
18069 struct elf32_arm_link_hash_table *htab;
18070 bfd_vma offset;
18071 bfd_size_type size;
18072 bfd *input_bfd;
18073
18074 htab = elf32_arm_hash_table (info);
18075 if (htab == NULL)
18076 return FALSE;
18077
18078 check_use_blx (htab);
18079
18080 osi.flaginfo = flaginfo;
18081 osi.info = info;
18082 osi.func = func;
18083
18084 /* Add a $d mapping symbol to data-only sections that
18085 don't have any mapping symbol. This may result in (harmless) redundant
18086 mapping symbols. */
18087 for (input_bfd = info->input_bfds;
18088 input_bfd != NULL;
18089 input_bfd = input_bfd->link.next)
18090 {
18091 if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18092 for (osi.sec = input_bfd->sections;
18093 osi.sec != NULL;
18094 osi.sec = osi.sec->next)
18095 {
18096 if (osi.sec->output_section != NULL
18097 && ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18098 != 0)
18099 && (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18100 == SEC_HAS_CONTENTS
18101 && get_arm_elf_section_data (osi.sec) != NULL
18102 && get_arm_elf_section_data (osi.sec)->mapcount == 0
18103 && osi.sec->size > 0
18104 && (osi.sec->flags & SEC_EXCLUDE) == 0)
18105 {
18106 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18107 (output_bfd, osi.sec->output_section);
18108 if (osi.sec_shndx != (int)SHN_BAD)
18109 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18110 }
18111 }
18112 }
18113
18114 /* ARM->Thumb glue. */
18115 if (htab->arm_glue_size > 0)
18116 {
18117 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18118 ARM2THUMB_GLUE_SECTION_NAME);
18119
18120 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18121 (output_bfd, osi.sec->output_section);
18122 if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18123 || htab->pic_veneer)
18124 size = ARM2THUMB_PIC_GLUE_SIZE;
18125 else if (htab->use_blx)
18126 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18127 else
18128 size = ARM2THUMB_STATIC_GLUE_SIZE;
18129
18130 for (offset = 0; offset < htab->arm_glue_size; offset += size)
18131 {
18132 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18133 elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18134 }
18135 }
18136
18137 /* Thumb->ARM glue. */
18138 if (htab->thumb_glue_size > 0)
18139 {
18140 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18141 THUMB2ARM_GLUE_SECTION_NAME);
18142
18143 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18144 (output_bfd, osi.sec->output_section);
18145 size = THUMB2ARM_GLUE_SIZE;
18146
18147 for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18148 {
18149 elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18150 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18151 }
18152 }
18153
18154 /* ARMv4 BX veneers. */
18155 if (htab->bx_glue_size > 0)
18156 {
18157 osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18158 ARM_BX_GLUE_SECTION_NAME);
18159
18160 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18161 (output_bfd, osi.sec->output_section);
18162
18163 elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18164 }
18165
18166 /* Long calls stubs. */
18167 if (htab->stub_bfd && htab->stub_bfd->sections)
18168 {
18169 asection* stub_sec;
18170
18171 for (stub_sec = htab->stub_bfd->sections;
18172 stub_sec != NULL;
18173 stub_sec = stub_sec->next)
18174 {
18175 /* Ignore non-stub sections. */
18176 if (!strstr (stub_sec->name, STUB_SUFFIX))
18177 continue;
18178
18179 osi.sec = stub_sec;
18180
18181 osi.sec_shndx = _bfd_elf_section_from_bfd_section
18182 (output_bfd, osi.sec->output_section);
18183
18184 bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18185 }
18186 }
18187
18188 /* Finally, output mapping symbols for the PLT. */
18189 if (htab->root.splt && htab->root.splt->size > 0)
18190 {
18191 osi.sec = htab->root.splt;
18192 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18193 (output_bfd, osi.sec->output_section));
18194
18195 /* Output mapping symbols for the plt header. SymbianOS does not have a
18196 plt header. */
18197 if (htab->vxworks_p)
18198 {
18199 /* VxWorks shared libraries have no PLT header. */
18200 if (!bfd_link_pic (info))
18201 {
18202 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18203 return FALSE;
18204 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18205 return FALSE;
18206 }
18207 }
18208 else if (htab->nacl_p)
18209 {
18210 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18211 return FALSE;
18212 }
18213 else if (using_thumb_only (htab) && !htab->fdpic_p)
18214 {
18215 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18216 return FALSE;
18217 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18218 return FALSE;
18219 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18220 return FALSE;
18221 }
18222 else if (!htab->symbian_p && !htab->fdpic_p)
18223 {
18224 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18225 return FALSE;
18226 #ifndef FOUR_WORD_PLT
18227 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18228 return FALSE;
18229 #endif
18230 }
18231 }
18232 if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
18233 {
18234 /* NaCl uses a special first entry in .iplt too. */
18235 osi.sec = htab->root.iplt;
18236 osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18237 (output_bfd, osi.sec->output_section));
18238 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18239 return FALSE;
18240 }
18241 if ((htab->root.splt && htab->root.splt->size > 0)
18242 || (htab->root.iplt && htab->root.iplt->size > 0))
18243 {
18244 elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18245 for (input_bfd = info->input_bfds;
18246 input_bfd != NULL;
18247 input_bfd = input_bfd->link.next)
18248 {
18249 struct arm_local_iplt_info **local_iplt;
18250 unsigned int i, num_syms;
18251
18252 local_iplt = elf32_arm_local_iplt (input_bfd);
18253 if (local_iplt != NULL)
18254 {
18255 num_syms = elf_symtab_hdr (input_bfd).sh_info;
18256 for (i = 0; i < num_syms; i++)
18257 if (local_iplt[i] != NULL
18258 && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18259 &local_iplt[i]->root,
18260 &local_iplt[i]->arm))
18261 return FALSE;
18262 }
18263 }
18264 }
18265 if (htab->dt_tlsdesc_plt != 0)
18266 {
18267 /* Mapping symbols for the lazy tls trampoline. */
18268 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
18269 return FALSE;
18270
18271 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18272 htab->dt_tlsdesc_plt + 24))
18273 return FALSE;
18274 }
18275 if (htab->tls_trampoline != 0)
18276 {
18277 /* Mapping symbols for the tls trampoline. */
18278 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18279 return FALSE;
18280 #ifdef FOUR_WORD_PLT
18281 if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18282 htab->tls_trampoline + 12))
18283 return FALSE;
18284 #endif
18285 }
18286
18287 return TRUE;
18288 }
18289
18290 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18291 the import library. All SYMCOUNT symbols of ABFD can be examined
18292 from their pointers in SYMS. Pointers of symbols to keep should be
18293 stored continuously at the beginning of that array.
18294
18295 Returns the number of symbols to keep. */
18296
18297 static unsigned int
18298 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18299 struct bfd_link_info *info,
18300 asymbol **syms, long symcount)
18301 {
18302 size_t maxnamelen;
18303 char *cmse_name;
18304 long src_count, dst_count = 0;
18305 struct elf32_arm_link_hash_table *htab;
18306
18307 htab = elf32_arm_hash_table (info);
18308 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18309 symcount = 0;
18310
18311 maxnamelen = 128;
18312 cmse_name = (char *) bfd_malloc (maxnamelen);
18313 for (src_count = 0; src_count < symcount; src_count++)
18314 {
18315 struct elf32_arm_link_hash_entry *cmse_hash;
18316 asymbol *sym;
18317 flagword flags;
18318 char *name;
18319 size_t namelen;
18320
18321 sym = syms[src_count];
18322 flags = sym->flags;
18323 name = (char *) bfd_asymbol_name (sym);
18324
18325 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18326 continue;
18327 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18328 continue;
18329
18330 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18331 if (namelen > maxnamelen)
18332 {
18333 cmse_name = (char *)
18334 bfd_realloc (cmse_name, namelen);
18335 maxnamelen = namelen;
18336 }
18337 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18338 cmse_hash = (struct elf32_arm_link_hash_entry *)
18339 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18340
18341 if (!cmse_hash
18342 || (cmse_hash->root.root.type != bfd_link_hash_defined
18343 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18344 || cmse_hash->root.type != STT_FUNC)
18345 continue;
18346
18347 if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18348 continue;
18349
18350 syms[dst_count++] = sym;
18351 }
18352 free (cmse_name);
18353
18354 syms[dst_count] = NULL;
18355
18356 return dst_count;
18357 }
18358
18359 /* Filter symbols of ABFD to include in the import library. All
18360 SYMCOUNT symbols of ABFD can be examined from their pointers in
18361 SYMS. Pointers of symbols to keep should be stored continuously at
18362 the beginning of that array.
18363
18364 Returns the number of symbols to keep. */
18365
18366 static unsigned int
18367 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18368 struct bfd_link_info *info,
18369 asymbol **syms, long symcount)
18370 {
18371 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18372
18373 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18374 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18375 library to be a relocatable object file. */
18376 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18377 if (globals->cmse_implib)
18378 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18379 else
18380 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18381 }
18382
18383 /* Allocate target specific section data. */
18384
18385 static bfd_boolean
18386 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18387 {
18388 if (!sec->used_by_bfd)
18389 {
18390 _arm_elf_section_data *sdata;
18391 bfd_size_type amt = sizeof (*sdata);
18392
18393 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18394 if (sdata == NULL)
18395 return FALSE;
18396 sec->used_by_bfd = sdata;
18397 }
18398
18399 return _bfd_elf_new_section_hook (abfd, sec);
18400 }
18401
18402
18403 /* Used to order a list of mapping symbols by address. */
18404
18405 static int
18406 elf32_arm_compare_mapping (const void * a, const void * b)
18407 {
18408 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18409 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18410
18411 if (amap->vma > bmap->vma)
18412 return 1;
18413 else if (amap->vma < bmap->vma)
18414 return -1;
18415 else if (amap->type > bmap->type)
18416 /* Ensure results do not depend on the host qsort for objects with
18417 multiple mapping symbols at the same address by sorting on type
18418 after vma. */
18419 return 1;
18420 else if (amap->type < bmap->type)
18421 return -1;
18422 else
18423 return 0;
18424 }
18425
18426 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18427
18428 static unsigned long
18429 offset_prel31 (unsigned long addr, bfd_vma offset)
18430 {
18431 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18432 }
18433
18434 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18435 relocations. */
18436
static void
copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
{
  unsigned long first_word = bfd_get_32 (output_bfd, from);
  unsigned long second_word = bfd_get_32 (output_bfd, from + 4);

  /* The first word is a prel31 offset to the function; its high bit
     is supposed to be zero.  Rebias it by OFFSET.  */
  if ((first_word & 0x80000000ul) == 0)
    first_word = offset_prel31 (first_word, offset);

  /* If the high bit of the second word is clear, and the bit pattern is not 0x1
     (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry, so rebias it
     too.  (Otherwise it is either EXIDX_CANTUNWIND or an inline unwind
     description, neither of which is position-dependent.)  */
  if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
    second_word = offset_prel31 (second_word, offset);

  bfd_put_32 (output_bfd, first_word, to);
  bfd_put_32 (output_bfd, second_word, to + 4);
}
18455
18456 /* Data for make_branch_to_a8_stub(). */
18457
struct a8_branch_to_stub_data
{
  /* Section whose contents are currently being rewritten.  */
  asection *writing_section;
  /* In-memory copy of WRITING_SECTION's contents, into which branches
     to the Cortex-A8 erratum stubs are patched.  */
  bfd_byte *contents;
};
18463
18464
18465 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18466 places for a particular section. */
18467
static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Skip stubs for other sections, and any stub type that is not a
     Cortex-A8 veneer (this relies on the A8 veneer types being ordered
     after arm_stub_a8_veneer_lwm in the stub-type enum).  Returning
     TRUE keeps the hash traversal going.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch starts from the ARM-state (word-aligned)
     view of the veneered instruction's address.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the base encoding of the replacement 32-bit Thumb branch
     (B.W, BLX or BL), then fill in the common 24-bit offset fields at
     the shared jump24 label below.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two halfwords of the replacement instruction,
     most-significant halfword first, over the veneered instruction.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18572
18573 /* Beginning of stm32l4xx work-around. */
18574
18575 /* Functions encoding instructions necessary for the emission of the
18576 fix-stm32l4xx-629360.
18577 Encoding is extracted from the
18578 ARM (C) Architecture Reference Manual
18579 ARMv7-A and ARMv7-R edition
18580 ARM DDI 0406C.b (ID072512). */
18581
18582 static inline bfd_vma
18583 create_instruction_branch_absolute (int branch_offset)
18584 {
18585 /* A8.8.18 B (A8-334)
18586 B target_address (Encoding T4). */
18587 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18588 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18589 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18590
18591 int s = ((branch_offset & 0x1000000) >> 24);
18592 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18593 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18594
18595 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18596 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18597
18598 bfd_vma patched_inst = 0xf0009000
18599 | s << 26 /* S. */
18600 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18601 | j1 << 13 /* J1. */
18602 | j2 << 11 /* J2. */
18603 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18604
18605 return patched_inst;
18606 }
18607
18608 static inline bfd_vma
18609 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18610 {
18611 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18612 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18613 bfd_vma patched_inst = 0xe8900000
18614 | (/*W=*/wback << 21)
18615 | (base_reg << 16)
18616 | (reg_mask & 0x0000ffff);
18617
18618 return patched_inst;
18619 }
18620
18621 static inline bfd_vma
18622 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18623 {
18624 /* A8.8.60 LDMDB/LDMEA (A8-402)
18625 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18626 bfd_vma patched_inst = 0xe9100000
18627 | (/*W=*/wback << 21)
18628 | (base_reg << 16)
18629 | (reg_mask & 0x0000ffff);
18630
18631 return patched_inst;
18632 }
18633
18634 static inline bfd_vma
18635 create_instruction_mov (int target_reg, int source_reg)
18636 {
18637 /* A8.8.103 MOV (register) (A8-486)
18638 MOV Rd, Rm (Encoding T1). */
18639 bfd_vma patched_inst = 0x4600
18640 | (target_reg & 0x7)
18641 | ((target_reg & 0x8) >> 3) << 7
18642 | (source_reg << 3);
18643
18644 return patched_inst;
18645 }
18646
18647 static inline bfd_vma
18648 create_instruction_sub (int target_reg, int source_reg, int value)
18649 {
18650 /* A8.8.221 SUB (immediate) (A8-708)
18651 SUB Rd, Rn, #value (Encoding T3). */
18652 bfd_vma patched_inst = 0xf1a00000
18653 | (target_reg << 8)
18654 | (source_reg << 16)
18655 | (/*S=*/0 << 20)
18656 | ((value & 0x800) >> 11) << 26
18657 | ((value & 0x700) >> 8) << 12
18658 | (value & 0x0ff);
18659
18660 return patched_inst;
18661 }
18662
18663 static inline bfd_vma
18664 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18665 int first_reg)
18666 {
18667 /* A8.8.332 VLDM (A8-922)
18668 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18669 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18670 | (/*W=*/wback << 21)
18671 | (base_reg << 16)
18672 | (num_words & 0x000000ff)
18673 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18674 | (first_reg & 0x00000001) << 22;
18675
18676 return patched_inst;
18677 }
18678
18679 static inline bfd_vma
18680 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18681 int first_reg)
18682 {
18683 /* A8.8.332 VLDM (A8-922)
18684 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18685 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18686 | (base_reg << 16)
18687 | (num_words & 0x000000ff)
18688 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18689 | (first_reg & 0x00000001) << 22;
18690
18691 return patched_inst;
18692 }
18693
18694 static inline bfd_vma
18695 create_instruction_udf_w (int value)
18696 {
18697 /* A8.8.247 UDF (A8-758)
18698 Undefined (Encoding T2). */
18699 bfd_vma patched_inst = 0xf7f0a000
18700 | (value & 0x00000fff)
18701 | (value & 0x000f0000) << 16;
18702
18703 return patched_inst;
18704 }
18705
18706 static inline bfd_vma
18707 create_instruction_udf (int value)
18708 {
18709 /* A8.8.247 UDF (A8-758)
18710 Undefined (Encoding T1). */
18711 bfd_vma patched_inst = 0xde00
18712 | (value & 0xff);
18713
18714 return patched_inst;
18715 }
18716
18717 /* Functions writing an instruction in memory, returning the next
18718 memory position to write to. */
18719
18720 static inline bfd_byte *
18721 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18722 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18723 {
18724 put_thumb2_insn (htab, output_bfd, insn, pt);
18725 return pt + 4;
18726 }
18727
18728 static inline bfd_byte *
18729 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18730 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18731 {
18732 put_thumb_insn (htab, output_bfd, insn, pt);
18733 return pt + 2;
18734 }
18735
18736 /* Function filling up a region in memory with T1 and T2 UDFs taking
18737 care of alignment. */
18738
18739 static bfd_byte *
18740 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18741 bfd * output_bfd,
18742 const bfd_byte * const base_stub_contents,
18743 bfd_byte * const from_stub_contents,
18744 const bfd_byte * const end_stub_contents)
18745 {
18746 bfd_byte *current_stub_contents = from_stub_contents;
18747
18748 /* Fill the remaining of the stub with deterministic contents : UDF
18749 instructions.
18750 Check if realignment is needed on modulo 4 frontier using T1, to
18751 further use T2. */
18752 if ((current_stub_contents < end_stub_contents)
18753 && !((current_stub_contents - base_stub_contents) % 2)
18754 && ((current_stub_contents - base_stub_contents) % 4))
18755 current_stub_contents =
18756 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18757 create_instruction_udf (0));
18758
18759 for (; current_stub_contents < end_stub_contents;)
18760 current_stub_contents =
18761 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18762 create_instruction_udf_w (0));
18763
18764 return current_stub_contents;
18765 }
18766
18767 /* Functions writing the stream of instructions equivalent to the
18768 derived sequence for ldmia, ldmdb, vldm respectively. */
18769
/* Write to BASE_STUB_CONTENTS the veneer replacing the wide Thumb-2
   LDMIA whose raw encoding is INITIAL_INSN and which sits at
   INITIAL_INSN_ADDR.  Loads of more than 8 registers are split into
   two LDMs of at most 7 registers each to avoid the stm32l4xx erratum;
   smaller loads are copied unchanged.  Unless the original load writes
   the PC, the veneer ends with a branch back to the instruction
   following the original one (initial_insn_addr + 4).  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original LDMIA (Encoding T2).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000F0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  (If the load wrote the PC, control
	 never falls through, so no branch back is needed.)  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the architectural constraints on the original insn:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1 (LR and PC not both loaded).  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With writeback, Rn is not in the register list (asserted
	 above), so the two halves can simply load through Rn!.  */

      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.
	 The base is copied to a register that the final (high) load
	 overwrites, so the original Rn value survives the sequence.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
18913
/* Write to BASE_STUB_CONTENTS the veneer replacing the wide Thumb-2
   LDMDB whose raw encoding is INITIAL_INSN and which sits at
   INITIAL_INSN_ADDR.  As for the LDMIA case, loads of more than 8
   registers are split into two LDMs of at most 7 registers each;
   smaller loads are copied unchanged.  The emitted sequence depends on
   the (wback, loads-PC, loads-Rn) combination of the original insn.  */

static void
stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  /* Decode the fields of the original LDMDB (Encoding T1).  */
  int wback = (initial_insn & 0x00200000) >> 21;
  int ri, rn = (initial_insn & 0x000f0000) >> 16;
  int insn_all_registers = initial_insn & 0x0000ffff;
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmdb (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Sanity-check the architectural constraints on the original insn:  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);

  /* - reg_list[14] & reg_list[15] != 1 (LR and PC not both loaded).  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insn:
     - One with the 7 lowest registers (register mask 0x007F)
       This LDM will finally contain between 2 and 7 registers
     - One with the 7 highest registers (register mask 0xDF80)
       This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with
     the last LDM operation.
     The usable register may be any general purpose register (that excludes
     PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function, dispatching on the
     (wback, restore_pc, restore_rn) combination.  Because LDMDB walks
     the addresses downwards, the high-register half is loaded first.  */
  if (!wback && !restore_pc && !restore_rn)
    {
      /* Choose a Ri in the low-register-list that will be restored.  */
      ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (wback && !restore_pc && !restore_rn)
    {
      /* LDMDB Rn!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_high_registers));

      /* LDMDB Rn!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (rn, /*wback=*/1, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Ri, Rn, #(4*nb_registers).  (Rebase so the downward LDMDB
	 can be replaced by two upward LDMIAs.)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  (Loads PC: no branch back
	 is needed.)  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_pc && !restore_rn)
    {
      /* Choose a Ri in the high-register-list that will be restored.  */
      ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

      /* SUB Rn, Rn, #(4*nb_registers) */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (rn, rn, (4 * nb_registers)));

      /* MOV Ri, Rn.  */
      current_stub_contents =
	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
			    create_instruction_mov (ri, rn));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (!wback && !restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_low_registers & (1 << rn)))
	{
	  /* Choose a Ri in the low-register-list that will be restored.  */
	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* LDMDB Ri!, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/1, insn_high_registers));

      /* LDMDB Ri, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmdb
			    (ri, /*wback=*/0, insn_low_registers));

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else if (!wback && restore_pc && restore_rn)
    {
      ri = rn;
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
	}

      /* SUB Ri, Rn, #(4*nb_registers).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_sub (ri, rn, (4 * nb_registers)));

      /* LDMIA Ri!, {R-low-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));
    }
  else if (wback && restore_rn)
    {
      /* The assembler should not have accepted to encode this.  */
      BFD_ASSERT (0 && "Cannot patch an instruction that has an "
	"undefined behavior.\n");
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

}
19159
/* Build the STM32L4XX erratum work-around stub for a Thumb-2 VLDM
   instruction.  INITIAL_INSN is the original (faulty) instruction found
   at INITIAL_INSN_ADDR; the replacement sequence is emitted into
   BASE_STUB_CONTENTS.  Loads of more than 8 words are split into
   several <=8-word VLDMs so the hardware erratum is not triggered.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* The low 8 bits of the encoding hold the number of words
     transferred.  */
  int num_words = ((unsigned int) initial_insn << 24) >> 24;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      /* Decode the addressing mode and writeback from the P/U/W bits
	 (shifted so they land in bits 27..24 of the expression).  */
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int) initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ((is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		  && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* All chunks but the last transfer exactly 8 words; the
		 last takes the remainder.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19280
19281 static void
19282 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19283 bfd * output_bfd,
19284 const insn32 wrong_insn,
19285 const bfd_byte *const wrong_insn_addr,
19286 bfd_byte *const stub_contents)
19287 {
19288 if (is_thumb2_ldmia (wrong_insn))
19289 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19290 wrong_insn, wrong_insn_addr,
19291 stub_contents);
19292 else if (is_thumb2_ldmdb (wrong_insn))
19293 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19294 wrong_insn, wrong_insn_addr,
19295 stub_contents);
19296 else if (is_thumb2_vldm (wrong_insn))
19297 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19298 wrong_insn, wrong_insn_addr,
19299 stub_contents);
19300 }
19301
19302 /* End of stm32l4xx work-around. */
19303
19304
19305 /* Do code byteswapping. Return FALSE afterwards so that the section is
19306 written out as normal. */
19307
static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  /* VMA of the start of this section in the output image; erratum
     records store absolute VMAs, so subtracting this yields an offset
     into CONTENTS.  */
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Apply any VFP11 erratum patches recorded for this section: branch
     the faulty location to a veneer, and make the veneer execute the
     original VFP instruction before branching back.  */
  if (errcount != 0)
    {
      /* XOR-ing the byte index with 3 converts between little- and
	 big-endian byte order when storing 32-bit words bytewise.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (word-aligned 24-bit)
		   displacement.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Apply any STM32L4XX erratum patches: the faulty multi-word load is
     branched to a veneer that re-implements it with a safe sequence.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a signed 25-bit displacement.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* Address of the faulty instruction's replacement,
		   relative to the veneer position in CONTENTS.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* For .ARM.exidx sections, re-emit the contents with the recorded
     unwind-table edits (deleted duplicates, inserted EXIDX_CANTUNWIND
     markers) applied, then write the section ourselves.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      /* NOTE(review): bfd_malloc result is not checked for NULL and is
	 never freed before return — presumably tolerated here; confirm.  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      /* Each EXIDX entry is 8 bytes; walk input entries and the edit
	 list in parallel.  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  /* Entry before the next edit point: copy verbatim,
		     adjusting its prel31 offset.  */
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: we have written the section ourselves; the generic code
	 must not write it again.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  /* Byte-swap instruction regions (identified by the mapping symbols)
     for big-endian code in a little-endian data image, or vice versa.  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping symbols are no longer needed once the section has been
     processed; release them and mark the map as consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
19663
19664 /* Mangle thumb function symbols as we read them in. */
19665
19666 static bfd_boolean
19667 elf32_arm_swap_symbol_in (bfd * abfd,
19668 const void *psrc,
19669 const void *pshn,
19670 Elf_Internal_Sym *dst)
19671 {
19672 Elf_Internal_Shdr *symtab_hdr;
19673 const char *name = NULL;
19674
19675 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19676 return FALSE;
19677 dst->st_target_internal = 0;
19678
19679 /* New EABI objects mark thumb function symbols by setting the low bit of
19680 the address. */
19681 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19682 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19683 {
19684 if (dst->st_value & 1)
19685 {
19686 dst->st_value &= ~(bfd_vma) 1;
19687 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19688 ST_BRANCH_TO_THUMB);
19689 }
19690 else
19691 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19692 }
19693 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19694 {
19695 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19696 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19697 }
19698 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19699 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19700 else
19701 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19702
19703 /* Mark CMSE special symbols. */
19704 symtab_hdr = & elf_symtab_hdr (abfd);
19705 if (symtab_hdr->sh_size)
19706 name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19707 if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19708 ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19709
19710 return TRUE;
19711 }
19712
19713
19714 /* Mangle thumb function symbols as we write them out. */
19715
19716 static void
19717 elf32_arm_swap_symbol_out (bfd *abfd,
19718 const Elf_Internal_Sym *src,
19719 void *cdst,
19720 void *shndx)
19721 {
19722 Elf_Internal_Sym newsym;
19723
19724 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19725 of the address set, as per the new EABI. We do this unconditionally
19726 because objcopy does not set the elf header flags until after
19727 it writes out the symbol table. */
19728 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19729 {
19730 newsym = *src;
19731 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19732 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19733 if (newsym.st_shndx != SHN_UNDEF)
19734 {
19735 /* Do this only for defined symbols. At link type, the static
19736 linker will simulate the work of dynamic linker of resolving
19737 symbols and will carry over the thumbness of found symbols to
19738 the output symbol table. It's not clear how it happens, but
19739 the thumbness of undefined symbols can well be different at
19740 runtime, and writing '1' for them will be confusing for users
19741 and possibly for dynamic linker itself.
19742 */
19743 newsym.st_value |= 1;
19744 }
19745
19746 src = &newsym;
19747 }
19748 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19749 }
19750
19751 /* Add the PT_ARM_EXIDX program header. */
19752
19753 static bfd_boolean
19754 elf32_arm_modify_segment_map (bfd *abfd,
19755 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19756 {
19757 struct elf_segment_map *m;
19758 asection *sec;
19759
19760 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19761 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19762 {
19763 /* If there is already a PT_ARM_EXIDX header, then we do not
19764 want to add another one. This situation arises when running
19765 "strip"; the input binary already has the header. */
19766 m = elf_seg_map (abfd);
19767 while (m && m->p_type != PT_ARM_EXIDX)
19768 m = m->next;
19769 if (!m)
19770 {
19771 m = (struct elf_segment_map *)
19772 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19773 if (m == NULL)
19774 return FALSE;
19775 m->p_type = PT_ARM_EXIDX;
19776 m->count = 1;
19777 m->sections[0] = sec;
19778
19779 m->next = elf_seg_map (abfd);
19780 elf_seg_map (abfd) = m;
19781 }
19782 }
19783
19784 return TRUE;
19785 }
19786
19787 /* We may add a PT_ARM_EXIDX program header. */
19788
19789 static int
19790 elf32_arm_additional_program_headers (bfd *abfd,
19791 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19792 {
19793 asection *sec;
19794
19795 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19796 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19797 return 1;
19798 else
19799 return 0;
19800 }
19801
19802 /* Hook called by the linker routine which adds symbols from an object
19803 file. */
19804
19805 static bfd_boolean
19806 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19807 Elf_Internal_Sym *sym, const char **namep,
19808 flagword *flagsp, asection **secp, bfd_vma *valp)
19809 {
19810 if (elf32_arm_hash_table (info) == NULL)
19811 return FALSE;
19812
19813 if (elf32_arm_hash_table (info)->vxworks_p
19814 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19815 flagsp, secp, valp))
19816 return FALSE;
19817
19818 return TRUE;
19819 }
19820
/* We use this to override swap_symbol_in and swap_symbol_out.
   All other entries mirror the generic 32-bit ELF values.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocs per external reloc.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Overridden: Thumb-bit handling.  */
  elf32_arm_swap_symbol_out,	/* Overridden: Thumb-bit handling.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
19851
19852 static bfd_vma
19853 read_code32 (const bfd *abfd, const bfd_byte *addr)
19854 {
19855 /* V7 BE8 code is always little endian. */
19856 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19857 return bfd_getl32 (addr);
19858
19859 return bfd_get_32 (abfd, addr);
19860 }
19861
19862 static bfd_vma
19863 read_code16 (const bfd *abfd, const bfd_byte *addr)
19864 {
19865 /* V7 BE8 code is always little endian. */
19866 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19867 return bfd_getl16 (addr);
19868
19869 return bfd_get_16 (abfd, addr);
19870 }
19871
19872 /* Return size of plt0 entry starting at ADDR
19873 or (bfd_vma) -1 if size can not be determined. */
19874
19875 static bfd_vma
19876 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19877 {
19878 bfd_vma first_word;
19879 bfd_vma plt0_size;
19880
19881 first_word = read_code32 (abfd, addr);
19882
19883 if (first_word == elf32_arm_plt0_entry[0])
19884 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19885 else if (first_word == elf32_thumb2_plt0_entry[0])
19886 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19887 else
19888 /* We don't yet handle this PLT format. */
19889 return (bfd_vma) -1;
19890
19891 return plt0_size;
19892 }
19893
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

  /* Note: the final `else' below pairs with whichever `if'/`else if'
     chain the preprocessor selects.  */
#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
19933
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Creates one synthetic "<name>@plt" symbol per .rel.plt entry, sized by
   parsing the PLT entries themselves.  Returns the number of symbols
   created (stored through RET), or -1 on failure.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* The PLT reloc section must reference the dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache the contents so the BFD owns (and later frees) them.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* First pass: compute the total allocation — the symbol array plus
     space for all the "name@plt[+0xADDEND]" strings after it.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    /* NOTE(review): *ret keeps the allocation here; presumably the
       caller frees it — confirm against generic BFD callers.  */
    return -1;

  /* Second pass: fill in the symbols; names are packed directly after
     the symbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Skip leading zeroes produced by bfd_sprintf_vma.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20050
20051 static bfd_boolean
20052 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20053 {
20054 if (hdr->sh_flags & SHF_ARM_PURECODE)
20055 *flags |= SEC_ELF_PURECODE;
20056 return TRUE;
20057 }
20058
20059 static flagword
20060 elf32_arm_lookup_section_flags (char *flag_name)
20061 {
20062 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20063 return SHF_ARM_PURECODE;
20064
20065 return SEC_NO_FLAGS;
20066 }
20067
20068 static unsigned int
20069 elf32_arm_count_additional_relocs (asection *sec)
20070 {
20071 struct _arm_elf_section_data *arm_data;
20072 arm_data = get_arm_elf_section_data (sec);
20073
20074 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20075 }
20076
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Find the output section header matching the output section
	       of the input's linked text section.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	/* i == 0 here means no candidate text section was found, in
	   which case we fall through and report failure.  */
	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20167
20168 /* Returns TRUE if NAME is an ARM mapping symbol.
20169 Traditionally the symbols $a, $d and $t have been used.
20170 The ARM ELF standard also defines $x (for A64 code). It also allows a
20171 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20172 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20173 not support them here. $t.x indicates the start of ThumbEE instructions. */
20174
20175 static bfd_boolean
20176 is_arm_mapping_symbol (const char * name)
20177 {
20178 return name != NULL /* Paranoia. */
20179 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20180 the mapping symbols could have acquired a prefix.
20181 We do not support this here, since such symbols no
20182 longer conform to the ARM ELF ABI. */
20183 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20184 && (name[2] == 0 || name[2] == '.');
20185 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20186 any characters that follow the period are legal characters for the body
20187 of a symbol's name. For now we just assume that this is the case. */
20188 }
20189
20190 /* Make sure that mapping symbols in object files are not removed via the
20191 "strip --strip-unneeded" tool. These symbols are needed in order to
20192 correctly generate interworking veneers, and for byte swapping code
20193 regions. Once an object file has been linked, it is safe to remove the
20194 symbols as they will no longer be needed. */
20195
20196 static void
20197 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20198 {
20199 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20200 && sym->section != bfd_abs_section_ptr
20201 && is_arm_mapping_symbol (sym->name))
20202 sym->flags |= BSF_KEEP;
20203 }
20204
#undef	elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields

/* Core target parameters for the generic (Linux/bare-metal) ARM ELF
   backend.  QNX historically uses 4K max pages; everywhere else 64K.  */
#define ELF_ARCH			bfd_arch_arm
#define ELF_TARGET_ID			ARM_ELF_DATA
#define ELF_MACHINE_CODE		EM_ARM
#ifdef __QNXTARGET__
#define ELF_MAXPAGESIZE			0x1000
#else
#define ELF_MAXPAGESIZE			0x10000
#endif
#define ELF_MINPAGESIZE			0x1000
#define ELF_COMMONPAGESIZE		0x1000

#define bfd_elf32_mkobject		elf32_arm_mkobject

/* Generic BFD entry points overridden by this backend.  */
#define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
#define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
#define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
#define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
#define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
#define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
#define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
#define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
#define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
#define bfd_elf32_bfd_final_link		elf32_arm_final_link
#define bfd_elf32_get_synthetic_symtab		elf32_arm_get_synthetic_symtab

/* ELF linker backend hooks.  */
#define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
#define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
#define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
#define elf_backend_check_relocs		elf32_arm_check_relocs
#define elf_backend_update_relocs		elf32_arm_update_relocs
#define elf_backend_relocate_section		elf32_arm_relocate_section
#define elf_backend_write_section		elf32_arm_write_section
#define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
#define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
#define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
#define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
#define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
#define elf_backend_always_size_sections	elf32_arm_always_size_sections
#define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
#define elf_backend_post_process_headers	elf32_arm_post_process_headers
#define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
#define elf_backend_object_p			elf32_arm_object_p
#define elf_backend_fake_sections		elf32_arm_fake_sections
#define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
#define elf_backend_size_info			elf32_arm_size_info
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
#define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
#define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
#define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
#define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
#define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
#define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing

/* Backend feature flags.  The default ARM target uses REL relocations.  */
#define elf_backend_can_refcount	1
#define elf_backend_can_gc_sections	1
#define elf_backend_plt_readonly	1
#define elf_backend_want_got_plt	1
#define elf_backend_want_plt_sym	0
#define elf_backend_want_dynrelro	1
#define elf_backend_may_use_rel_p	1
#define elf_backend_may_use_rela_p	0
#define elf_backend_default_use_rela_p	0
#define elf_backend_dtrel_excludes_plt	1

#define elf_backend_got_header_size	12
#define elf_backend_extern_protected_data 1

/* EABI build attributes live in the ".ARM.attributes" section with the
   "aeabi" vendor name.  */
#undef	elf_backend_obj_attrs_vendor
#define elf_backend_obj_attrs_vendor		"aeabi"
#undef	elf_backend_obj_attrs_section
#define elf_backend_obj_attrs_section		".ARM.attributes"
#undef	elf_backend_obj_attrs_arg_type
#define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
#undef	elf_backend_obj_attrs_section_type
#define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
#define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
#define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown

#undef	elf_backend_section_flags
#define elf_backend_section_flags		elf32_arm_section_flags
#undef	elf_backend_lookup_section_flags_hook
#define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags

#define elf_backend_linux_prpsinfo32_ugid16	TRUE

/* Instantiate the default little/big-endian ARM target vectors.  */
#include "elf32-target.h"
20299
/* Native Client targets.  Re-instantiate the target below with
   NaCl-specific vectors, names and hook overrides.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20311 /* Like elf32_arm_link_hash_table_create -- but overrides
20312 appropriately for NaCl. */
20313
20314 static struct bfd_link_hash_table *
20315 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20316 {
20317 struct bfd_link_hash_table *ret;
20318
20319 ret = elf32_arm_link_hash_table_create (abfd);
20320 if (ret)
20321 {
20322 struct elf32_arm_link_hash_table *htab
20323 = (struct elf32_arm_link_hash_table *) ret;
20324
20325 htab->nacl_p = 1;
20326
20327 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20328 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20329 }
20330 return ret;
20331 }
20332
20333 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20334 really need to use elf32_arm_modify_segment_map. But we do it
20335 anyway just to reduce gratuitous differences with the stock ARM backend. */
20336
20337 static bfd_boolean
20338 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20339 {
20340 return (elf32_arm_modify_segment_map (abfd, info)
20341 && nacl_modify_segment_map (abfd, info));
20342 }
20343
/* Final write processing for NaCl: run the generic ARM pass, then the
   NaCl-specific pass.  */

static void
elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  nacl_final_write_processing (abfd, linker);
}
20350
20351 static bfd_vma
20352 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20353 const arelent *rel ATTRIBUTE_UNUSED)
20354 {
20355 return plt->vma
20356 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20357 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20358 }
20359
/* NaCl-specific backend overrides.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_nacl_bed
#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	\
  elf32_arm_nacl_link_hash_table_create
#undef	elf_backend_plt_alignment
#define elf_backend_plt_alignment		4
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
#undef	elf_backend_modify_program_headers
#define elf_backend_modify_program_headers	nacl_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
#undef	bfd_elf32_get_synthetic_symtab
#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
#undef	elf_backend_copy_special_section_fields

#undef	ELF_MINPAGESIZE
#undef	ELF_COMMONPAGESIZE


/* Instantiate the NaCl target vectors.  */
#include "elf32-target.h"

/* Reset to defaults for the targets that follow.  */
#undef	elf_backend_plt_alignment
#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
#undef	elf_backend_modify_program_headers
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing
#undef	ELF_MINPAGESIZE
#define ELF_MINPAGESIZE			0x1000
#undef	ELF_COMMONPAGESIZE
#define ELF_COMMONPAGESIZE		0x1000
20395
20396
/* FDPIC Targets.  FDPIC binaries carry a distinct OSABI value and a
   lower match priority so they are only selected when explicitly
   requested or recognized.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
#undef	elf_match_priority
#define elf_match_priority		128
#undef	ELF_OSABI
#define ELF_OSABI			ELFOSABI_ARM_FDPIC
20411
20412 /* Like elf32_arm_link_hash_table_create -- but overrides
20413 appropriately for FDPIC. */
20414
20415 static struct bfd_link_hash_table *
20416 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20417 {
20418 struct bfd_link_hash_table *ret;
20419
20420 ret = elf32_arm_link_hash_table_create (abfd);
20421 if (ret)
20422 {
20423 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20424
20425 htab->fdpic_p = 1;
20426 }
20427 return ret;
20428 }
20429
20430 /* We need dynamic symbols for every section, since segments can
20431 relocate independently. */
20432 static bfd_boolean
20433 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20434 struct bfd_link_info *info
20435 ATTRIBUTE_UNUSED,
20436 asection *p ATTRIBUTE_UNUSED)
20437 {
20438 switch (elf_section_data (p)->this_hdr.sh_type)
20439 {
20440 case SHT_PROGBITS:
20441 case SHT_NOBITS:
20442 /* If sh_type is yet undecided, assume it could be
20443 SHT_PROGBITS/SHT_NOBITS. */
20444 case SHT_NULL:
20445 return FALSE;
20446
20447 /* There shouldn't be section relative relocations
20448 against any other section. */
20449 default:
20450 return TRUE;
20451 }
20452 }
20453
/* FDPIC-specific backend overrides.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_fdpic_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create

#undef	elf_backend_omit_section_dynsym
#define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym

/* Instantiate the FDPIC target vectors.  */
#include "elf32-target.h"

/* Reset FDPIC-specific settings for the targets that follow.  */
#undef	elf_match_priority
#undef	ELF_OSABI
#undef	elf_backend_omit_section_dynsym
20468
/* VxWorks Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20479
20480 /* Like elf32_arm_link_hash_table_create -- but overrides
20481 appropriately for VxWorks. */
20482
20483 static struct bfd_link_hash_table *
20484 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20485 {
20486 struct bfd_link_hash_table *ret;
20487
20488 ret = elf32_arm_link_hash_table_create (abfd);
20489 if (ret)
20490 {
20491 struct elf32_arm_link_hash_table *htab
20492 = (struct elf32_arm_link_hash_table *) ret;
20493 htab->use_rel = 0;
20494 htab->vxworks_p = 1;
20495 }
20496 return ret;
20497 }
20498
/* Final write processing for VxWorks: run the generic ARM pass, then
   the VxWorks-specific pass.  */

static void
elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
{
  elf32_arm_final_write_processing (abfd, linker);
  elf_vxworks_final_write_processing (abfd, linker);
}
20505
/* VxWorks-specific backend overrides.  VxWorks is the one ARM target
   that uses RELA relocations, wants PLT symbols, and caps the page
   size at 4K.  */
#undef	elf32_bed
#define elf32_bed				elf32_arm_vxworks_bed

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
#undef	elf_backend_emit_relocs
#define elf_backend_emit_relocs			elf_vxworks_emit_relocs

#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p	0
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p	1
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p	1
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	1
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE			0x1000

/* Instantiate the VxWorks target vectors.  */
#include "elf32-target.h"
20528
20529
/* Merge backend specific data from an object file to the output
   object file when linking.

   IBFD is the input object; the output BFD is taken from INFO.
   Returns FALSE (with an error or warning already reported) on a hard
   incompatibility; returns TRUE when the flags are compatible or when
   the check is not applicable.  Some mismatches (interworking) are
   warnings only and still return TRUE via FLAGS_COMPATIBLE.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Only ARM/ARM pairs need ARM-specific merging.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attributes first; this can fail in its own
     right.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_get_section_flags (ibfd, sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      null_input_bfd = FALSE;
	      /* NOTE(review): this break exits after the FIRST non-glue
		 section, so only_data_sections reflects that section
		 alone -- confirm that later code sections never need to
		 be considered here.  */
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      /* APCS-26 vs APCS-32 program counter width.  */
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      /* Float argument passing convention (float vs integer regs).  */
      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      /* VFP vs FPA floating-point instruction set.  */
      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      /* Cirrus Maverick coprocessor usage.  */
      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
20752
20753
/* Symbian OS Targets.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
#undef	TARGET_BIG_SYM
#define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
#undef	TARGET_BIG_NAME
#define TARGET_BIG_NAME			"elf32-bigarm-symbian"
20764
20765 /* Like elf32_arm_link_hash_table_create -- but overrides
20766 appropriately for Symbian OS. */
20767
20768 static struct bfd_link_hash_table *
20769 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20770 {
20771 struct bfd_link_hash_table *ret;
20772
20773 ret = elf32_arm_link_hash_table_create (abfd);
20774 if (ret)
20775 {
20776 struct elf32_arm_link_hash_table *htab
20777 = (struct elf32_arm_link_hash_table *)ret;
20778 /* There is no PLT header for Symbian OS. */
20779 htab->plt_header_size = 0;
20780 /* The PLT entries are each one instruction and one word. */
20781 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20782 htab->symbian_p = 1;
20783 /* Symbian uses armv5t or above, so use_blx is always true. */
20784 htab->use_blx = 1;
20785 htab->root.is_relocatable_executable = 1;
20786 }
20787 return ret;
20788 }
20789
/* Special-section table for Symbian OS, consumed via
   elf_backend_special_sections.  Each entry gives a section-name
   prefix, its length, and the sh_type/sh_flags to force on it.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
  { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
  { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
  { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  /* Terminating sentinel.  */
  { NULL,			      0, 0, 0,		  0 }
};
20810
/* Prepare ABFD for writing as a BPABI (Symbian) object, then chain to
   the generic ARM begin-write hook.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
20826
20827 static bfd_boolean
20828 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20829 struct bfd_link_info *info)
20830 {
20831 struct elf_segment_map *m;
20832 asection *dynsec;
20833
20834 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20835 segment. However, because the .dynamic section is not marked
20836 with SEC_LOAD, the generic ELF code will not create such a
20837 segment. */
20838 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20839 if (dynsec)
20840 {
20841 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20842 if (m->p_type == PT_DYNAMIC)
20843 break;
20844
20845 if (m == NULL)
20846 {
20847 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20848 m->next = elf_seg_map (abfd);
20849 elf_seg_map (abfd) = m;
20850 }
20851 }
20852
20853 /* Also call the generic arm routine. */
20854 return elf32_arm_modify_segment_map (abfd, info);
20855 }
20856
20857 /* Return address for Ith PLT stub in section PLT, for relocation REL
20858 or (bfd_vma) -1 if it should not be included. */
20859
20860 static bfd_vma
20861 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
20862 const arelent *rel ATTRIBUTE_UNUSED)
20863 {
20864 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
20865 }
20866
/* Symbian-specific backend overrides.  */
#undef	elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef	ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

#undef	elf_backend_emit_relocs

#undef	bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
#undef	elf_backend_special_sections
#define elf_backend_special_sections		elf32_arm_symbian_special_sections
#undef	elf_backend_begin_write_processing
#define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
#undef	elf_backend_final_write_processing
#define elf_backend_final_write_processing	elf32_arm_final_write_processing

#undef	elf_backend_modify_segment_map
#define elf_backend_modify_segment_map		elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef	elf_backend_got_header_size
#define elf_backend_got_header_size		0

/* Similarly, there is no .got.plt section.  */
#undef	elf_backend_want_got_plt
#define elf_backend_want_got_plt		0

#undef	elf_backend_plt_sym_val
#define elf_backend_plt_sym_val			elf32_arm_symbian_plt_sym_val

/* Symbian reverts to REL relocations and a larger max page size.  */
#undef	elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p		1
#undef	elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p		0
#undef	elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p		0
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym		0
#undef	elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt		0
#undef	ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE				0x8000

/* Instantiate the Symbian target vectors.  */
#include "elf32-target.h"