]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - bfd/elf32-arm.c
69d3ba16eebb1d7f413452267e128c10ab399cea
[thirdparty/binutils-gdb.git] / bfd / elf32-arm.c
1 /* 32-bit ELF support for ARM
2 Copyright (C) 1998-2020 Free Software Foundation, Inc.
3
4 This file is part of BFD, the Binary File Descriptor library.
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 3 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19 MA 02110-1301, USA. */
20
21 #include "sysdep.h"
22 #include <limits.h>
23
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33
/* Pick the REL- or RELA-flavoured relocation section name for NAME,
   e.g. ".text" -> ".rel.text" or ".rela.text".  HTAB is the bfd's ARM
   ELF link hash table; its use_rel flag selects the flavour.  */
#define RELOC_SECTION(HTAB, NAME) \
  (!(HTAB)->use_rel ? ".rela" NAME : ".rel" NAME)
38
/* Size in bytes of one external relocation record, REL or RELA as
   selected by the link hash table HTAB's use_rel flag.  */
#define RELOC_SIZE(HTAB) \
  (!(HTAB)->use_rel			\
   ? sizeof (Elf32_External_Rela)	\
   : sizeof (Elf32_External_Rel))
45
/* Function that byte-swaps one external relocation into internal form,
   REL or RELA as selected by the link hash table HTAB's use_rel flag.  */
#define SWAP_RELOC_IN(HTAB) \
  (!(HTAB)->use_rel ? bfd_elf32_swap_reloca_in : bfd_elf32_swap_reloc_in)
52
/* Function that byte-swaps one internal relocation out to external form,
   REL or RELA as selected by the link hash table HTAB's use_rel flag.  */
#define SWAP_RELOC_OUT(HTAB) \
  (!(HTAB)->use_rel ? bfd_elf32_swap_reloca_out : bfd_elf32_swap_reloc_out)
59
/* Relocation-decoding hooks consumed by elfxx-target.h: this backend
   routes everything through elf32_arm_info_to_howto via the Rel hook,
   and leaves the Rela hook explicitly unset.  */
#define elf_info_to_howto NULL
#define elf_info_to_howto_rel elf32_arm_info_to_howto

/* ABI identifiers for this target -- presumably emitted into the ELF
   header's EI_ABIVERSION / EI_OSABI fields; confirm at the use sites
   later in this file.  */
#define ARM_ELF_ABI_VERSION 0
#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM

/* The Adjusted Place, as defined by AAELF: the place X with its two
   low bits cleared, i.e. rounded down to a word boundary.  */
#define Pa(X) ((X) & 0xfffffffc)
68
/* Forward declaration; the definition appears later in this file.
   Applies ARM-specific processing to SEC's CONTENTS as they are written
   to OUTPUT_BFD under LINK_INFO -- NOTE(review): exact behavior (veneer
   and fixup handling) is in the out-of-view definition; confirm there.
   Returns FALSE on error.  */
static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
					    struct bfd_link_info *link_info,
					    asection *sec,
					    bfd_byte *contents);
73
74 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
75 R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76 in that slot. */
77
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80 /* No relocation. */
81 HOWTO (R_ARM_NONE, /* type */
82 0, /* rightshift */
83 3, /* size (0 = byte, 1 = short, 2 = long) */
84 0, /* bitsize */
85 FALSE, /* pc_relative */
86 0, /* bitpos */
87 complain_overflow_dont,/* complain_on_overflow */
88 bfd_elf_generic_reloc, /* special_function */
89 "R_ARM_NONE", /* name */
90 FALSE, /* partial_inplace */
91 0, /* src_mask */
92 0, /* dst_mask */
93 FALSE), /* pcrel_offset */
94
95 HOWTO (R_ARM_PC24, /* type */
96 2, /* rightshift */
97 2, /* size (0 = byte, 1 = short, 2 = long) */
98 24, /* bitsize */
99 TRUE, /* pc_relative */
100 0, /* bitpos */
101 complain_overflow_signed,/* complain_on_overflow */
102 bfd_elf_generic_reloc, /* special_function */
103 "R_ARM_PC24", /* name */
104 FALSE, /* partial_inplace */
105 0x00ffffff, /* src_mask */
106 0x00ffffff, /* dst_mask */
107 TRUE), /* pcrel_offset */
108
109 /* 32 bit absolute */
110 HOWTO (R_ARM_ABS32, /* type */
111 0, /* rightshift */
112 2, /* size (0 = byte, 1 = short, 2 = long) */
113 32, /* bitsize */
114 FALSE, /* pc_relative */
115 0, /* bitpos */
116 complain_overflow_bitfield,/* complain_on_overflow */
117 bfd_elf_generic_reloc, /* special_function */
118 "R_ARM_ABS32", /* name */
119 FALSE, /* partial_inplace */
120 0xffffffff, /* src_mask */
121 0xffffffff, /* dst_mask */
122 FALSE), /* pcrel_offset */
123
124 /* standard 32bit pc-relative reloc */
125 HOWTO (R_ARM_REL32, /* type */
126 0, /* rightshift */
127 2, /* size (0 = byte, 1 = short, 2 = long) */
128 32, /* bitsize */
129 TRUE, /* pc_relative */
130 0, /* bitpos */
131 complain_overflow_bitfield,/* complain_on_overflow */
132 bfd_elf_generic_reloc, /* special_function */
133 "R_ARM_REL32", /* name */
134 FALSE, /* partial_inplace */
135 0xffffffff, /* src_mask */
136 0xffffffff, /* dst_mask */
137 TRUE), /* pcrel_offset */
138
139 /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140 HOWTO (R_ARM_LDR_PC_G0, /* type */
141 0, /* rightshift */
142 0, /* size (0 = byte, 1 = short, 2 = long) */
143 32, /* bitsize */
144 TRUE, /* pc_relative */
145 0, /* bitpos */
146 complain_overflow_dont,/* complain_on_overflow */
147 bfd_elf_generic_reloc, /* special_function */
148 "R_ARM_LDR_PC_G0", /* name */
149 FALSE, /* partial_inplace */
150 0xffffffff, /* src_mask */
151 0xffffffff, /* dst_mask */
152 TRUE), /* pcrel_offset */
153
154 /* 16 bit absolute */
155 HOWTO (R_ARM_ABS16, /* type */
156 0, /* rightshift */
157 1, /* size (0 = byte, 1 = short, 2 = long) */
158 16, /* bitsize */
159 FALSE, /* pc_relative */
160 0, /* bitpos */
161 complain_overflow_bitfield,/* complain_on_overflow */
162 bfd_elf_generic_reloc, /* special_function */
163 "R_ARM_ABS16", /* name */
164 FALSE, /* partial_inplace */
165 0x0000ffff, /* src_mask */
166 0x0000ffff, /* dst_mask */
167 FALSE), /* pcrel_offset */
168
169 /* 12 bit absolute */
170 HOWTO (R_ARM_ABS12, /* type */
171 0, /* rightshift */
172 2, /* size (0 = byte, 1 = short, 2 = long) */
173 12, /* bitsize */
174 FALSE, /* pc_relative */
175 0, /* bitpos */
176 complain_overflow_bitfield,/* complain_on_overflow */
177 bfd_elf_generic_reloc, /* special_function */
178 "R_ARM_ABS12", /* name */
179 FALSE, /* partial_inplace */
180 0x00000fff, /* src_mask */
181 0x00000fff, /* dst_mask */
182 FALSE), /* pcrel_offset */
183
184 HOWTO (R_ARM_THM_ABS5, /* type */
185 6, /* rightshift */
186 1, /* size (0 = byte, 1 = short, 2 = long) */
187 5, /* bitsize */
188 FALSE, /* pc_relative */
189 0, /* bitpos */
190 complain_overflow_bitfield,/* complain_on_overflow */
191 bfd_elf_generic_reloc, /* special_function */
192 "R_ARM_THM_ABS5", /* name */
193 FALSE, /* partial_inplace */
194 0x000007e0, /* src_mask */
195 0x000007e0, /* dst_mask */
196 FALSE), /* pcrel_offset */
197
198 /* 8 bit absolute */
199 HOWTO (R_ARM_ABS8, /* type */
200 0, /* rightshift */
201 0, /* size (0 = byte, 1 = short, 2 = long) */
202 8, /* bitsize */
203 FALSE, /* pc_relative */
204 0, /* bitpos */
205 complain_overflow_bitfield,/* complain_on_overflow */
206 bfd_elf_generic_reloc, /* special_function */
207 "R_ARM_ABS8", /* name */
208 FALSE, /* partial_inplace */
209 0x000000ff, /* src_mask */
210 0x000000ff, /* dst_mask */
211 FALSE), /* pcrel_offset */
212
213 HOWTO (R_ARM_SBREL32, /* type */
214 0, /* rightshift */
215 2, /* size (0 = byte, 1 = short, 2 = long) */
216 32, /* bitsize */
217 FALSE, /* pc_relative */
218 0, /* bitpos */
219 complain_overflow_dont,/* complain_on_overflow */
220 bfd_elf_generic_reloc, /* special_function */
221 "R_ARM_SBREL32", /* name */
222 FALSE, /* partial_inplace */
223 0xffffffff, /* src_mask */
224 0xffffffff, /* dst_mask */
225 FALSE), /* pcrel_offset */
226
227 HOWTO (R_ARM_THM_CALL, /* type */
228 1, /* rightshift */
229 2, /* size (0 = byte, 1 = short, 2 = long) */
230 24, /* bitsize */
231 TRUE, /* pc_relative */
232 0, /* bitpos */
233 complain_overflow_signed,/* complain_on_overflow */
234 bfd_elf_generic_reloc, /* special_function */
235 "R_ARM_THM_CALL", /* name */
236 FALSE, /* partial_inplace */
237 0x07ff2fff, /* src_mask */
238 0x07ff2fff, /* dst_mask */
239 TRUE), /* pcrel_offset */
240
241 HOWTO (R_ARM_THM_PC8, /* type */
242 1, /* rightshift */
243 1, /* size (0 = byte, 1 = short, 2 = long) */
244 8, /* bitsize */
245 TRUE, /* pc_relative */
246 0, /* bitpos */
247 complain_overflow_signed,/* complain_on_overflow */
248 bfd_elf_generic_reloc, /* special_function */
249 "R_ARM_THM_PC8", /* name */
250 FALSE, /* partial_inplace */
251 0x000000ff, /* src_mask */
252 0x000000ff, /* dst_mask */
253 TRUE), /* pcrel_offset */
254
255 HOWTO (R_ARM_BREL_ADJ, /* type */
256 1, /* rightshift */
257 1, /* size (0 = byte, 1 = short, 2 = long) */
258 32, /* bitsize */
259 FALSE, /* pc_relative */
260 0, /* bitpos */
261 complain_overflow_signed,/* complain_on_overflow */
262 bfd_elf_generic_reloc, /* special_function */
263 "R_ARM_BREL_ADJ", /* name */
264 FALSE, /* partial_inplace */
265 0xffffffff, /* src_mask */
266 0xffffffff, /* dst_mask */
267 FALSE), /* pcrel_offset */
268
269 HOWTO (R_ARM_TLS_DESC, /* type */
270 0, /* rightshift */
271 2, /* size (0 = byte, 1 = short, 2 = long) */
272 32, /* bitsize */
273 FALSE, /* pc_relative */
274 0, /* bitpos */
275 complain_overflow_bitfield,/* complain_on_overflow */
276 bfd_elf_generic_reloc, /* special_function */
277 "R_ARM_TLS_DESC", /* name */
278 FALSE, /* partial_inplace */
279 0xffffffff, /* src_mask */
280 0xffffffff, /* dst_mask */
281 FALSE), /* pcrel_offset */
282
283 HOWTO (R_ARM_THM_SWI8, /* type */
284 0, /* rightshift */
285 0, /* size (0 = byte, 1 = short, 2 = long) */
286 0, /* bitsize */
287 FALSE, /* pc_relative */
288 0, /* bitpos */
289 complain_overflow_signed,/* complain_on_overflow */
290 bfd_elf_generic_reloc, /* special_function */
291 "R_ARM_SWI8", /* name */
292 FALSE, /* partial_inplace */
293 0x00000000, /* src_mask */
294 0x00000000, /* dst_mask */
295 FALSE), /* pcrel_offset */
296
297 /* BLX instruction for the ARM. */
298 HOWTO (R_ARM_XPC25, /* type */
299 2, /* rightshift */
300 2, /* size (0 = byte, 1 = short, 2 = long) */
301 24, /* bitsize */
302 TRUE, /* pc_relative */
303 0, /* bitpos */
304 complain_overflow_signed,/* complain_on_overflow */
305 bfd_elf_generic_reloc, /* special_function */
306 "R_ARM_XPC25", /* name */
307 FALSE, /* partial_inplace */
308 0x00ffffff, /* src_mask */
309 0x00ffffff, /* dst_mask */
310 TRUE), /* pcrel_offset */
311
312 /* BLX instruction for the Thumb. */
313 HOWTO (R_ARM_THM_XPC22, /* type */
314 2, /* rightshift */
315 2, /* size (0 = byte, 1 = short, 2 = long) */
316 24, /* bitsize */
317 TRUE, /* pc_relative */
318 0, /* bitpos */
319 complain_overflow_signed,/* complain_on_overflow */
320 bfd_elf_generic_reloc, /* special_function */
321 "R_ARM_THM_XPC22", /* name */
322 FALSE, /* partial_inplace */
323 0x07ff2fff, /* src_mask */
324 0x07ff2fff, /* dst_mask */
325 TRUE), /* pcrel_offset */
326
327 /* Dynamic TLS relocations. */
328
329 HOWTO (R_ARM_TLS_DTPMOD32, /* type */
330 0, /* rightshift */
331 2, /* size (0 = byte, 1 = short, 2 = long) */
332 32, /* bitsize */
333 FALSE, /* pc_relative */
334 0, /* bitpos */
335 complain_overflow_bitfield,/* complain_on_overflow */
336 bfd_elf_generic_reloc, /* special_function */
337 "R_ARM_TLS_DTPMOD32", /* name */
338 TRUE, /* partial_inplace */
339 0xffffffff, /* src_mask */
340 0xffffffff, /* dst_mask */
341 FALSE), /* pcrel_offset */
342
343 HOWTO (R_ARM_TLS_DTPOFF32, /* type */
344 0, /* rightshift */
345 2, /* size (0 = byte, 1 = short, 2 = long) */
346 32, /* bitsize */
347 FALSE, /* pc_relative */
348 0, /* bitpos */
349 complain_overflow_bitfield,/* complain_on_overflow */
350 bfd_elf_generic_reloc, /* special_function */
351 "R_ARM_TLS_DTPOFF32", /* name */
352 TRUE, /* partial_inplace */
353 0xffffffff, /* src_mask */
354 0xffffffff, /* dst_mask */
355 FALSE), /* pcrel_offset */
356
357 HOWTO (R_ARM_TLS_TPOFF32, /* type */
358 0, /* rightshift */
359 2, /* size (0 = byte, 1 = short, 2 = long) */
360 32, /* bitsize */
361 FALSE, /* pc_relative */
362 0, /* bitpos */
363 complain_overflow_bitfield,/* complain_on_overflow */
364 bfd_elf_generic_reloc, /* special_function */
365 "R_ARM_TLS_TPOFF32", /* name */
366 TRUE, /* partial_inplace */
367 0xffffffff, /* src_mask */
368 0xffffffff, /* dst_mask */
369 FALSE), /* pcrel_offset */
370
371 /* Relocs used in ARM Linux */
372
373 HOWTO (R_ARM_COPY, /* type */
374 0, /* rightshift */
375 2, /* size (0 = byte, 1 = short, 2 = long) */
376 32, /* bitsize */
377 FALSE, /* pc_relative */
378 0, /* bitpos */
379 complain_overflow_bitfield,/* complain_on_overflow */
380 bfd_elf_generic_reloc, /* special_function */
381 "R_ARM_COPY", /* name */
382 TRUE, /* partial_inplace */
383 0xffffffff, /* src_mask */
384 0xffffffff, /* dst_mask */
385 FALSE), /* pcrel_offset */
386
387 HOWTO (R_ARM_GLOB_DAT, /* type */
388 0, /* rightshift */
389 2, /* size (0 = byte, 1 = short, 2 = long) */
390 32, /* bitsize */
391 FALSE, /* pc_relative */
392 0, /* bitpos */
393 complain_overflow_bitfield,/* complain_on_overflow */
394 bfd_elf_generic_reloc, /* special_function */
395 "R_ARM_GLOB_DAT", /* name */
396 TRUE, /* partial_inplace */
397 0xffffffff, /* src_mask */
398 0xffffffff, /* dst_mask */
399 FALSE), /* pcrel_offset */
400
401 HOWTO (R_ARM_JUMP_SLOT, /* type */
402 0, /* rightshift */
403 2, /* size (0 = byte, 1 = short, 2 = long) */
404 32, /* bitsize */
405 FALSE, /* pc_relative */
406 0, /* bitpos */
407 complain_overflow_bitfield,/* complain_on_overflow */
408 bfd_elf_generic_reloc, /* special_function */
409 "R_ARM_JUMP_SLOT", /* name */
410 TRUE, /* partial_inplace */
411 0xffffffff, /* src_mask */
412 0xffffffff, /* dst_mask */
413 FALSE), /* pcrel_offset */
414
415 HOWTO (R_ARM_RELATIVE, /* type */
416 0, /* rightshift */
417 2, /* size (0 = byte, 1 = short, 2 = long) */
418 32, /* bitsize */
419 FALSE, /* pc_relative */
420 0, /* bitpos */
421 complain_overflow_bitfield,/* complain_on_overflow */
422 bfd_elf_generic_reloc, /* special_function */
423 "R_ARM_RELATIVE", /* name */
424 TRUE, /* partial_inplace */
425 0xffffffff, /* src_mask */
426 0xffffffff, /* dst_mask */
427 FALSE), /* pcrel_offset */
428
429 HOWTO (R_ARM_GOTOFF32, /* type */
430 0, /* rightshift */
431 2, /* size (0 = byte, 1 = short, 2 = long) */
432 32, /* bitsize */
433 FALSE, /* pc_relative */
434 0, /* bitpos */
435 complain_overflow_bitfield,/* complain_on_overflow */
436 bfd_elf_generic_reloc, /* special_function */
437 "R_ARM_GOTOFF32", /* name */
438 TRUE, /* partial_inplace */
439 0xffffffff, /* src_mask */
440 0xffffffff, /* dst_mask */
441 FALSE), /* pcrel_offset */
442
443 HOWTO (R_ARM_GOTPC, /* type */
444 0, /* rightshift */
445 2, /* size (0 = byte, 1 = short, 2 = long) */
446 32, /* bitsize */
447 TRUE, /* pc_relative */
448 0, /* bitpos */
449 complain_overflow_bitfield,/* complain_on_overflow */
450 bfd_elf_generic_reloc, /* special_function */
451 "R_ARM_GOTPC", /* name */
452 TRUE, /* partial_inplace */
453 0xffffffff, /* src_mask */
454 0xffffffff, /* dst_mask */
455 TRUE), /* pcrel_offset */
456
457 HOWTO (R_ARM_GOT32, /* type */
458 0, /* rightshift */
459 2, /* size (0 = byte, 1 = short, 2 = long) */
460 32, /* bitsize */
461 FALSE, /* pc_relative */
462 0, /* bitpos */
463 complain_overflow_bitfield,/* complain_on_overflow */
464 bfd_elf_generic_reloc, /* special_function */
465 "R_ARM_GOT32", /* name */
466 TRUE, /* partial_inplace */
467 0xffffffff, /* src_mask */
468 0xffffffff, /* dst_mask */
469 FALSE), /* pcrel_offset */
470
471 HOWTO (R_ARM_PLT32, /* type */
472 2, /* rightshift */
473 2, /* size (0 = byte, 1 = short, 2 = long) */
474 24, /* bitsize */
475 TRUE, /* pc_relative */
476 0, /* bitpos */
477 complain_overflow_bitfield,/* complain_on_overflow */
478 bfd_elf_generic_reloc, /* special_function */
479 "R_ARM_PLT32", /* name */
480 FALSE, /* partial_inplace */
481 0x00ffffff, /* src_mask */
482 0x00ffffff, /* dst_mask */
483 TRUE), /* pcrel_offset */
484
485 HOWTO (R_ARM_CALL, /* type */
486 2, /* rightshift */
487 2, /* size (0 = byte, 1 = short, 2 = long) */
488 24, /* bitsize */
489 TRUE, /* pc_relative */
490 0, /* bitpos */
491 complain_overflow_signed,/* complain_on_overflow */
492 bfd_elf_generic_reloc, /* special_function */
493 "R_ARM_CALL", /* name */
494 FALSE, /* partial_inplace */
495 0x00ffffff, /* src_mask */
496 0x00ffffff, /* dst_mask */
497 TRUE), /* pcrel_offset */
498
499 HOWTO (R_ARM_JUMP24, /* type */
500 2, /* rightshift */
501 2, /* size (0 = byte, 1 = short, 2 = long) */
502 24, /* bitsize */
503 TRUE, /* pc_relative */
504 0, /* bitpos */
505 complain_overflow_signed,/* complain_on_overflow */
506 bfd_elf_generic_reloc, /* special_function */
507 "R_ARM_JUMP24", /* name */
508 FALSE, /* partial_inplace */
509 0x00ffffff, /* src_mask */
510 0x00ffffff, /* dst_mask */
511 TRUE), /* pcrel_offset */
512
513 HOWTO (R_ARM_THM_JUMP24, /* type */
514 1, /* rightshift */
515 2, /* size (0 = byte, 1 = short, 2 = long) */
516 24, /* bitsize */
517 TRUE, /* pc_relative */
518 0, /* bitpos */
519 complain_overflow_signed,/* complain_on_overflow */
520 bfd_elf_generic_reloc, /* special_function */
521 "R_ARM_THM_JUMP24", /* name */
522 FALSE, /* partial_inplace */
523 0x07ff2fff, /* src_mask */
524 0x07ff2fff, /* dst_mask */
525 TRUE), /* pcrel_offset */
526
527 HOWTO (R_ARM_BASE_ABS, /* type */
528 0, /* rightshift */
529 2, /* size (0 = byte, 1 = short, 2 = long) */
530 32, /* bitsize */
531 FALSE, /* pc_relative */
532 0, /* bitpos */
533 complain_overflow_dont,/* complain_on_overflow */
534 bfd_elf_generic_reloc, /* special_function */
535 "R_ARM_BASE_ABS", /* name */
536 FALSE, /* partial_inplace */
537 0xffffffff, /* src_mask */
538 0xffffffff, /* dst_mask */
539 FALSE), /* pcrel_offset */
540
541 HOWTO (R_ARM_ALU_PCREL7_0, /* type */
542 0, /* rightshift */
543 2, /* size (0 = byte, 1 = short, 2 = long) */
544 12, /* bitsize */
545 TRUE, /* pc_relative */
546 0, /* bitpos */
547 complain_overflow_dont,/* complain_on_overflow */
548 bfd_elf_generic_reloc, /* special_function */
549 "R_ARM_ALU_PCREL_7_0", /* name */
550 FALSE, /* partial_inplace */
551 0x00000fff, /* src_mask */
552 0x00000fff, /* dst_mask */
553 TRUE), /* pcrel_offset */
554
555 HOWTO (R_ARM_ALU_PCREL15_8, /* type */
556 0, /* rightshift */
557 2, /* size (0 = byte, 1 = short, 2 = long) */
558 12, /* bitsize */
559 TRUE, /* pc_relative */
560 8, /* bitpos */
561 complain_overflow_dont,/* complain_on_overflow */
562 bfd_elf_generic_reloc, /* special_function */
563 "R_ARM_ALU_PCREL_15_8",/* name */
564 FALSE, /* partial_inplace */
565 0x00000fff, /* src_mask */
566 0x00000fff, /* dst_mask */
567 TRUE), /* pcrel_offset */
568
569 HOWTO (R_ARM_ALU_PCREL23_15, /* type */
570 0, /* rightshift */
571 2, /* size (0 = byte, 1 = short, 2 = long) */
572 12, /* bitsize */
573 TRUE, /* pc_relative */
574 16, /* bitpos */
575 complain_overflow_dont,/* complain_on_overflow */
576 bfd_elf_generic_reloc, /* special_function */
577 "R_ARM_ALU_PCREL_23_15",/* name */
578 FALSE, /* partial_inplace */
579 0x00000fff, /* src_mask */
580 0x00000fff, /* dst_mask */
581 TRUE), /* pcrel_offset */
582
583 HOWTO (R_ARM_LDR_SBREL_11_0, /* type */
584 0, /* rightshift */
585 2, /* size (0 = byte, 1 = short, 2 = long) */
586 12, /* bitsize */
587 FALSE, /* pc_relative */
588 0, /* bitpos */
589 complain_overflow_dont,/* complain_on_overflow */
590 bfd_elf_generic_reloc, /* special_function */
591 "R_ARM_LDR_SBREL_11_0",/* name */
592 FALSE, /* partial_inplace */
593 0x00000fff, /* src_mask */
594 0x00000fff, /* dst_mask */
595 FALSE), /* pcrel_offset */
596
597 HOWTO (R_ARM_ALU_SBREL_19_12, /* type */
598 0, /* rightshift */
599 2, /* size (0 = byte, 1 = short, 2 = long) */
600 8, /* bitsize */
601 FALSE, /* pc_relative */
602 12, /* bitpos */
603 complain_overflow_dont,/* complain_on_overflow */
604 bfd_elf_generic_reloc, /* special_function */
605 "R_ARM_ALU_SBREL_19_12",/* name */
606 FALSE, /* partial_inplace */
607 0x000ff000, /* src_mask */
608 0x000ff000, /* dst_mask */
609 FALSE), /* pcrel_offset */
610
611 HOWTO (R_ARM_ALU_SBREL_27_20, /* type */
612 0, /* rightshift */
613 2, /* size (0 = byte, 1 = short, 2 = long) */
614 8, /* bitsize */
615 FALSE, /* pc_relative */
616 20, /* bitpos */
617 complain_overflow_dont,/* complain_on_overflow */
618 bfd_elf_generic_reloc, /* special_function */
619 "R_ARM_ALU_SBREL_27_20",/* name */
620 FALSE, /* partial_inplace */
621 0x0ff00000, /* src_mask */
622 0x0ff00000, /* dst_mask */
623 FALSE), /* pcrel_offset */
624
625 HOWTO (R_ARM_TARGET1, /* type */
626 0, /* rightshift */
627 2, /* size (0 = byte, 1 = short, 2 = long) */
628 32, /* bitsize */
629 FALSE, /* pc_relative */
630 0, /* bitpos */
631 complain_overflow_dont,/* complain_on_overflow */
632 bfd_elf_generic_reloc, /* special_function */
633 "R_ARM_TARGET1", /* name */
634 FALSE, /* partial_inplace */
635 0xffffffff, /* src_mask */
636 0xffffffff, /* dst_mask */
637 FALSE), /* pcrel_offset */
638
639 HOWTO (R_ARM_ROSEGREL32, /* type */
640 0, /* rightshift */
641 2, /* size (0 = byte, 1 = short, 2 = long) */
642 32, /* bitsize */
643 FALSE, /* pc_relative */
644 0, /* bitpos */
645 complain_overflow_dont,/* complain_on_overflow */
646 bfd_elf_generic_reloc, /* special_function */
647 "R_ARM_ROSEGREL32", /* name */
648 FALSE, /* partial_inplace */
649 0xffffffff, /* src_mask */
650 0xffffffff, /* dst_mask */
651 FALSE), /* pcrel_offset */
652
653 HOWTO (R_ARM_V4BX, /* type */
654 0, /* rightshift */
655 2, /* size (0 = byte, 1 = short, 2 = long) */
656 32, /* bitsize */
657 FALSE, /* pc_relative */
658 0, /* bitpos */
659 complain_overflow_dont,/* complain_on_overflow */
660 bfd_elf_generic_reloc, /* special_function */
661 "R_ARM_V4BX", /* name */
662 FALSE, /* partial_inplace */
663 0xffffffff, /* src_mask */
664 0xffffffff, /* dst_mask */
665 FALSE), /* pcrel_offset */
666
667 HOWTO (R_ARM_TARGET2, /* type */
668 0, /* rightshift */
669 2, /* size (0 = byte, 1 = short, 2 = long) */
670 32, /* bitsize */
671 FALSE, /* pc_relative */
672 0, /* bitpos */
673 complain_overflow_signed,/* complain_on_overflow */
674 bfd_elf_generic_reloc, /* special_function */
675 "R_ARM_TARGET2", /* name */
676 FALSE, /* partial_inplace */
677 0xffffffff, /* src_mask */
678 0xffffffff, /* dst_mask */
679 TRUE), /* pcrel_offset */
680
681 HOWTO (R_ARM_PREL31, /* type */
682 0, /* rightshift */
683 2, /* size (0 = byte, 1 = short, 2 = long) */
684 31, /* bitsize */
685 TRUE, /* pc_relative */
686 0, /* bitpos */
687 complain_overflow_signed,/* complain_on_overflow */
688 bfd_elf_generic_reloc, /* special_function */
689 "R_ARM_PREL31", /* name */
690 FALSE, /* partial_inplace */
691 0x7fffffff, /* src_mask */
692 0x7fffffff, /* dst_mask */
693 TRUE), /* pcrel_offset */
694
695 HOWTO (R_ARM_MOVW_ABS_NC, /* type */
696 0, /* rightshift */
697 2, /* size (0 = byte, 1 = short, 2 = long) */
698 16, /* bitsize */
699 FALSE, /* pc_relative */
700 0, /* bitpos */
701 complain_overflow_dont,/* complain_on_overflow */
702 bfd_elf_generic_reloc, /* special_function */
703 "R_ARM_MOVW_ABS_NC", /* name */
704 FALSE, /* partial_inplace */
705 0x000f0fff, /* src_mask */
706 0x000f0fff, /* dst_mask */
707 FALSE), /* pcrel_offset */
708
709 HOWTO (R_ARM_MOVT_ABS, /* type */
710 0, /* rightshift */
711 2, /* size (0 = byte, 1 = short, 2 = long) */
712 16, /* bitsize */
713 FALSE, /* pc_relative */
714 0, /* bitpos */
715 complain_overflow_bitfield,/* complain_on_overflow */
716 bfd_elf_generic_reloc, /* special_function */
717 "R_ARM_MOVT_ABS", /* name */
718 FALSE, /* partial_inplace */
719 0x000f0fff, /* src_mask */
720 0x000f0fff, /* dst_mask */
721 FALSE), /* pcrel_offset */
722
723 HOWTO (R_ARM_MOVW_PREL_NC, /* type */
724 0, /* rightshift */
725 2, /* size (0 = byte, 1 = short, 2 = long) */
726 16, /* bitsize */
727 TRUE, /* pc_relative */
728 0, /* bitpos */
729 complain_overflow_dont,/* complain_on_overflow */
730 bfd_elf_generic_reloc, /* special_function */
731 "R_ARM_MOVW_PREL_NC", /* name */
732 FALSE, /* partial_inplace */
733 0x000f0fff, /* src_mask */
734 0x000f0fff, /* dst_mask */
735 TRUE), /* pcrel_offset */
736
737 HOWTO (R_ARM_MOVT_PREL, /* type */
738 0, /* rightshift */
739 2, /* size (0 = byte, 1 = short, 2 = long) */
740 16, /* bitsize */
741 TRUE, /* pc_relative */
742 0, /* bitpos */
743 complain_overflow_bitfield,/* complain_on_overflow */
744 bfd_elf_generic_reloc, /* special_function */
745 "R_ARM_MOVT_PREL", /* name */
746 FALSE, /* partial_inplace */
747 0x000f0fff, /* src_mask */
748 0x000f0fff, /* dst_mask */
749 TRUE), /* pcrel_offset */
750
751 HOWTO (R_ARM_THM_MOVW_ABS_NC, /* type */
752 0, /* rightshift */
753 2, /* size (0 = byte, 1 = short, 2 = long) */
754 16, /* bitsize */
755 FALSE, /* pc_relative */
756 0, /* bitpos */
757 complain_overflow_dont,/* complain_on_overflow */
758 bfd_elf_generic_reloc, /* special_function */
759 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 FALSE, /* partial_inplace */
761 0x040f70ff, /* src_mask */
762 0x040f70ff, /* dst_mask */
763 FALSE), /* pcrel_offset */
764
765 HOWTO (R_ARM_THM_MOVT_ABS, /* type */
766 0, /* rightshift */
767 2, /* size (0 = byte, 1 = short, 2 = long) */
768 16, /* bitsize */
769 FALSE, /* pc_relative */
770 0, /* bitpos */
771 complain_overflow_bitfield,/* complain_on_overflow */
772 bfd_elf_generic_reloc, /* special_function */
773 "R_ARM_THM_MOVT_ABS", /* name */
774 FALSE, /* partial_inplace */
775 0x040f70ff, /* src_mask */
776 0x040f70ff, /* dst_mask */
777 FALSE), /* pcrel_offset */
778
779 HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 0, /* rightshift */
781 2, /* size (0 = byte, 1 = short, 2 = long) */
782 16, /* bitsize */
783 TRUE, /* pc_relative */
784 0, /* bitpos */
785 complain_overflow_dont,/* complain_on_overflow */
786 bfd_elf_generic_reloc, /* special_function */
787 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 FALSE, /* partial_inplace */
789 0x040f70ff, /* src_mask */
790 0x040f70ff, /* dst_mask */
791 TRUE), /* pcrel_offset */
792
793 HOWTO (R_ARM_THM_MOVT_PREL, /* type */
794 0, /* rightshift */
795 2, /* size (0 = byte, 1 = short, 2 = long) */
796 16, /* bitsize */
797 TRUE, /* pc_relative */
798 0, /* bitpos */
799 complain_overflow_bitfield,/* complain_on_overflow */
800 bfd_elf_generic_reloc, /* special_function */
801 "R_ARM_THM_MOVT_PREL", /* name */
802 FALSE, /* partial_inplace */
803 0x040f70ff, /* src_mask */
804 0x040f70ff, /* dst_mask */
805 TRUE), /* pcrel_offset */
806
807 HOWTO (R_ARM_THM_JUMP19, /* type */
808 1, /* rightshift */
809 2, /* size (0 = byte, 1 = short, 2 = long) */
810 19, /* bitsize */
811 TRUE, /* pc_relative */
812 0, /* bitpos */
813 complain_overflow_signed,/* complain_on_overflow */
814 bfd_elf_generic_reloc, /* special_function */
815 "R_ARM_THM_JUMP19", /* name */
816 FALSE, /* partial_inplace */
817 0x043f2fff, /* src_mask */
818 0x043f2fff, /* dst_mask */
819 TRUE), /* pcrel_offset */
820
821 HOWTO (R_ARM_THM_JUMP6, /* type */
822 1, /* rightshift */
823 1, /* size (0 = byte, 1 = short, 2 = long) */
824 6, /* bitsize */
825 TRUE, /* pc_relative */
826 0, /* bitpos */
827 complain_overflow_unsigned,/* complain_on_overflow */
828 bfd_elf_generic_reloc, /* special_function */
829 "R_ARM_THM_JUMP6", /* name */
830 FALSE, /* partial_inplace */
831 0x02f8, /* src_mask */
832 0x02f8, /* dst_mask */
833 TRUE), /* pcrel_offset */
834
835 /* These are declared as 13-bit signed relocations because we can
836 address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837 versa. */
838 HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 0, /* rightshift */
840 2, /* size (0 = byte, 1 = short, 2 = long) */
841 13, /* bitsize */
842 TRUE, /* pc_relative */
843 0, /* bitpos */
844 complain_overflow_dont,/* complain_on_overflow */
845 bfd_elf_generic_reloc, /* special_function */
846 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 FALSE, /* partial_inplace */
848 0xffffffff, /* src_mask */
849 0xffffffff, /* dst_mask */
850 TRUE), /* pcrel_offset */
851
852 HOWTO (R_ARM_THM_PC12, /* type */
853 0, /* rightshift */
854 2, /* size (0 = byte, 1 = short, 2 = long) */
855 13, /* bitsize */
856 TRUE, /* pc_relative */
857 0, /* bitpos */
858 complain_overflow_dont,/* complain_on_overflow */
859 bfd_elf_generic_reloc, /* special_function */
860 "R_ARM_THM_PC12", /* name */
861 FALSE, /* partial_inplace */
862 0xffffffff, /* src_mask */
863 0xffffffff, /* dst_mask */
864 TRUE), /* pcrel_offset */
865
866 HOWTO (R_ARM_ABS32_NOI, /* type */
867 0, /* rightshift */
868 2, /* size (0 = byte, 1 = short, 2 = long) */
869 32, /* bitsize */
870 FALSE, /* pc_relative */
871 0, /* bitpos */
872 complain_overflow_dont,/* complain_on_overflow */
873 bfd_elf_generic_reloc, /* special_function */
874 "R_ARM_ABS32_NOI", /* name */
875 FALSE, /* partial_inplace */
876 0xffffffff, /* src_mask */
877 0xffffffff, /* dst_mask */
878 FALSE), /* pcrel_offset */
879
880 HOWTO (R_ARM_REL32_NOI, /* type */
881 0, /* rightshift */
882 2, /* size (0 = byte, 1 = short, 2 = long) */
883 32, /* bitsize */
884 TRUE, /* pc_relative */
885 0, /* bitpos */
886 complain_overflow_dont,/* complain_on_overflow */
887 bfd_elf_generic_reloc, /* special_function */
888 "R_ARM_REL32_NOI", /* name */
889 FALSE, /* partial_inplace */
890 0xffffffff, /* src_mask */
891 0xffffffff, /* dst_mask */
892 FALSE), /* pcrel_offset */
893
894 /* Group relocations. */
895
896 HOWTO (R_ARM_ALU_PC_G0_NC, /* type */
897 0, /* rightshift */
898 2, /* size (0 = byte, 1 = short, 2 = long) */
899 32, /* bitsize */
900 TRUE, /* pc_relative */
901 0, /* bitpos */
902 complain_overflow_dont,/* complain_on_overflow */
903 bfd_elf_generic_reloc, /* special_function */
904 "R_ARM_ALU_PC_G0_NC", /* name */
905 FALSE, /* partial_inplace */
906 0xffffffff, /* src_mask */
907 0xffffffff, /* dst_mask */
908 TRUE), /* pcrel_offset */
909
910 HOWTO (R_ARM_ALU_PC_G0, /* type */
911 0, /* rightshift */
912 2, /* size (0 = byte, 1 = short, 2 = long) */
913 32, /* bitsize */
914 TRUE, /* pc_relative */
915 0, /* bitpos */
916 complain_overflow_dont,/* complain_on_overflow */
917 bfd_elf_generic_reloc, /* special_function */
918 "R_ARM_ALU_PC_G0", /* name */
919 FALSE, /* partial_inplace */
920 0xffffffff, /* src_mask */
921 0xffffffff, /* dst_mask */
922 TRUE), /* pcrel_offset */
923
924 HOWTO (R_ARM_ALU_PC_G1_NC, /* type */
925 0, /* rightshift */
926 2, /* size (0 = byte, 1 = short, 2 = long) */
927 32, /* bitsize */
928 TRUE, /* pc_relative */
929 0, /* bitpos */
930 complain_overflow_dont,/* complain_on_overflow */
931 bfd_elf_generic_reloc, /* special_function */
932 "R_ARM_ALU_PC_G1_NC", /* name */
933 FALSE, /* partial_inplace */
934 0xffffffff, /* src_mask */
935 0xffffffff, /* dst_mask */
936 TRUE), /* pcrel_offset */
937
938 HOWTO (R_ARM_ALU_PC_G1, /* type */
939 0, /* rightshift */
940 2, /* size (0 = byte, 1 = short, 2 = long) */
941 32, /* bitsize */
942 TRUE, /* pc_relative */
943 0, /* bitpos */
944 complain_overflow_dont,/* complain_on_overflow */
945 bfd_elf_generic_reloc, /* special_function */
946 "R_ARM_ALU_PC_G1", /* name */
947 FALSE, /* partial_inplace */
948 0xffffffff, /* src_mask */
949 0xffffffff, /* dst_mask */
950 TRUE), /* pcrel_offset */
951
952 HOWTO (R_ARM_ALU_PC_G2, /* type */
953 0, /* rightshift */
954 2, /* size (0 = byte, 1 = short, 2 = long) */
955 32, /* bitsize */
956 TRUE, /* pc_relative */
957 0, /* bitpos */
958 complain_overflow_dont,/* complain_on_overflow */
959 bfd_elf_generic_reloc, /* special_function */
960 "R_ARM_ALU_PC_G2", /* name */
961 FALSE, /* partial_inplace */
962 0xffffffff, /* src_mask */
963 0xffffffff, /* dst_mask */
964 TRUE), /* pcrel_offset */
965
966 HOWTO (R_ARM_LDR_PC_G1, /* type */
967 0, /* rightshift */
968 2, /* size (0 = byte, 1 = short, 2 = long) */
969 32, /* bitsize */
970 TRUE, /* pc_relative */
971 0, /* bitpos */
972 complain_overflow_dont,/* complain_on_overflow */
973 bfd_elf_generic_reloc, /* special_function */
974 "R_ARM_LDR_PC_G1", /* name */
975 FALSE, /* partial_inplace */
976 0xffffffff, /* src_mask */
977 0xffffffff, /* dst_mask */
978 TRUE), /* pcrel_offset */
979
980 HOWTO (R_ARM_LDR_PC_G2, /* type */
981 0, /* rightshift */
982 2, /* size (0 = byte, 1 = short, 2 = long) */
983 32, /* bitsize */
984 TRUE, /* pc_relative */
985 0, /* bitpos */
986 complain_overflow_dont,/* complain_on_overflow */
987 bfd_elf_generic_reloc, /* special_function */
988 "R_ARM_LDR_PC_G2", /* name */
989 FALSE, /* partial_inplace */
990 0xffffffff, /* src_mask */
991 0xffffffff, /* dst_mask */
992 TRUE), /* pcrel_offset */
993
994 HOWTO (R_ARM_LDRS_PC_G0, /* type */
995 0, /* rightshift */
996 2, /* size (0 = byte, 1 = short, 2 = long) */
997 32, /* bitsize */
998 TRUE, /* pc_relative */
999 0, /* bitpos */
1000 complain_overflow_dont,/* complain_on_overflow */
1001 bfd_elf_generic_reloc, /* special_function */
1002 "R_ARM_LDRS_PC_G0", /* name */
1003 FALSE, /* partial_inplace */
1004 0xffffffff, /* src_mask */
1005 0xffffffff, /* dst_mask */
1006 TRUE), /* pcrel_offset */
1007
1008 HOWTO (R_ARM_LDRS_PC_G1, /* type */
1009 0, /* rightshift */
1010 2, /* size (0 = byte, 1 = short, 2 = long) */
1011 32, /* bitsize */
1012 TRUE, /* pc_relative */
1013 0, /* bitpos */
1014 complain_overflow_dont,/* complain_on_overflow */
1015 bfd_elf_generic_reloc, /* special_function */
1016 "R_ARM_LDRS_PC_G1", /* name */
1017 FALSE, /* partial_inplace */
1018 0xffffffff, /* src_mask */
1019 0xffffffff, /* dst_mask */
1020 TRUE), /* pcrel_offset */
1021
1022 HOWTO (R_ARM_LDRS_PC_G2, /* type */
1023 0, /* rightshift */
1024 2, /* size (0 = byte, 1 = short, 2 = long) */
1025 32, /* bitsize */
1026 TRUE, /* pc_relative */
1027 0, /* bitpos */
1028 complain_overflow_dont,/* complain_on_overflow */
1029 bfd_elf_generic_reloc, /* special_function */
1030 "R_ARM_LDRS_PC_G2", /* name */
1031 FALSE, /* partial_inplace */
1032 0xffffffff, /* src_mask */
1033 0xffffffff, /* dst_mask */
1034 TRUE), /* pcrel_offset */
1035
1036 HOWTO (R_ARM_LDC_PC_G0, /* type */
1037 0, /* rightshift */
1038 2, /* size (0 = byte, 1 = short, 2 = long) */
1039 32, /* bitsize */
1040 TRUE, /* pc_relative */
1041 0, /* bitpos */
1042 complain_overflow_dont,/* complain_on_overflow */
1043 bfd_elf_generic_reloc, /* special_function */
1044 "R_ARM_LDC_PC_G0", /* name */
1045 FALSE, /* partial_inplace */
1046 0xffffffff, /* src_mask */
1047 0xffffffff, /* dst_mask */
1048 TRUE), /* pcrel_offset */
1049
1050 HOWTO (R_ARM_LDC_PC_G1, /* type */
1051 0, /* rightshift */
1052 2, /* size (0 = byte, 1 = short, 2 = long) */
1053 32, /* bitsize */
1054 TRUE, /* pc_relative */
1055 0, /* bitpos */
1056 complain_overflow_dont,/* complain_on_overflow */
1057 bfd_elf_generic_reloc, /* special_function */
1058 "R_ARM_LDC_PC_G1", /* name */
1059 FALSE, /* partial_inplace */
1060 0xffffffff, /* src_mask */
1061 0xffffffff, /* dst_mask */
1062 TRUE), /* pcrel_offset */
1063
1064 HOWTO (R_ARM_LDC_PC_G2, /* type */
1065 0, /* rightshift */
1066 2, /* size (0 = byte, 1 = short, 2 = long) */
1067 32, /* bitsize */
1068 TRUE, /* pc_relative */
1069 0, /* bitpos */
1070 complain_overflow_dont,/* complain_on_overflow */
1071 bfd_elf_generic_reloc, /* special_function */
1072 "R_ARM_LDC_PC_G2", /* name */
1073 FALSE, /* partial_inplace */
1074 0xffffffff, /* src_mask */
1075 0xffffffff, /* dst_mask */
1076 TRUE), /* pcrel_offset */
1077
1078 HOWTO (R_ARM_ALU_SB_G0_NC, /* type */
1079 0, /* rightshift */
1080 2, /* size (0 = byte, 1 = short, 2 = long) */
1081 32, /* bitsize */
1082 TRUE, /* pc_relative */
1083 0, /* bitpos */
1084 complain_overflow_dont,/* complain_on_overflow */
1085 bfd_elf_generic_reloc, /* special_function */
1086 "R_ARM_ALU_SB_G0_NC", /* name */
1087 FALSE, /* partial_inplace */
1088 0xffffffff, /* src_mask */
1089 0xffffffff, /* dst_mask */
1090 TRUE), /* pcrel_offset */
1091
1092 HOWTO (R_ARM_ALU_SB_G0, /* type */
1093 0, /* rightshift */
1094 2, /* size (0 = byte, 1 = short, 2 = long) */
1095 32, /* bitsize */
1096 TRUE, /* pc_relative */
1097 0, /* bitpos */
1098 complain_overflow_dont,/* complain_on_overflow */
1099 bfd_elf_generic_reloc, /* special_function */
1100 "R_ARM_ALU_SB_G0", /* name */
1101 FALSE, /* partial_inplace */
1102 0xffffffff, /* src_mask */
1103 0xffffffff, /* dst_mask */
1104 TRUE), /* pcrel_offset */
1105
1106 HOWTO (R_ARM_ALU_SB_G1_NC, /* type */
1107 0, /* rightshift */
1108 2, /* size (0 = byte, 1 = short, 2 = long) */
1109 32, /* bitsize */
1110 TRUE, /* pc_relative */
1111 0, /* bitpos */
1112 complain_overflow_dont,/* complain_on_overflow */
1113 bfd_elf_generic_reloc, /* special_function */
1114 "R_ARM_ALU_SB_G1_NC", /* name */
1115 FALSE, /* partial_inplace */
1116 0xffffffff, /* src_mask */
1117 0xffffffff, /* dst_mask */
1118 TRUE), /* pcrel_offset */
1119
1120 HOWTO (R_ARM_ALU_SB_G1, /* type */
1121 0, /* rightshift */
1122 2, /* size (0 = byte, 1 = short, 2 = long) */
1123 32, /* bitsize */
1124 TRUE, /* pc_relative */
1125 0, /* bitpos */
1126 complain_overflow_dont,/* complain_on_overflow */
1127 bfd_elf_generic_reloc, /* special_function */
1128 "R_ARM_ALU_SB_G1", /* name */
1129 FALSE, /* partial_inplace */
1130 0xffffffff, /* src_mask */
1131 0xffffffff, /* dst_mask */
1132 TRUE), /* pcrel_offset */
1133
1134 HOWTO (R_ARM_ALU_SB_G2, /* type */
1135 0, /* rightshift */
1136 2, /* size (0 = byte, 1 = short, 2 = long) */
1137 32, /* bitsize */
1138 TRUE, /* pc_relative */
1139 0, /* bitpos */
1140 complain_overflow_dont,/* complain_on_overflow */
1141 bfd_elf_generic_reloc, /* special_function */
1142 "R_ARM_ALU_SB_G2", /* name */
1143 FALSE, /* partial_inplace */
1144 0xffffffff, /* src_mask */
1145 0xffffffff, /* dst_mask */
1146 TRUE), /* pcrel_offset */
1147
1148 HOWTO (R_ARM_LDR_SB_G0, /* type */
1149 0, /* rightshift */
1150 2, /* size (0 = byte, 1 = short, 2 = long) */
1151 32, /* bitsize */
1152 TRUE, /* pc_relative */
1153 0, /* bitpos */
1154 complain_overflow_dont,/* complain_on_overflow */
1155 bfd_elf_generic_reloc, /* special_function */
1156 "R_ARM_LDR_SB_G0", /* name */
1157 FALSE, /* partial_inplace */
1158 0xffffffff, /* src_mask */
1159 0xffffffff, /* dst_mask */
1160 TRUE), /* pcrel_offset */
1161
1162 HOWTO (R_ARM_LDR_SB_G1, /* type */
1163 0, /* rightshift */
1164 2, /* size (0 = byte, 1 = short, 2 = long) */
1165 32, /* bitsize */
1166 TRUE, /* pc_relative */
1167 0, /* bitpos */
1168 complain_overflow_dont,/* complain_on_overflow */
1169 bfd_elf_generic_reloc, /* special_function */
1170 "R_ARM_LDR_SB_G1", /* name */
1171 FALSE, /* partial_inplace */
1172 0xffffffff, /* src_mask */
1173 0xffffffff, /* dst_mask */
1174 TRUE), /* pcrel_offset */
1175
1176 HOWTO (R_ARM_LDR_SB_G2, /* type */
1177 0, /* rightshift */
1178 2, /* size (0 = byte, 1 = short, 2 = long) */
1179 32, /* bitsize */
1180 TRUE, /* pc_relative */
1181 0, /* bitpos */
1182 complain_overflow_dont,/* complain_on_overflow */
1183 bfd_elf_generic_reloc, /* special_function */
1184 "R_ARM_LDR_SB_G2", /* name */
1185 FALSE, /* partial_inplace */
1186 0xffffffff, /* src_mask */
1187 0xffffffff, /* dst_mask */
1188 TRUE), /* pcrel_offset */
1189
1190 HOWTO (R_ARM_LDRS_SB_G0, /* type */
1191 0, /* rightshift */
1192 2, /* size (0 = byte, 1 = short, 2 = long) */
1193 32, /* bitsize */
1194 TRUE, /* pc_relative */
1195 0, /* bitpos */
1196 complain_overflow_dont,/* complain_on_overflow */
1197 bfd_elf_generic_reloc, /* special_function */
1198 "R_ARM_LDRS_SB_G0", /* name */
1199 FALSE, /* partial_inplace */
1200 0xffffffff, /* src_mask */
1201 0xffffffff, /* dst_mask */
1202 TRUE), /* pcrel_offset */
1203
1204 HOWTO (R_ARM_LDRS_SB_G1, /* type */
1205 0, /* rightshift */
1206 2, /* size (0 = byte, 1 = short, 2 = long) */
1207 32, /* bitsize */
1208 TRUE, /* pc_relative */
1209 0, /* bitpos */
1210 complain_overflow_dont,/* complain_on_overflow */
1211 bfd_elf_generic_reloc, /* special_function */
1212 "R_ARM_LDRS_SB_G1", /* name */
1213 FALSE, /* partial_inplace */
1214 0xffffffff, /* src_mask */
1215 0xffffffff, /* dst_mask */
1216 TRUE), /* pcrel_offset */
1217
1218 HOWTO (R_ARM_LDRS_SB_G2, /* type */
1219 0, /* rightshift */
1220 2, /* size (0 = byte, 1 = short, 2 = long) */
1221 32, /* bitsize */
1222 TRUE, /* pc_relative */
1223 0, /* bitpos */
1224 complain_overflow_dont,/* complain_on_overflow */
1225 bfd_elf_generic_reloc, /* special_function */
1226 "R_ARM_LDRS_SB_G2", /* name */
1227 FALSE, /* partial_inplace */
1228 0xffffffff, /* src_mask */
1229 0xffffffff, /* dst_mask */
1230 TRUE), /* pcrel_offset */
1231
1232 HOWTO (R_ARM_LDC_SB_G0, /* type */
1233 0, /* rightshift */
1234 2, /* size (0 = byte, 1 = short, 2 = long) */
1235 32, /* bitsize */
1236 TRUE, /* pc_relative */
1237 0, /* bitpos */
1238 complain_overflow_dont,/* complain_on_overflow */
1239 bfd_elf_generic_reloc, /* special_function */
1240 "R_ARM_LDC_SB_G0", /* name */
1241 FALSE, /* partial_inplace */
1242 0xffffffff, /* src_mask */
1243 0xffffffff, /* dst_mask */
1244 TRUE), /* pcrel_offset */
1245
1246 HOWTO (R_ARM_LDC_SB_G1, /* type */
1247 0, /* rightshift */
1248 2, /* size (0 = byte, 1 = short, 2 = long) */
1249 32, /* bitsize */
1250 TRUE, /* pc_relative */
1251 0, /* bitpos */
1252 complain_overflow_dont,/* complain_on_overflow */
1253 bfd_elf_generic_reloc, /* special_function */
1254 "R_ARM_LDC_SB_G1", /* name */
1255 FALSE, /* partial_inplace */
1256 0xffffffff, /* src_mask */
1257 0xffffffff, /* dst_mask */
1258 TRUE), /* pcrel_offset */
1259
1260 HOWTO (R_ARM_LDC_SB_G2, /* type */
1261 0, /* rightshift */
1262 2, /* size (0 = byte, 1 = short, 2 = long) */
1263 32, /* bitsize */
1264 TRUE, /* pc_relative */
1265 0, /* bitpos */
1266 complain_overflow_dont,/* complain_on_overflow */
1267 bfd_elf_generic_reloc, /* special_function */
1268 "R_ARM_LDC_SB_G2", /* name */
1269 FALSE, /* partial_inplace */
1270 0xffffffff, /* src_mask */
1271 0xffffffff, /* dst_mask */
1272 TRUE), /* pcrel_offset */
1273
1274 /* End of group relocations. */
1275
1276 HOWTO (R_ARM_MOVW_BREL_NC, /* type */
1277 0, /* rightshift */
1278 2, /* size (0 = byte, 1 = short, 2 = long) */
1279 16, /* bitsize */
1280 FALSE, /* pc_relative */
1281 0, /* bitpos */
1282 complain_overflow_dont,/* complain_on_overflow */
1283 bfd_elf_generic_reloc, /* special_function */
1284 "R_ARM_MOVW_BREL_NC", /* name */
1285 FALSE, /* partial_inplace */
1286 0x0000ffff, /* src_mask */
1287 0x0000ffff, /* dst_mask */
1288 FALSE), /* pcrel_offset */
1289
1290 HOWTO (R_ARM_MOVT_BREL, /* type */
1291 0, /* rightshift */
1292 2, /* size (0 = byte, 1 = short, 2 = long) */
1293 16, /* bitsize */
1294 FALSE, /* pc_relative */
1295 0, /* bitpos */
1296 complain_overflow_bitfield,/* complain_on_overflow */
1297 bfd_elf_generic_reloc, /* special_function */
1298 "R_ARM_MOVT_BREL", /* name */
1299 FALSE, /* partial_inplace */
1300 0x0000ffff, /* src_mask */
1301 0x0000ffff, /* dst_mask */
1302 FALSE), /* pcrel_offset */
1303
1304 HOWTO (R_ARM_MOVW_BREL, /* type */
1305 0, /* rightshift */
1306 2, /* size (0 = byte, 1 = short, 2 = long) */
1307 16, /* bitsize */
1308 FALSE, /* pc_relative */
1309 0, /* bitpos */
1310 complain_overflow_dont,/* complain_on_overflow */
1311 bfd_elf_generic_reloc, /* special_function */
1312 "R_ARM_MOVW_BREL", /* name */
1313 FALSE, /* partial_inplace */
1314 0x0000ffff, /* src_mask */
1315 0x0000ffff, /* dst_mask */
1316 FALSE), /* pcrel_offset */
1317
1318 HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 0, /* rightshift */
1320 2, /* size (0 = byte, 1 = short, 2 = long) */
1321 16, /* bitsize */
1322 FALSE, /* pc_relative */
1323 0, /* bitpos */
1324 complain_overflow_dont,/* complain_on_overflow */
1325 bfd_elf_generic_reloc, /* special_function */
1326 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 FALSE, /* partial_inplace */
1328 0x040f70ff, /* src_mask */
1329 0x040f70ff, /* dst_mask */
1330 FALSE), /* pcrel_offset */
1331
1332 HOWTO (R_ARM_THM_MOVT_BREL, /* type */
1333 0, /* rightshift */
1334 2, /* size (0 = byte, 1 = short, 2 = long) */
1335 16, /* bitsize */
1336 FALSE, /* pc_relative */
1337 0, /* bitpos */
1338 complain_overflow_bitfield,/* complain_on_overflow */
1339 bfd_elf_generic_reloc, /* special_function */
1340 "R_ARM_THM_MOVT_BREL", /* name */
1341 FALSE, /* partial_inplace */
1342 0x040f70ff, /* src_mask */
1343 0x040f70ff, /* dst_mask */
1344 FALSE), /* pcrel_offset */
1345
1346 HOWTO (R_ARM_THM_MOVW_BREL, /* type */
1347 0, /* rightshift */
1348 2, /* size (0 = byte, 1 = short, 2 = long) */
1349 16, /* bitsize */
1350 FALSE, /* pc_relative */
1351 0, /* bitpos */
1352 complain_overflow_dont,/* complain_on_overflow */
1353 bfd_elf_generic_reloc, /* special_function */
1354 "R_ARM_THM_MOVW_BREL", /* name */
1355 FALSE, /* partial_inplace */
1356 0x040f70ff, /* src_mask */
1357 0x040f70ff, /* dst_mask */
1358 FALSE), /* pcrel_offset */
1359
1360 HOWTO (R_ARM_TLS_GOTDESC, /* type */
1361 0, /* rightshift */
1362 2, /* size (0 = byte, 1 = short, 2 = long) */
1363 32, /* bitsize */
1364 FALSE, /* pc_relative */
1365 0, /* bitpos */
1366 complain_overflow_bitfield,/* complain_on_overflow */
1367 NULL, /* special_function */
1368 "R_ARM_TLS_GOTDESC", /* name */
1369 TRUE, /* partial_inplace */
1370 0xffffffff, /* src_mask */
1371 0xffffffff, /* dst_mask */
1372 FALSE), /* pcrel_offset */
1373
1374 HOWTO (R_ARM_TLS_CALL, /* type */
1375 0, /* rightshift */
1376 2, /* size (0 = byte, 1 = short, 2 = long) */
1377 24, /* bitsize */
1378 FALSE, /* pc_relative */
1379 0, /* bitpos */
1380 complain_overflow_dont,/* complain_on_overflow */
1381 bfd_elf_generic_reloc, /* special_function */
1382 "R_ARM_TLS_CALL", /* name */
1383 FALSE, /* partial_inplace */
1384 0x00ffffff, /* src_mask */
1385 0x00ffffff, /* dst_mask */
1386 FALSE), /* pcrel_offset */
1387
1388 HOWTO (R_ARM_TLS_DESCSEQ, /* type */
1389 0, /* rightshift */
1390 2, /* size (0 = byte, 1 = short, 2 = long) */
1391 0, /* bitsize */
1392 FALSE, /* pc_relative */
1393 0, /* bitpos */
1394 complain_overflow_bitfield,/* complain_on_overflow */
1395 bfd_elf_generic_reloc, /* special_function */
1396 "R_ARM_TLS_DESCSEQ", /* name */
1397 FALSE, /* partial_inplace */
1398 0x00000000, /* src_mask */
1399 0x00000000, /* dst_mask */
1400 FALSE), /* pcrel_offset */
1401
1402 HOWTO (R_ARM_THM_TLS_CALL, /* type */
1403 0, /* rightshift */
1404 2, /* size (0 = byte, 1 = short, 2 = long) */
1405 24, /* bitsize */
1406 FALSE, /* pc_relative */
1407 0, /* bitpos */
1408 complain_overflow_dont,/* complain_on_overflow */
1409 bfd_elf_generic_reloc, /* special_function */
1410 "R_ARM_THM_TLS_CALL", /* name */
1411 FALSE, /* partial_inplace */
1412 0x07ff07ff, /* src_mask */
1413 0x07ff07ff, /* dst_mask */
1414 FALSE), /* pcrel_offset */
1415
1416 HOWTO (R_ARM_PLT32_ABS, /* type */
1417 0, /* rightshift */
1418 2, /* size (0 = byte, 1 = short, 2 = long) */
1419 32, /* bitsize */
1420 FALSE, /* pc_relative */
1421 0, /* bitpos */
1422 complain_overflow_dont,/* complain_on_overflow */
1423 bfd_elf_generic_reloc, /* special_function */
1424 "R_ARM_PLT32_ABS", /* name */
1425 FALSE, /* partial_inplace */
1426 0xffffffff, /* src_mask */
1427 0xffffffff, /* dst_mask */
1428 FALSE), /* pcrel_offset */
1429
1430 HOWTO (R_ARM_GOT_ABS, /* type */
1431 0, /* rightshift */
1432 2, /* size (0 = byte, 1 = short, 2 = long) */
1433 32, /* bitsize */
1434 FALSE, /* pc_relative */
1435 0, /* bitpos */
1436 complain_overflow_dont,/* complain_on_overflow */
1437 bfd_elf_generic_reloc, /* special_function */
1438 "R_ARM_GOT_ABS", /* name */
1439 FALSE, /* partial_inplace */
1440 0xffffffff, /* src_mask */
1441 0xffffffff, /* dst_mask */
1442 FALSE), /* pcrel_offset */
1443
1444 HOWTO (R_ARM_GOT_PREL, /* type */
1445 0, /* rightshift */
1446 2, /* size (0 = byte, 1 = short, 2 = long) */
1447 32, /* bitsize */
1448 TRUE, /* pc_relative */
1449 0, /* bitpos */
1450 complain_overflow_dont, /* complain_on_overflow */
1451 bfd_elf_generic_reloc, /* special_function */
1452 "R_ARM_GOT_PREL", /* name */
1453 FALSE, /* partial_inplace */
1454 0xffffffff, /* src_mask */
1455 0xffffffff, /* dst_mask */
1456 TRUE), /* pcrel_offset */
1457
1458 HOWTO (R_ARM_GOT_BREL12, /* type */
1459 0, /* rightshift */
1460 2, /* size (0 = byte, 1 = short, 2 = long) */
1461 12, /* bitsize */
1462 FALSE, /* pc_relative */
1463 0, /* bitpos */
1464 complain_overflow_bitfield,/* complain_on_overflow */
1465 bfd_elf_generic_reloc, /* special_function */
1466 "R_ARM_GOT_BREL12", /* name */
1467 FALSE, /* partial_inplace */
1468 0x00000fff, /* src_mask */
1469 0x00000fff, /* dst_mask */
1470 FALSE), /* pcrel_offset */
1471
1472 HOWTO (R_ARM_GOTOFF12, /* type */
1473 0, /* rightshift */
1474 2, /* size (0 = byte, 1 = short, 2 = long) */
1475 12, /* bitsize */
1476 FALSE, /* pc_relative */
1477 0, /* bitpos */
1478 complain_overflow_bitfield,/* complain_on_overflow */
1479 bfd_elf_generic_reloc, /* special_function */
1480 "R_ARM_GOTOFF12", /* name */
1481 FALSE, /* partial_inplace */
1482 0x00000fff, /* src_mask */
1483 0x00000fff, /* dst_mask */
1484 FALSE), /* pcrel_offset */
1485
1486 EMPTY_HOWTO (R_ARM_GOTRELAX), /* reserved for future GOT-load optimizations */
1487
1488 /* GNU extension to record C++ vtable member usage */
1489 HOWTO (R_ARM_GNU_VTENTRY, /* type */
1490 0, /* rightshift */
1491 2, /* size (0 = byte, 1 = short, 2 = long) */
1492 0, /* bitsize */
1493 FALSE, /* pc_relative */
1494 0, /* bitpos */
1495 complain_overflow_dont, /* complain_on_overflow */
1496 _bfd_elf_rel_vtable_reloc_fn, /* special_function */
1497 "R_ARM_GNU_VTENTRY", /* name */
1498 FALSE, /* partial_inplace */
1499 0, /* src_mask */
1500 0, /* dst_mask */
1501 FALSE), /* pcrel_offset */
1502
1503 /* GNU extension to record C++ vtable hierarchy */
1504 HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 0, /* rightshift */
1506 2, /* size (0 = byte, 1 = short, 2 = long) */
1507 0, /* bitsize */
1508 FALSE, /* pc_relative */
1509 0, /* bitpos */
1510 complain_overflow_dont, /* complain_on_overflow */
1511 NULL, /* special_function */
1512 "R_ARM_GNU_VTINHERIT", /* name */
1513 FALSE, /* partial_inplace */
1514 0, /* src_mask */
1515 0, /* dst_mask */
1516 FALSE), /* pcrel_offset */
1517
1518 HOWTO (R_ARM_THM_JUMP11, /* type */
1519 1, /* rightshift */
1520 1, /* size (0 = byte, 1 = short, 2 = long) */
1521 11, /* bitsize */
1522 TRUE, /* pc_relative */
1523 0, /* bitpos */
1524 complain_overflow_signed, /* complain_on_overflow */
1525 bfd_elf_generic_reloc, /* special_function */
1526 "R_ARM_THM_JUMP11", /* name */
1527 FALSE, /* partial_inplace */
1528 0x000007ff, /* src_mask */
1529 0x000007ff, /* dst_mask */
1530 TRUE), /* pcrel_offset */
1531
1532 HOWTO (R_ARM_THM_JUMP8, /* type */
1533 1, /* rightshift */
1534 1, /* size (0 = byte, 1 = short, 2 = long) */
1535 8, /* bitsize */
1536 TRUE, /* pc_relative */
1537 0, /* bitpos */
1538 complain_overflow_signed, /* complain_on_overflow */
1539 bfd_elf_generic_reloc, /* special_function */
1540 "R_ARM_THM_JUMP8", /* name */
1541 FALSE, /* partial_inplace */
1542 0x000000ff, /* src_mask */
1543 0x000000ff, /* dst_mask */
1544 TRUE), /* pcrel_offset */
1545
1546 /* TLS relocations */
1547 HOWTO (R_ARM_TLS_GD32, /* type */
1548 0, /* rightshift */
1549 2, /* size (0 = byte, 1 = short, 2 = long) */
1550 32, /* bitsize */
1551 FALSE, /* pc_relative */
1552 0, /* bitpos */
1553 complain_overflow_bitfield,/* complain_on_overflow */
1554 NULL, /* special_function */
1555 "R_ARM_TLS_GD32", /* name */
1556 TRUE, /* partial_inplace */
1557 0xffffffff, /* src_mask */
1558 0xffffffff, /* dst_mask */
1559 FALSE), /* pcrel_offset */
1560
1561 HOWTO (R_ARM_TLS_LDM32, /* type */
1562 0, /* rightshift */
1563 2, /* size (0 = byte, 1 = short, 2 = long) */
1564 32, /* bitsize */
1565 FALSE, /* pc_relative */
1566 0, /* bitpos */
1567 complain_overflow_bitfield,/* complain_on_overflow */
1568 bfd_elf_generic_reloc, /* special_function */
1569 "R_ARM_TLS_LDM32", /* name */
1570 TRUE, /* partial_inplace */
1571 0xffffffff, /* src_mask */
1572 0xffffffff, /* dst_mask */
1573 FALSE), /* pcrel_offset */
1574
1575 HOWTO (R_ARM_TLS_LDO32, /* type */
1576 0, /* rightshift */
1577 2, /* size (0 = byte, 1 = short, 2 = long) */
1578 32, /* bitsize */
1579 FALSE, /* pc_relative */
1580 0, /* bitpos */
1581 complain_overflow_bitfield,/* complain_on_overflow */
1582 bfd_elf_generic_reloc, /* special_function */
1583 "R_ARM_TLS_LDO32", /* name */
1584 TRUE, /* partial_inplace */
1585 0xffffffff, /* src_mask */
1586 0xffffffff, /* dst_mask */
1587 FALSE), /* pcrel_offset */
1588
1589 HOWTO (R_ARM_TLS_IE32, /* type */
1590 0, /* rightshift */
1591 2, /* size (0 = byte, 1 = short, 2 = long) */
1592 32, /* bitsize */
1593 FALSE, /* pc_relative */
1594 0, /* bitpos */
1595 complain_overflow_bitfield,/* complain_on_overflow */
1596 NULL, /* special_function */
1597 "R_ARM_TLS_IE32", /* name */
1598 TRUE, /* partial_inplace */
1599 0xffffffff, /* src_mask */
1600 0xffffffff, /* dst_mask */
1601 FALSE), /* pcrel_offset */
1602
1603 HOWTO (R_ARM_TLS_LE32, /* type */
1604 0, /* rightshift */
1605 2, /* size (0 = byte, 1 = short, 2 = long) */
1606 32, /* bitsize */
1607 FALSE, /* pc_relative */
1608 0, /* bitpos */
1609 complain_overflow_bitfield,/* complain_on_overflow */
1610 NULL, /* special_function */
1611 "R_ARM_TLS_LE32", /* name */
1612 TRUE, /* partial_inplace */
1613 0xffffffff, /* src_mask */
1614 0xffffffff, /* dst_mask */
1615 FALSE), /* pcrel_offset */
1616
1617 HOWTO (R_ARM_TLS_LDO12, /* type */
1618 0, /* rightshift */
1619 2, /* size (0 = byte, 1 = short, 2 = long) */
1620 12, /* bitsize */
1621 FALSE, /* pc_relative */
1622 0, /* bitpos */
1623 complain_overflow_bitfield,/* complain_on_overflow */
1624 bfd_elf_generic_reloc, /* special_function */
1625 "R_ARM_TLS_LDO12", /* name */
1626 FALSE, /* partial_inplace */
1627 0x00000fff, /* src_mask */
1628 0x00000fff, /* dst_mask */
1629 FALSE), /* pcrel_offset */
1630
1631 HOWTO (R_ARM_TLS_LE12, /* type */
1632 0, /* rightshift */
1633 2, /* size (0 = byte, 1 = short, 2 = long) */
1634 12, /* bitsize */
1635 FALSE, /* pc_relative */
1636 0, /* bitpos */
1637 complain_overflow_bitfield,/* complain_on_overflow */
1638 bfd_elf_generic_reloc, /* special_function */
1639 "R_ARM_TLS_LE12", /* name */
1640 FALSE, /* partial_inplace */
1641 0x00000fff, /* src_mask */
1642 0x00000fff, /* dst_mask */
1643 FALSE), /* pcrel_offset */
1644
1645 HOWTO (R_ARM_TLS_IE12GP, /* type */
1646 0, /* rightshift */
1647 2, /* size (0 = byte, 1 = short, 2 = long) */
1648 12, /* bitsize */
1649 FALSE, /* pc_relative */
1650 0, /* bitpos */
1651 complain_overflow_bitfield,/* complain_on_overflow */
1652 bfd_elf_generic_reloc, /* special_function */
1653 "R_ARM_TLS_IE12GP", /* name */
1654 FALSE, /* partial_inplace */
1655 0x00000fff, /* src_mask */
1656 0x00000fff, /* dst_mask */
1657 FALSE), /* pcrel_offset */
1658
1659 /* 112-127 private relocations. */
1660 EMPTY_HOWTO (112),
1661 EMPTY_HOWTO (113),
1662 EMPTY_HOWTO (114),
1663 EMPTY_HOWTO (115),
1664 EMPTY_HOWTO (116),
1665 EMPTY_HOWTO (117),
1666 EMPTY_HOWTO (118),
1667 EMPTY_HOWTO (119),
1668 EMPTY_HOWTO (120),
1669 EMPTY_HOWTO (121),
1670 EMPTY_HOWTO (122),
1671 EMPTY_HOWTO (123),
1672 EMPTY_HOWTO (124),
1673 EMPTY_HOWTO (125),
1674 EMPTY_HOWTO (126),
1675 EMPTY_HOWTO (127),
1676
1677 /* R_ARM_ME_TOO, obsolete. */
1678 EMPTY_HOWTO (128),
1679
1680 HOWTO (R_ARM_THM_TLS_DESCSEQ, /* type */
1681 0, /* rightshift */
1682 1, /* size (0 = byte, 1 = short, 2 = long) */
1683 0, /* bitsize */
1684 FALSE, /* pc_relative */
1685 0, /* bitpos */
1686 complain_overflow_bitfield,/* complain_on_overflow */
1687 bfd_elf_generic_reloc, /* special_function */
1688 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 FALSE, /* partial_inplace */
1690 0x00000000, /* src_mask */
1691 0x00000000, /* dst_mask */
1692 FALSE), /* pcrel_offset */
1693 EMPTY_HOWTO (130),
1694 EMPTY_HOWTO (131),
1695 HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type. */
1696 0, /* rightshift. */
1697 1, /* size (0 = byte, 1 = short, 2 = long). */
1698 16, /* bitsize. */
1699 FALSE, /* pc_relative. */
1700 0, /* bitpos. */
1701 complain_overflow_bitfield,/* complain_on_overflow. */
1702 bfd_elf_generic_reloc, /* special_function. */
1703 "R_ARM_THM_ALU_ABS_G0_NC",/* name. */
1704 FALSE, /* partial_inplace. */
1705 0x00000000, /* src_mask. */
1706 0x00000000, /* dst_mask. */
1707 FALSE), /* pcrel_offset. */
1708 HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type. */
1709 0, /* rightshift. */
1710 1, /* size (0 = byte, 1 = short, 2 = long). */
1711 16, /* bitsize. */
1712 FALSE, /* pc_relative. */
1713 0, /* bitpos. */
1714 complain_overflow_bitfield,/* complain_on_overflow. */
1715 bfd_elf_generic_reloc, /* special_function. */
1716 "R_ARM_THM_ALU_ABS_G1_NC",/* name. */
1717 FALSE, /* partial_inplace. */
1718 0x00000000, /* src_mask. */
1719 0x00000000, /* dst_mask. */
1720 FALSE), /* pcrel_offset. */
1721 HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type. */
1722 0, /* rightshift. */
1723 1, /* size (0 = byte, 1 = short, 2 = long). */
1724 16, /* bitsize. */
1725 FALSE, /* pc_relative. */
1726 0, /* bitpos. */
1727 complain_overflow_bitfield,/* complain_on_overflow. */
1728 bfd_elf_generic_reloc, /* special_function. */
1729 "R_ARM_THM_ALU_ABS_G2_NC",/* name. */
1730 FALSE, /* partial_inplace. */
1731 0x00000000, /* src_mask. */
1732 0x00000000, /* dst_mask. */
1733 FALSE), /* pcrel_offset. */
1734 HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type. */
1735 0, /* rightshift. */
1736 1, /* size (0 = byte, 1 = short, 2 = long). */
1737 16, /* bitsize. */
1738 FALSE, /* pc_relative. */
1739 0, /* bitpos. */
1740 complain_overflow_bitfield,/* complain_on_overflow. */
1741 bfd_elf_generic_reloc, /* special_function. */
1742 "R_ARM_THM_ALU_ABS_G3_NC",/* name. */
1743 FALSE, /* partial_inplace. */
1744 0x00000000, /* src_mask. */
1745 0x00000000, /* dst_mask. */
1746 FALSE), /* pcrel_offset. */
1747 /* Relocations for Armv8.1-M Mainline. */
1748 HOWTO (R_ARM_THM_BF16, /* type. */
1749 0, /* rightshift. */
1750 1, /* size (0 = byte, 1 = short, 2 = long). */
1751 16, /* bitsize. */
1752 TRUE, /* pc_relative. */
1753 0, /* bitpos. */
1754 complain_overflow_dont,/* do not complain_on_overflow. */
1755 bfd_elf_generic_reloc, /* special_function. */
1756 "R_ARM_THM_BF16", /* name. */
1757 FALSE, /* partial_inplace. */
1758 0x001f0ffe, /* src_mask. */
1759 0x001f0ffe, /* dst_mask. */
1760 TRUE), /* pcrel_offset. */
1761 HOWTO (R_ARM_THM_BF12, /* type. */
1762 0, /* rightshift. */
1763 1, /* size (0 = byte, 1 = short, 2 = long). */
1764 12, /* bitsize. */
1765 TRUE, /* pc_relative. */
1766 0, /* bitpos. */
1767 complain_overflow_dont,/* do not complain_on_overflow. */
1768 bfd_elf_generic_reloc, /* special_function. */
1769 "R_ARM_THM_BF12", /* name. */
1770 FALSE, /* partial_inplace. */
1771 0x00010ffe, /* src_mask. */
1772 0x00010ffe, /* dst_mask. */
1773 TRUE), /* pcrel_offset. */
1774 HOWTO (R_ARM_THM_BF18, /* type. */
1775 0, /* rightshift. */
1776 1, /* size (0 = byte, 1 = short, 2 = long). */
1777 18, /* bitsize. */
1778 TRUE, /* pc_relative. */
1779 0, /* bitpos. */
1780 complain_overflow_dont,/* do not complain_on_overflow. */
1781 bfd_elf_generic_reloc, /* special_function. */
1782 "R_ARM_THM_BF18", /* name. */
1783 FALSE, /* partial_inplace. */
1784 0x007f0ffe, /* src_mask. */
1785 0x007f0ffe, /* dst_mask. */
1786 TRUE), /* pcrel_offset. */
1787 };
1788
/* Relocation types 160 onwards, indexed by (type - R_ARM_IRELATIVE):
   R_ARM_IRELATIVE followed by the function-descriptor (FDPIC ABI)
   relocations.  See elf32_arm_howto_from_type for the lookup.  */
static reloc_howto_type elf32_arm_howto_table_2[8] =
{
  HOWTO (R_ARM_IRELATIVE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_IRELATIVE",	/* name */
	 TRUE,			/* partial_inplace */
	 0xffffffff,		/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */

  /* Function-descriptor relocations (FDPIC).  These all write a full
     32-bit word and take no addend bits from the section contents
     (src_mask of 0).  */
  HOWTO (R_ARM_GOTFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTFUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_GOTOFFFUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_GOTOFFFUNCDESC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_FUNCDESC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  /* NOTE(review): bitsize here is 64 while size is 2 (32-bit) and
     dst_mask only covers the low word — presumably the two-word
     descriptor is handled specially elsewhere; confirm.  */
  HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 64,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_FUNCDESC_VALUE",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_GD32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_LDM32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
  HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
	 0,			/* rightshift */
	 2,			/* size (0 = byte, 1 = short, 2 = long) */
	 32,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_bitfield,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_TLS_IE32_FDPIC",/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0xffffffff,		/* dst_mask */
	 FALSE),		/* pcrel_offset */
};
1897
/* 249-255 extended, currently unused, relocations, indexed by
   (type - R_ARM_RREL32).  All four entries are placeholders: byte
   size, zero bitsize and zero src/dst masks, so applying one of them
   modifies nothing.  */
static reloc_howto_type elf32_arm_howto_table_3[4] =
{
  HOWTO (R_ARM_RREL32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RREL32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RABS32,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RABS32",	/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RPC24,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RPC24",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE),		/* pcrel_offset */

  HOWTO (R_ARM_RBASE,		/* type */
	 0,			/* rightshift */
	 0,			/* size (0 = byte, 1 = short, 2 = long) */
	 0,			/* bitsize */
	 FALSE,			/* pc_relative */
	 0,			/* bitpos */
	 complain_overflow_dont,/* complain_on_overflow */
	 bfd_elf_generic_reloc,	/* special_function */
	 "R_ARM_RBASE",		/* name */
	 FALSE,			/* partial_inplace */
	 0,			/* src_mask */
	 0,			/* dst_mask */
	 FALSE)			/* pcrel_offset */
};
1957
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961 if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962 return &elf32_arm_howto_table_1[r_type];
1963
1964 if (r_type >= R_ARM_IRELATIVE
1965 && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966 return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967
1968 if (r_type >= R_ARM_RREL32
1969 && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970 return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971
1972 return NULL;
1973 }
1974
1975 static bfd_boolean
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 Elf_Internal_Rela * elf_reloc)
1978 {
1979 unsigned int r_type;
1980
1981 r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982 if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983 {
1984 /* xgettext:c-format */
1985 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 abfd, r_type);
1987 bfd_set_error (bfd_error_bad_value);
1988 return FALSE;
1989 }
1990 return TRUE;
1991 }
1992
/* One entry in the BFD-generic-code -> ELF-relocation-number map.  */
struct elf32_arm_reloc_map
{
  bfd_reloc_code_real_type bfd_reloc_val;	/* BFD generic reloc code.  */
  unsigned char elf_reloc_val;			/* Corresponding R_ARM_* number.  */
};
1998
1999 /* All entries in this list must also be present in elf32_arm_howto_table. */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001 {
2002 {BFD_RELOC_NONE, R_ARM_NONE},
2003 {BFD_RELOC_ARM_PCREL_BRANCH, R_ARM_PC24},
2004 {BFD_RELOC_ARM_PCREL_CALL, R_ARM_CALL},
2005 {BFD_RELOC_ARM_PCREL_JUMP, R_ARM_JUMP24},
2006 {BFD_RELOC_ARM_PCREL_BLX, R_ARM_XPC25},
2007 {BFD_RELOC_THUMB_PCREL_BLX, R_ARM_THM_XPC22},
2008 {BFD_RELOC_32, R_ARM_ABS32},
2009 {BFD_RELOC_32_PCREL, R_ARM_REL32},
2010 {BFD_RELOC_8, R_ARM_ABS8},
2011 {BFD_RELOC_16, R_ARM_ABS16},
2012 {BFD_RELOC_ARM_OFFSET_IMM, R_ARM_ABS12},
2013 {BFD_RELOC_ARM_THUMB_OFFSET, R_ARM_THM_ABS5},
2014 {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015 {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016 {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017 {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018 {BFD_RELOC_THUMB_PCREL_BRANCH9, R_ARM_THM_JUMP8},
2019 {BFD_RELOC_THUMB_PCREL_BRANCH7, R_ARM_THM_JUMP6},
2020 {BFD_RELOC_ARM_GLOB_DAT, R_ARM_GLOB_DAT},
2021 {BFD_RELOC_ARM_JUMP_SLOT, R_ARM_JUMP_SLOT},
2022 {BFD_RELOC_ARM_RELATIVE, R_ARM_RELATIVE},
2023 {BFD_RELOC_ARM_GOTOFF, R_ARM_GOTOFF32},
2024 {BFD_RELOC_ARM_GOTPC, R_ARM_GOTPC},
2025 {BFD_RELOC_ARM_GOT_PREL, R_ARM_GOT_PREL},
2026 {BFD_RELOC_ARM_GOT32, R_ARM_GOT32},
2027 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2028 {BFD_RELOC_ARM_TARGET1, R_ARM_TARGET1},
2029 {BFD_RELOC_ARM_ROSEGREL32, R_ARM_ROSEGREL32},
2030 {BFD_RELOC_ARM_SBREL32, R_ARM_SBREL32},
2031 {BFD_RELOC_ARM_PREL31, R_ARM_PREL31},
2032 {BFD_RELOC_ARM_TARGET2, R_ARM_TARGET2},
2033 {BFD_RELOC_ARM_PLT32, R_ARM_PLT32},
2034 {BFD_RELOC_ARM_TLS_GOTDESC, R_ARM_TLS_GOTDESC},
2035 {BFD_RELOC_ARM_TLS_CALL, R_ARM_TLS_CALL},
2036 {BFD_RELOC_ARM_THM_TLS_CALL, R_ARM_THM_TLS_CALL},
2037 {BFD_RELOC_ARM_TLS_DESCSEQ, R_ARM_TLS_DESCSEQ},
2038 {BFD_RELOC_ARM_THM_TLS_DESCSEQ, R_ARM_THM_TLS_DESCSEQ},
2039 {BFD_RELOC_ARM_TLS_DESC, R_ARM_TLS_DESC},
2040 {BFD_RELOC_ARM_TLS_GD32, R_ARM_TLS_GD32},
2041 {BFD_RELOC_ARM_TLS_LDO32, R_ARM_TLS_LDO32},
2042 {BFD_RELOC_ARM_TLS_LDM32, R_ARM_TLS_LDM32},
2043 {BFD_RELOC_ARM_TLS_DTPMOD32, R_ARM_TLS_DTPMOD32},
2044 {BFD_RELOC_ARM_TLS_DTPOFF32, R_ARM_TLS_DTPOFF32},
2045 {BFD_RELOC_ARM_TLS_TPOFF32, R_ARM_TLS_TPOFF32},
2046 {BFD_RELOC_ARM_TLS_IE32, R_ARM_TLS_IE32},
2047 {BFD_RELOC_ARM_TLS_LE32, R_ARM_TLS_LE32},
2048 {BFD_RELOC_ARM_IRELATIVE, R_ARM_IRELATIVE},
2049 {BFD_RELOC_ARM_GOTFUNCDESC, R_ARM_GOTFUNCDESC},
2050 {BFD_RELOC_ARM_GOTOFFFUNCDESC, R_ARM_GOTOFFFUNCDESC},
2051 {BFD_RELOC_ARM_FUNCDESC, R_ARM_FUNCDESC},
2052 {BFD_RELOC_ARM_FUNCDESC_VALUE, R_ARM_FUNCDESC_VALUE},
2053 {BFD_RELOC_ARM_TLS_GD32_FDPIC, R_ARM_TLS_GD32_FDPIC},
2054 {BFD_RELOC_ARM_TLS_LDM32_FDPIC, R_ARM_TLS_LDM32_FDPIC},
2055 {BFD_RELOC_ARM_TLS_IE32_FDPIC, R_ARM_TLS_IE32_FDPIC},
2056 {BFD_RELOC_VTABLE_INHERIT, R_ARM_GNU_VTINHERIT},
2057 {BFD_RELOC_VTABLE_ENTRY, R_ARM_GNU_VTENTRY},
2058 {BFD_RELOC_ARM_MOVW, R_ARM_MOVW_ABS_NC},
2059 {BFD_RELOC_ARM_MOVT, R_ARM_MOVT_ABS},
2060 {BFD_RELOC_ARM_MOVW_PCREL, R_ARM_MOVW_PREL_NC},
2061 {BFD_RELOC_ARM_MOVT_PCREL, R_ARM_MOVT_PREL},
2062 {BFD_RELOC_ARM_THUMB_MOVW, R_ARM_THM_MOVW_ABS_NC},
2063 {BFD_RELOC_ARM_THUMB_MOVT, R_ARM_THM_MOVT_ABS},
2064 {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065 {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066 {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067 {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068 {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069 {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070 {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071 {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072 {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073 {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074 {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075 {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076 {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077 {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078 {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079 {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080 {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081 {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082 {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083 {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084 {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085 {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086 {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087 {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088 {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089 {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090 {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091 {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092 {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093 {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094 {BFD_RELOC_ARM_V4BX, R_ARM_V4BX},
2095 {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096 {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097 {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098 {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099 {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100 {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101 {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102 };
2103
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 bfd_reloc_code_real_type code)
2107 {
2108 unsigned int i;
2109
2110 for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111 if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112 return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113
2114 return NULL;
2115 }
2116
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 const char *r_name)
2120 {
2121 unsigned int i;
2122
2123 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124 if (elf32_arm_howto_table_1[i].name != NULL
2125 && strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126 return &elf32_arm_howto_table_1[i];
2127
2128 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129 if (elf32_arm_howto_table_2[i].name != NULL
2130 && strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131 return &elf32_arm_howto_table_2[i];
2132
2133 for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134 if (elf32_arm_howto_table_3[i].name != NULL
2135 && strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136 return &elf32_arm_howto_table_3[i];
2137
2138 return NULL;
2139 }
2140
2141 /* Support for core dump NOTE sections. */
2142
/* Parse an NT_PRSTATUS note from an ARM core file and create the
   corresponding ".reg/<pid>" pseudo-section.  Only the 148-byte
   Linux/ARM elf_prstatus layout is recognised; any other size is
   rejected.  The magic offsets below must match the kernel's
   struct elf_prstatus for 32-bit ARM.  */
static bfd_boolean
elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return FALSE;

    case 148:		/* Linux/ARM 32-bit.  */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg: 18 32-bit registers = 72 bytes at offset 72.  */
      offset = 72;
      size = 72;

      break;
    }

  /* Make a ".reg/999" section.  */
  return _bfd_elfcore_make_pseudosection (abfd, ".reg",
					  size, note->descpos + offset);
}
2172
2173 static bfd_boolean
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176 switch (note->descsz)
2177 {
2178 default:
2179 return FALSE;
2180
2181 case 124: /* Linux/ARM elf_prpsinfo. */
2182 elf_tdata (abfd)->core->pid
2183 = bfd_get_32 (abfd, note->descdata + 12);
2184 elf_tdata (abfd)->core->program
2185 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 elf_tdata (abfd)->core->command
2187 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188 }
2189
2190 /* Note that for some reason, a spurious space is tacked
2191 onto the end of the args in some (at least one anyway)
2192 implementations, so strip it off if it exists. */
2193 {
2194 char *command = elf_tdata (abfd)->core->command;
2195 int n = strlen (command);
2196
2197 if (0 < n && command[n - 1] == ' ')
2198 command[n - 1] = '\0';
2199 }
2200
2201 return TRUE;
2202 }
2203
/* Write an ARM core-dump note of NOTE_TYPE into BUF (of size *BUFSIZ),
   returning the updated buffer or NULL for unsupported note types.
   The field offsets mirror the ones read back by
   elf32_arm_nabi_grok_psinfo (program at 28, command at 44) and
   elf32_arm_nabi_grok_prstatus (cursig at 12, pid at 24, regs at 72)
   above.  Variadic arguments depend on NOTE_TYPE:
     NT_PRPSINFO: const char *program, const char *command.
     NT_PRSTATUS: long pid, int cursig, const void *gregs (72 bytes).  */
static char *
elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
				int note_type, ...)
{
  switch (note_type)
    {
    default:
      return NULL;

    case NT_PRPSINFO:
      {
	/* 124 bytes = sizeof (struct elf_prpsinfo) on Linux/ARM.  */
	char data[124] ATTRIBUTE_NONSTRING;
	va_list ap;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	strncpy (data + 28, va_arg (ap, const char *), 16);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_PUSH;
	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
	   -Wstringop-truncation:
	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
	 */
	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
#endif
	strncpy (data + 44, va_arg (ap, const char *), 80);
#if GCC_VERSION == 8000 || GCC_VERSION == 8001
	DIAGNOSTIC_POP;
#endif
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }

    case NT_PRSTATUS:
      {
	/* 148 bytes = sizeof (struct elf_prstatus) on Linux/ARM.  */
	char data[148];
	va_list ap;
	long pid;
	int cursig;
	const void *greg;

	va_start (ap, note_type);
	memset (data, 0, sizeof (data));
	pid = va_arg (ap, long);
	bfd_put_32 (abfd, pid, data + 24);
	cursig = va_arg (ap, int);
	bfd_put_16 (abfd, cursig, data + 12);
	greg = va_arg (ap, const void *);
	memcpy (data + 72, greg, 72);
	va_end (ap);

	return elfcore_write_note (abfd, buf, bufsiz,
				   "CORE", note_type, data, sizeof (data));
      }
    }
}
2262
2263 #define TARGET_LITTLE_SYM arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME "elf32-littlearm"
2265 #define TARGET_BIG_SYM arm_elf32_be_vec
2266 #define TARGET_BIG_NAME "elf32-bigarm"
2267
2268 #define elf_backend_grok_prstatus elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note elf32_arm_nabi_write_core_note
2271
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2274
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276 interworkable. */
2277 #define INTERWORK_FLAG(abfd) \
2278 (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279 || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280 || ((abfd)->flags & BFD_LINKER_CREATED))
2281
2282 /* The linker script knows the section names for placement.
2283 The entry_names are used to do simple name mangling on the stubs.
2284 Given a function name, and its type, the stub can be found. The
2285 name can be changed. The only requirement is the %s be present. */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME "__%s_from_thumb"
2288
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME "__%s_from_arm"
2291
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME "__vfp11_veneer_%x"
2294
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "__stm32l4xx_veneer_%x"
2297
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME "__bx_r%d"
2300
2301 #define STUB_ENTRY_NAME "__%s_veneer"
2302
2303 #define CMSE_PREFIX "__acle_se_"
2304
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2306
2307 /* The name of the dynamic interpreter. This is put in the .interp
2308 section. */
2309 #define ELF_DYNAMIC_INTERPRETER "/usr/lib/ld.so.1"
2310
2311 /* FDPIC default stack size. */
2312 #define DEFAULT_STACK_SIZE 0x8000
2313
2314 static const unsigned long tls_trampoline [] =
2315 {
2316 0xe08e0000, /* add r0, lr, r0 */
2317 0xe5901004, /* ldr r1, [r0,#4] */
2318 0xe12fff11, /* bx r1 */
2319 };
2320
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2322 {
2323 0xe52d2004, /* push {r2} */
2324 0xe59f200c, /* ldr r2, [pc, #3f - . - 8] */
2325 0xe59f100c, /* ldr r1, [pc, #4f - . - 8] */
2326 0xe79f2002, /* 1: ldr r2, [pc, r2] */
2327 0xe081100f, /* 2: add r1, pc */
2328 0xe12fff12, /* bx r2 */
2329 0x00000014, /* 3: .word _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 + dl_tlsdesc_lazy_resolver(GOT) */
2331 0x00000018, /* 4: .word _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2332 };
2333
2334 /* NOTE: [Thumb nop sequence]
2335 When adding code that transitions from Thumb to Arm the instruction that
2336 should be used for the alignment padding should be 0xe7fd (b .-2) instead of
2337 a nop for performance reasons. */
2338
2339 /* ARM FDPIC PLT entry. */
2340 /* The last 5 words contain PLT lazy fragment code and data. */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2342 {
2343 0xe59fc008, /* ldr r12, .L1 */
2344 0xe08cc009, /* add r12, r12, r9 */
2345 0xe59c9004, /* ldr r9, [r12, #4] */
2346 0xe59cf000, /* ldr pc, [r12] */
2347 0x00000000, /* L1. .word foo(GOTOFFFUNCDESC) */
2348 0x00000000, /* L1. .word foo(funcdesc_value_reloc_offset) */
2349 0xe51fc00c, /* ldr r12, [pc, #-12] */
2350 0xe92d1000, /* push {r12} */
2351 0xe599c004, /* ldr r12, [r9, #4] */
2352 0xe599f000, /* ldr pc, [r9] */
2353 };
2354
2355 /* Thumb FDPIC PLT entry. */
2356 /* The last 5 words contain PLT lazy fragment code and data. */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2358 {
2359 0xc00cf8df, /* ldr.w r12, .L1 */
2360 0x0c09eb0c, /* add.w r12, r12, r9 */
2361 0x9004f8dc, /* ldr.w r9, [r12, #4] */
2362 0xf000f8dc, /* ldr.w pc, [r12] */
2363 0x00000000, /* .L1 .word foo(GOTOFFFUNCDESC) */
2364 0x00000000, /* .L2 .word foo(funcdesc_value_reloc_offset) */
2365 0xc008f85f, /* ldr.w r12, .L2 */
2366 0xcd04f84d, /* push {r12} */
2367 0xc004f8d9, /* ldr.w r12, [r9, #4] */
2368 0xf000f8d9, /* ldr.w pc, [r9] */
2369 };
2370
2371 #ifdef FOUR_WORD_PLT
2372
2373 /* The first entry in a procedure linkage table looks like
2374 this. It is set up so that any shared library function that is
2375 called before the relocation has been set up calls the dynamic
2376 linker first. */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2378 {
2379 0xe52de004, /* str lr, [sp, #-4]! */
2380 0xe59fe010, /* ldr lr, [pc, #16] */
2381 0xe08fe00e, /* add lr, pc, lr */
2382 0xe5bef008, /* ldr pc, [lr, #8]! */
2383 };
2384
2385 /* Subsequent entries in a procedure linkage table look like
2386 this. */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2388 {
2389 0xe28fc600, /* add ip, pc, #NN */
2390 0xe28cca00, /* add ip, ip, #NN */
2391 0xe5bcf000, /* ldr pc, [ip, #NN]! */
2392 0x00000000, /* unused */
2393 };
2394
2395 #else /* not FOUR_WORD_PLT */
2396
2397 /* The first entry in a procedure linkage table looks like
2398 this. It is set up so that any shared library function that is
2399 called before the relocation has been set up calls the dynamic
2400 linker first. */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2402 {
2403 0xe52de004, /* str lr, [sp, #-4]! */
2404 0xe59fe004, /* ldr lr, [pc, #4] */
2405 0xe08fe00e, /* add lr, pc, lr */
2406 0xe5bef008, /* ldr pc, [lr, #8]! */
2407 0x00000000, /* &GOT[0] - . */
2408 };
2409
2410 /* By default subsequent entries in a procedure linkage table look like
2411 this. Offsets that don't fit into 28 bits will cause link error. */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2413 {
2414 0xe28fc600, /* add ip, pc, #0xNN00000 */
2415 0xe28cca00, /* add ip, ip, #0xNN000 */
2416 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2417 };
2418
2419 /* When explicitly asked, we'll use this "long" entry format
2420 which can cope with arbitrary displacements. */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2422 {
2423 0xe28fc200, /* add ip, pc, #0xN0000000 */
2424 0xe28cc600, /* add ip, ip, #0xNN00000 */
2425 0xe28cca00, /* add ip, ip, #0xNN000 */
2426 0xe5bcf000, /* ldr pc, [ip, #0xNNN]! */
2427 };
2428
2429 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2430
2431 #endif /* not FOUR_WORD_PLT */
2432
2433 /* The first entry in a procedure linkage table looks like this.
2434 It is set up so that any shared library function that is called before the
2435 relocation has been set up calls the dynamic linker first. */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2437 {
2438 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2439 an instruction maybe encoded to one or two array elements. */
2440 0xf8dfb500, /* push {lr} */
2441 0x44fee008, /* ldr.w lr, [pc, #8] */
2442 /* add lr, pc */
2443 0xff08f85e, /* ldr.w pc, [lr, #8]! */
2444 0x00000000, /* &GOT[0] - . */
2445 };
2446
2447 /* Subsequent entries in a procedure linkage table for thumb only target
2448 look like this. */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2450 {
2451 /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2452 an instruction maybe encoded to one or two array elements. */
2453 0x0c00f240, /* movw ip, #0xNNNN */
2454 0x0c00f2c0, /* movt ip, #0xNNNN */
2455 0xf8dc44fc, /* add ip, pc */
2456 0xe7fcf000 /* ldr.w pc, [ip] */
2457 /* b .-4 */
2458 };
2459
2460 /* The format of the first entry in the procedure linkage table
2461 for a VxWorks executable. */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2463 {
2464 0xe52dc008, /* str ip,[sp,#-8]! */
2465 0xe59fc000, /* ldr ip,[pc] */
2466 0xe59cf008, /* ldr pc,[ip,#8] */
2467 0x00000000, /* .long _GLOBAL_OFFSET_TABLE_ */
2468 };
2469
2470 /* The format of subsequent entries in a VxWorks executable. */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2472 {
2473 0xe59fc000, /* ldr ip,[pc] */
2474 0xe59cf000, /* ldr pc,[ip] */
2475 0x00000000, /* .long @got */
2476 0xe59fc000, /* ldr ip,[pc] */
2477 0xea000000, /* b _PLT */
2478 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2479 };
2480
2481 /* The format of entries in a VxWorks shared library. */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2483 {
2484 0xe59fc000, /* ldr ip,[pc] */
2485 0xe79cf009, /* ldr pc,[ip,r9] */
2486 0x00000000, /* .long @got */
2487 0xe59fc000, /* ldr ip,[pc] */
2488 0xe599f008, /* ldr pc,[r9,#8] */
2489 0x00000000, /* .long @pltindex*sizeof(Elf32_Rela) */
2490 };
2491
2492 /* An initial stub used if the PLT entry is referenced from Thumb code. */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2495 {
2496 0x4778, /* bx pc */
2497 0xe7fd /* b .-2 */
2498 };
2499
2500 /* The entries in a PLT when using a DLL-based target with multiple
2501 address spaces. */
2502 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2503 {
2504 0xe51ff004, /* ldr pc, [pc, #-4] */
2505 0x00000000, /* dcd R_ARM_GLOB_DAT(X) */
2506 };
2507
2508 /* The first entry in a procedure linkage table looks like
2509 this. It is set up so that any shared library function that is
2510 called before the relocation has been set up calls the dynamic
2511 linker first. */
2512 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2513 {
2514 /* First bundle: */
2515 0xe300c000, /* movw ip, #:lower16:&GOT[2]-.+8 */
2516 0xe340c000, /* movt ip, #:upper16:&GOT[2]-.+8 */
2517 0xe08cc00f, /* add ip, ip, pc */
2518 0xe52dc008, /* str ip, [sp, #-8]! */
2519 /* Second bundle: */
2520 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2521 0xe59cc000, /* ldr ip, [ip] */
2522 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2523 0xe12fff1c, /* bx ip */
2524 /* Third bundle: */
2525 0xe320f000, /* nop */
2526 0xe320f000, /* nop */
2527 0xe320f000, /* nop */
2528 /* .Lplt_tail: */
2529 0xe50dc004, /* str ip, [sp, #-4] */
2530 /* Fourth bundle: */
2531 0xe3ccc103, /* bic ip, ip, #0xc0000000 */
2532 0xe59cc000, /* ldr ip, [ip] */
2533 0xe3ccc13f, /* bic ip, ip, #0xc000000f */
2534 0xe12fff1c, /* bx ip */
2535 };
2536 #define ARM_NACL_PLT_TAIL_OFFSET (11 * 4)
2537
2538 /* Subsequent entries in a procedure linkage table look like this. */
2539 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2540 {
2541 0xe300c000, /* movw ip, #:lower16:&GOT[n]-.+8 */
2542 0xe340c000, /* movt ip, #:upper16:&GOT[n]-.+8 */
2543 0xe08cc00f, /* add ip, ip, pc */
2544 0xea000000, /* b .Lplt_tail */
2545 };
2546
2547 #define ARM_MAX_FWD_BRANCH_OFFSET ((((1 << 23) - 1) << 2) + 8)
2548 #define ARM_MAX_BWD_BRANCH_OFFSET ((-((1 << 23) << 2)) + 8)
2549 #define THM_MAX_FWD_BRANCH_OFFSET ((1 << 22) -2 + 4)
2550 #define THM_MAX_BWD_BRANCH_OFFSET (-(1 << 22) + 4)
2551 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2552 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2553 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2554 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2555
/* Kind of each template entry in an insn_sequence: a 16-bit Thumb
   instruction, a 32-bit Thumb-2 instruction, a 32-bit ARM instruction,
   or a literal data word.  */
enum stub_insn_type
{
  THUMB16_TYPE = 1,
  THUMB32_TYPE,
  ARM_TYPE,
  DATA_TYPE
};
2563
2564 #define THUMB16_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 0}
2565 /* A bit of a hack. A Thumb conditional branch, in which the proper condition
2566 is inserted in arm_build_one_stub(). */
2567 #define THUMB16_BCOND_INSN(X) {(X), THUMB16_TYPE, R_ARM_NONE, 1}
2568 #define THUMB32_INSN(X) {(X), THUMB32_TYPE, R_ARM_NONE, 0}
2569 #define THUMB32_MOVT(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2570 #define THUMB32_MOVW(X) {(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2571 #define THUMB32_B_INSN(X, Z) {(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2572 #define ARM_INSN(X) {(X), ARM_TYPE, R_ARM_NONE, 0}
2573 #define ARM_REL_INSN(X, Z) {(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2574 #define DATA_WORD(X,Y,Z) {(X), DATA_TYPE, (Y), (Z)}
2575
/* One element of a stub template, built with the THUMB16_*/THUMB32_*/
   ARM_*/DATA_WORD macros above.  */
typedef struct
{
  bfd_vma data;			/* Instruction encoding or literal word.  */
  enum stub_insn_type type;	/* How DATA is to be emitted.  */
  unsigned int r_type;		/* Relocation applied to this element.  */
  int reloc_addend;		/* Addend for that relocation.  */
} insn_sequence;
2583
2584 /* See note [Thumb nop sequence] when adding a veneer. */
2585
2586 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2587 to reach the stub if necessary. */
2588 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2589 {
2590 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2591 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2592 };
2593
2594 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2595 available. */
2596 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2597 {
2598 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2599 ARM_INSN (0xe12fff1c), /* bx ip */
2600 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2601 };
2602
2603 /* Thumb -> Thumb long branch stub. Used on M-profile architectures. */
2604 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2605 {
2606 THUMB16_INSN (0xb401), /* push {r0} */
2607 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2608 THUMB16_INSN (0x4684), /* mov ip, r0 */
2609 THUMB16_INSN (0xbc01), /* pop {r0} */
2610 THUMB16_INSN (0x4760), /* bx ip */
2611 THUMB16_INSN (0xbf00), /* nop */
2612 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2613 };
2614
2615 /* Thumb -> Thumb long branch stub in thumb2 encoding. Used on armv7. */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2617 {
2618 THUMB32_INSN (0xf85ff000), /* ldr.w pc, [pc, #-0] */
2619 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(x) */
2620 };
2621
2622 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2623 M-profile architectures. */
2624 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2625 {
2626 THUMB32_MOVW (0xf2400c00), /* mov.w ip, R_ARM_MOVW_ABS_NC */
2627 THUMB32_MOVT (0xf2c00c00), /* movt ip, R_ARM_MOVT_ABS << 16 */
2628 THUMB16_INSN (0x4760), /* bx ip */
2629 };
2630
2631 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2632 allowed. */
2633 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2634 {
2635 THUMB16_INSN (0x4778), /* bx pc */
2636 THUMB16_INSN (0xe7fd), /* b .-2 */
2637 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2638 ARM_INSN (0xe12fff1c), /* bx ip */
2639 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2640 };
2641
2642 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2643 available. */
2644 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2645 {
2646 THUMB16_INSN (0x4778), /* bx pc */
2647 THUMB16_INSN (0xe7fd), /* b .-2 */
2648 ARM_INSN (0xe51ff004), /* ldr pc, [pc, #-4] */
2649 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2650 };
2651
2652 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2653 one, when the destination is close enough. */
2654 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2655 {
2656 THUMB16_INSN (0x4778), /* bx pc */
2657 THUMB16_INSN (0xe7fd), /* b .-2 */
2658 ARM_REL_INSN (0xea000000, -8), /* b (X-8) */
2659 };
2660
2661 /* ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
2662 blx to reach the stub if necessary. */
2663 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2664 {
2665 ARM_INSN (0xe59fc000), /* ldr ip, [pc] */
2666 ARM_INSN (0xe08ff00c), /* add pc, pc, ip */
2667 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2668 };
2669
2670 /* ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
2671 blx to reach the stub if necessary. We can not add into pc;
2672 it is not guaranteed to mode switch (different in ARMv6 and
2673 ARMv7). */
2674 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2675 {
2676 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2677 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2678 ARM_INSN (0xe12fff1c), /* bx ip */
2679 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2680 };
2681
2682 /* V4T ARM -> ARM long branch stub, PIC. */
2683 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2684 {
2685 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2686 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2687 ARM_INSN (0xe12fff1c), /* bx ip */
2688 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2689 };
2690
2691 /* V4T Thumb -> ARM long branch stub, PIC. */
2692 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2693 {
2694 THUMB16_INSN (0x4778), /* bx pc */
2695 THUMB16_INSN (0xe7fd), /* b .-2 */
2696 ARM_INSN (0xe59fc000), /* ldr ip, [pc, #0] */
2697 ARM_INSN (0xe08cf00f), /* add pc, ip, pc */
2698 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2699 };
2700
2701 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2702 architectures. */
2703 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2704 {
2705 THUMB16_INSN (0xb401), /* push {r0} */
2706 THUMB16_INSN (0x4802), /* ldr r0, [pc, #8] */
2707 THUMB16_INSN (0x46fc), /* mov ip, pc */
2708 THUMB16_INSN (0x4484), /* add ip, r0 */
2709 THUMB16_INSN (0xbc01), /* pop {r0} */
2710 THUMB16_INSN (0x4760), /* bx ip */
2711 DATA_WORD (0, R_ARM_REL32, 4), /* dcd R_ARM_REL32(X) */
2712 };
2713
2714 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2715 allowed. */
2716 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2717 {
2718 THUMB16_INSN (0x4778), /* bx pc */
2719 THUMB16_INSN (0xe7fd), /* b .-2 */
2720 ARM_INSN (0xe59fc004), /* ldr ip, [pc, #4] */
2721 ARM_INSN (0xe08fc00c), /* add ip, pc, ip */
2722 ARM_INSN (0xe12fff1c), /* bx ip */
2723 DATA_WORD (0, R_ARM_REL32, 0), /* dcd R_ARM_REL32(X) */
2724 };
2725
2726 /* Thumb2/ARM -> TLS trampoline. Lowest common denominator, which is a
2727 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2728 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2729 {
2730 ARM_INSN (0xe59f1000), /* ldr r1, [pc] */
2731 ARM_INSN (0xe08ff001), /* add pc, pc, r1 */
2732 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X-4) */
2733 };
2734
2735 /* V4T Thumb -> TLS trampoline. lowest common denominator, which is a
2736 long PIC stub. We can use r1 as a scratch -- and cannot use ip. */
2737 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2738 {
2739 THUMB16_INSN (0x4778), /* bx pc */
2740 THUMB16_INSN (0xe7fd), /* b .-2 */
2741 ARM_INSN (0xe59f1000), /* ldr r1, [pc, #0] */
2742 ARM_INSN (0xe081f00f), /* add pc, r1, pc */
2743 DATA_WORD (0, R_ARM_REL32, -4), /* dcd R_ARM_REL32(X) */
2744 };
2745
2746 /* NaCl ARM -> ARM long branch stub. */
2747 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2748 {
2749 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2750 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2751 ARM_INSN (0xe12fff1c), /* bx ip */
2752 ARM_INSN (0xe320f000), /* nop */
2753 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2754 DATA_WORD (0, R_ARM_ABS32, 0), /* dcd R_ARM_ABS32(X) */
2755 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2756 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2757 };
2758
2759 /* NaCl ARM -> ARM long branch stub, PIC. */
2760 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2761 {
2762 ARM_INSN (0xe59fc00c), /* ldr ip, [pc, #12] */
2763 ARM_INSN (0xe08cc00f), /* add ip, ip, pc */
2764 ARM_INSN (0xe3ccc13f), /* bic ip, ip, #0xc000000f */
2765 ARM_INSN (0xe12fff1c), /* bx ip */
2766 ARM_INSN (0xe125be70), /* bkpt 0x5be0 */
2767 DATA_WORD (0, R_ARM_REL32, 8), /* dcd R_ARM_REL32(X+8) */
/* NOTE(review): the two trailing zero words appear to pad the stub out,
   presumably to the NaCl bundle size; confirm against the non-PIC NaCl
   stub above, which has the same layout.  */
2768 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2769 DATA_WORD (0, R_ARM_NONE, 0), /* .word 0 */
2770 };
2771
2772 /* Stub used for transition to secure state (aka SG veneer). */
2773 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2774 {
2775 THUMB32_INSN (0xe97fe97f), /* sg. */
2776 THUMB32_B_INSN (0xf000b800, -4), /* b.w original_branch_dest. */
2777 };
2778
2779
2780 /* Cortex-A8 erratum-workaround stubs. */
2781
2782 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2783 can't use a conditional branch to reach this stub). */
2784
2785 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2786 {
2787 THUMB16_BCOND_INSN (0xd001), /* b<cond>.n true. */
2788 THUMB32_B_INSN (0xf000b800, -4), /* b.w insn_after_original_branch. */
2789 THUMB32_B_INSN (0xf000b800, -4) /* true: b.w original_branch_dest. */
2790 };
2791
2792 /* Stub used for b.w and bl.w instructions. */
2793
2794 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2795 {
2796 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2797 };
2798
/* Same encoding as the b.w veneer above; kept as a distinct template so the
   bl case gets its own stub type in DEF_STUBS below.  */
2799 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2800 {
2801 THUMB32_B_INSN (0xf000b800, -4) /* b.w original_branch_dest. */
2802 };
2803
2804 /* Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
2805 instruction (which switches to ARM mode) to point to this stub. Jump to the
2806 real destination using an ARM-mode branch. */
2807
2808 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2809 {
2810 ARM_REL_INSN (0xea000000, -8) /* b original_branch_dest. */
2811 };
2812
2813 /* For each section group there can be a specially created linker section
2814 to hold the stubs for that group. The name of the stub section is based
2815 upon the name of another section within that group with the suffix below
2816 applied.
2817
2818 PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2819 create what appeared to be a linker stub section when it actually
2820 contained user code/data. For example, consider this fragment:
2821
2822 const char * stubborn_problems[] = { "np" };
2823
2824 If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2825 section called:
2826
2827 .data.rel.local.stubborn_problems
2828
2829 This then causes problems in elf32_arm_build_stubs() as it triggers:
2830
2831 // Ignore non-stub sections.
2832 if (!strstr (stub_sec->name, STUB_SUFFIX))
2833 continue;
2834
2835 And so the section would be ignored instead of being processed. Hence
2836 the change in definition of STUB_SUFFIX to a name that cannot be a valid
2837 C identifier. */
2838 #define STUB_SUFFIX ".__stub"
2839
2840 /* One entry per long/short branch stub defined above. */
2841 #define DEF_STUBS \
2842 DEF_STUB(long_branch_any_any) \
2843 DEF_STUB(long_branch_v4t_arm_thumb) \
2844 DEF_STUB(long_branch_thumb_only) \
2845 DEF_STUB(long_branch_v4t_thumb_thumb) \
2846 DEF_STUB(long_branch_v4t_thumb_arm) \
2847 DEF_STUB(short_branch_v4t_thumb_arm) \
2848 DEF_STUB(long_branch_any_arm_pic) \
2849 DEF_STUB(long_branch_any_thumb_pic) \
2850 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2851 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2852 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2853 DEF_STUB(long_branch_thumb_only_pic) \
2854 DEF_STUB(long_branch_any_tls_pic) \
2855 DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2856 DEF_STUB(long_branch_arm_nacl) \
2857 DEF_STUB(long_branch_arm_nacl_pic) \
2858 DEF_STUB(cmse_branch_thumb_only) \
2859 DEF_STUB(a8_veneer_b_cond) \
2860 DEF_STUB(a8_veneer_b) \
2861 DEF_STUB(a8_veneer_bl) \
2862 DEF_STUB(a8_veneer_blx) \
2863 DEF_STUB(long_branch_thumb2_only) \
2864 DEF_STUB(long_branch_thumb2_only_pure)
2865
/* X-macro: expand DEF_STUBS once to build the enum of stub types...  */
2866 #define DEF_STUB(x) arm_stub_##x,
2867 enum elf32_arm_stub_type
2868 {
2869 arm_stub_none,
2870 DEF_STUBS
2871 max_stub_type
2872 };
2873 #undef DEF_STUB
2874
2875 /* Note the first a8_veneer type. */
/* "lwm" is presumably "low water mark", i.e. the lowest-numbered a8 veneer
   enum value -- TODO confirm against uses elsewhere in this file.  */
2876 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2877
/* A stub template: the instruction sequence and its entry count.  */
2878 typedef struct
2879 {
2880 const insn_sequence* template_sequence;
2881 int template_size;
2882 } stub_def;
2883
/* ...and expand it a second time to build the parallel table of templates.
   The leading {NULL, 0} entry keeps the table indexable by the enum above
   (it corresponds to arm_stub_none).  */
2884 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2885 static const stub_def stub_definitions[] =
2886 {
2887 {NULL, 0},
2888 DEF_STUBS
2889 };
2890
/* One entry in the stub hash table: describes a single linker-generated
   stub (long-branch veneer, SG veneer or Cortex-A8 erratum fix).  */
2891 struct elf32_arm_stub_hash_entry
2892 {
2893 /* Base hash table entry structure. */
2894 struct bfd_hash_entry root;
2895
2896 /* The stub section. */
2897 asection *stub_sec;
2898
2899 /* Offset within stub_sec of the beginning of this stub. */
2900 bfd_vma stub_offset;
2901
2902 /* Given the symbol's value and its section we can determine its final
2903 value when building the stubs (so the stub knows where to jump). */
2904 bfd_vma target_value;
2905 asection *target_section;
2906
2907 /* Same as above but for the source of the branch to the stub. Used for
2908 Cortex-A8 erratum workaround to patch it to branch to the stub. As
2909 such, source section does not need to be recorded since Cortex-A8 erratum
2910 workaround stubs are only generated when both source and target are in the
2911 same section. */
2912 bfd_vma source_value;
2913
2914 /* The instruction which caused this stub to be generated (only valid for
2915 Cortex-A8 erratum workaround stubs at present). */
2916 unsigned long orig_insn;
2917
2918 /* The stub type. */
2919 enum elf32_arm_stub_type stub_type;
2920 /* Its encoding size in bytes. */
2921 int stub_size;
2922 /* Its template. */
2923 const insn_sequence *stub_template;
2924 /* The size of the template (number of entries). */
2925 int stub_template_size;
2926
2927 /* The symbol table entry, if any, that this was derived from. */
2928 struct elf32_arm_link_hash_entry *h;
2929
2930 /* Type of branch. */
2931 enum arm_st_branch_type branch_type;
2932
2933 /* Where this stub is being called from, or, in the case of combined
2934 stub sections, the first input section in the group. */
2935 asection *id_sec;
2936
2937 /* The name for the local symbol at the start of this stub. The
2938 stub name in the hash table has to be unique; this does not, so
2939 it can be friendlier. */
2940 char *output_name;
2941 };
2942
2943 /* Used to build a map of a section. This is required for mixed-endian
2944 code/data. */
2945
2946 typedef struct elf32_elf_section_map
2947 {
/* Address of the map entry within the section.  */
2948 bfd_vma vma;
/* Mapping-symbol class -- presumably 'a' (ARM), 't' (Thumb) or 'd' (data)
   per the AAELF mapping symbol convention; confirm at the point of use.  */
2949 char type;
2950 }
2951 elf32_arm_section_map;
2952
2953 /* Information about a VFP11 erratum veneer, or a branch to such a veneer. */
2954
2955 typedef enum
2956 {
2957 VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2958 VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2959 VFP11_ERRATUM_ARM_VENEER,
2960 VFP11_ERRATUM_THUMB_VENEER
2961 }
2962 elf32_vfp11_erratum_type;
2963
2964 typedef struct elf32_vfp11_erratum_list
2965 {
2966 struct elf32_vfp11_erratum_list *next;
2967 bfd_vma vma;
/* Discriminated by TYPE below: "b" describes a branch to a veneer,
   "v" describes the veneer itself; each holds a back-pointer to its
   counterpart.  */
2968 union
2969 {
2970 struct
2971 {
2972 struct elf32_vfp11_erratum_list *veneer;
2973 unsigned int vfp_insn;
2974 } b;
2975 struct
2976 {
2977 struct elf32_vfp11_erratum_list *branch;
2978 unsigned int id;
2979 } v;
2980 } u;
2981 elf32_vfp11_erratum_type type;
2982 }
2983 elf32_vfp11_erratum_list;
2984
2985 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2986 veneer. */
2987 typedef enum
2988 {
2989 STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2990 STM32L4XX_ERRATUM_VENEER
2991 }
2992 elf32_stm32l4xx_erratum_type;
2993
/* Same shape as the VFP11 list above, for the STM32L4XX erratum.  */
2994 typedef struct elf32_stm32l4xx_erratum_list
2995 {
2996 struct elf32_stm32l4xx_erratum_list *next;
2997 bfd_vma vma;
2998 union
2999 {
3000 struct
3001 {
3002 struct elf32_stm32l4xx_erratum_list *veneer;
3003 unsigned int insn;
3004 } b;
3005 struct
3006 {
3007 struct elf32_stm32l4xx_erratum_list *branch;
3008 unsigned int id;
3009 } v;
3010 } u;
3011 elf32_stm32l4xx_erratum_type type;
3012 }
3013 elf32_stm32l4xx_erratum_list;
3014
/* Kinds of edit that can be applied to an .ARM.exidx unwind table.  */
3015 typedef enum
3016 {
3017 DELETE_EXIDX_ENTRY,
3018 INSERT_EXIDX_CANTUNWIND_AT_END
3019 }
3020 arm_unwind_edit_type;
3021
3022 /* A (sorted) list of edits to apply to an unwind table. */
3023 typedef struct arm_unwind_table_edit
3024 {
3025 arm_unwind_edit_type type;
3026 /* Note: we sometimes want to insert an unwind entry corresponding to a
3027 section different from the one we're currently writing out, so record the
3028 (text) section this edit relates to here. */
3029 asection *linked_section;
3030 unsigned int index;
3031 struct arm_unwind_table_edit *next;
3032 }
3033 arm_unwind_table_edit;
3034
/* ARM-specific per-section data, extending the generic ELF section data.
   The generic part must stay first so that the cast in
   elf32_arm_section_data() below is valid.  */
3035 typedef struct _arm_elf_section_data
3036 {
3037 /* Information about mapping symbols. */
3038 struct bfd_elf_section_data elf;
3039 unsigned int mapcount;
3040 unsigned int mapsize;
3041 elf32_arm_section_map *map;
3042 /* Information about CPU errata. */
3043 unsigned int erratumcount;
3044 elf32_vfp11_erratum_list *erratumlist;
3045 unsigned int stm32l4xx_erratumcount;
3046 elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3047 unsigned int additional_reloc_count;
3048 /* Information about unwind tables. */
3049 union
3050 {
3051 /* Unwind info attached to a text section. */
3052 struct
3053 {
3054 asection *arm_exidx_sec;
3055 } text;
3056
3057 /* Unwind info attached to an .ARM.exidx section. */
3058 struct
3059 {
3060 arm_unwind_table_edit *unwind_edit_list;
3061 arm_unwind_table_edit *unwind_edit_tail;
3062 } exidx;
3063 } u;
3064 }
3065 _arm_elf_section_data;
3066
/* Retrieve the ARM-specific section data for SEC.  */
3067 #define elf32_arm_section_data(sec) \
3068 ((_arm_elf_section_data *) elf_section_data (sec))
3069
3070 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3071 These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3072 so may be created multiple times: we use an array of these entries whilst
3073 relaxing which we can refresh easily, then create stubs for each potentially
3074 erratum-triggering instruction once we've settled on a solution. */
3075
3076 struct a8_erratum_fix
3077 {
3078 bfd *input_bfd;
3079 asection *section;
3080 bfd_vma offset;
3081 bfd_vma target_offset;
3082 unsigned long orig_insn;
3083 char *stub_name;
3084 enum elf32_arm_stub_type stub_type;
3085 enum arm_st_branch_type branch_type;
3086 };
3087
3088 /* A table of relocs applied to branches which might trigger Cortex-A8
3089 erratum. */
3090
3091 struct a8_erratum_reloc
3092 {
3093 bfd_vma from;
3094 bfd_vma destination;
3095 struct elf32_arm_link_hash_entry *hash;
3096 const char *sym_name;
3097 unsigned int r_type;
3098 enum arm_st_branch_type branch_type;
/* TRUE when an ordinary (non-A8) long-branch stub already covers this
   branch.  */
3099 bfd_boolean non_a8_stub;
3100 };
3101
3102 /* The size of the thread control block. */
3103 #define TCB_SIZE 8
3104
3105 /* ARM-specific information about a PLT entry, over and above the usual
3106 gotplt_union. */
3107 struct arm_plt_info
3108 {
3109 /* We reference count Thumb references to a PLT entry separately,
3110 so that we can emit the Thumb trampoline only if needed. */
3111 bfd_signed_vma thumb_refcount;
3112
3113 /* Some references from Thumb code may be eliminated by BL->BLX
3114 conversion, so record them separately. */
3115 bfd_signed_vma maybe_thumb_refcount;
3116
3117 /* How many of the recorded PLT accesses were from non-call relocations.
3118 This information is useful when deciding whether anything takes the
3119 address of an STT_GNU_IFUNC PLT. A value of 0 means that all
3120 non-call references to the function should resolve directly to the
3121 real runtime target. */
3122 unsigned int noncall_refcount;
3123
3124 /* Since PLT entries have variable size if the Thumb prologue is
3125 used, we need to record the index into .got.plt instead of
3126 recomputing it from the PLT offset. */
3127 bfd_signed_vma got_offset;
3128 };
3129
3130 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol. */
3131 struct arm_local_iplt_info
3132 {
3133 /* The information that is usually found in the generic ELF part of
3134 the hash table entry. */
3135 union gotplt_union root;
3136
3137 /* The information that is usually found in the ARM-specific part of
3138 the hash table entry. */
3139 struct arm_plt_info arm;
3140
3141 /* A list of all potential dynamic relocations against this symbol. */
3142 struct elf_dyn_relocs *dyn_relocs;
3143 };
3144
3145 /* Structure to handle FDPIC support for local functions. */
3146 struct fdpic_local {
3147 unsigned int funcdesc_cnt;
3148 unsigned int gotofffuncdesc_cnt;
3149 int funcdesc_offset;
3150 };
3151
/* ARM-specific per-BFD data, extending the generic ELF tdata.  The arrays
   below are all indexed by local symbol number and are allocated together
   by elf32_arm_allocate_local_sym_info.  */
3152 struct elf_arm_obj_tdata
3153 {
3154 struct elf_obj_tdata root;
3155
3156 /* tls_type for each local got entry. */
3157 char *local_got_tls_type;
3158
3159 /* GOTPLT entries for TLS descriptors. */
3160 bfd_vma *local_tlsdesc_gotent;
3161
3162 /* Information for local symbols that need entries in .iplt. */
3163 struct arm_local_iplt_info **local_iplt;
3164
3165 /* Zero to warn when linking objects with incompatible enum sizes. */
3166 int no_enum_size_warning;
3167
3168 /* Zero to warn when linking objects with incompatible wchar_t sizes. */
3169 int no_wchar_size_warning;
3170
3171 /* Maintains FDPIC counters and funcdesc info. */
3172 struct fdpic_local *local_fdpic_cnts;
3173 };
3174
3175 #define elf_arm_tdata(bfd) \
3176 ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3177
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179 (elf_arm_tdata (bfd)->local_got_tls_type)
3180
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182 (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3183
3184 #define elf32_arm_local_iplt(bfd) \
3185 (elf_arm_tdata (bfd)->local_iplt)
3186
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188 (elf_arm_tdata (bfd)->local_fdpic_cnts)
3189
/* TRUE when BFD is an ELF object using the ARM backend's tdata.  */
3190 #define is_arm_elf(bfd) \
3191 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192 && elf_tdata (bfd) != NULL \
3193 && elf_object_id (bfd) == ARM_ELF_DATA)
3194
/* bfd_elf32_mkobject hook: allocate the ARM-specific tdata for ABFD.  */
3195 static bfd_boolean
3196 elf32_arm_mkobject (bfd *abfd)
3197 {
3198 return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 ARM_ELF_DATA);
3200 }
3201
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3203
3204 /* Structure to handle FDPIC support for extern functions. */
3205 struct fdpic_global {
3206 unsigned int gotofffuncdesc_cnt;
3207 unsigned int gotfuncdesc_cnt;
3208 unsigned int funcdesc_cnt;
3209 int funcdesc_offset;
3210 int gotfuncdesc_offset;
3211 };
3212
3213 /* Arm ELF linker hash entry. */
3214 struct elf32_arm_link_hash_entry
3215 {
3216 struct elf_link_hash_entry root;
3217
3218 /* ARM-specific PLT information. */
3219 struct arm_plt_info plt;
3220
/* Bit flags describing the kinds of GOT entry the symbol needs; tls_type
   below may hold a combination of them.  */
3221 #define GOT_UNKNOWN 0
3222 #define GOT_NORMAL 1
3223 #define GOT_TLS_GD 2
3224 #define GOT_TLS_IE 4
3225 #define GOT_TLS_GDESC 8
3226 #define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3227 unsigned int tls_type : 8;
3228
3229 /* True if the symbol's PLT entry is in .iplt rather than .plt. */
3230 unsigned int is_iplt : 1;
3231
3232 unsigned int unused : 23;
3233
3234 /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3235 starting at the end of the jump table. */
3236 bfd_vma tlsdesc_got;
3237
3238 /* The symbol marking the real symbol location for exported thumb
3239 symbols with Arm stubs. */
3240 struct elf_link_hash_entry *export_glue;
3241
3242 /* A pointer to the most recently used stub hash entry against this
3243 symbol. */
3244 struct elf32_arm_stub_hash_entry *stub_cache;
3245
3246 /* Counter for FDPIC relocations against this symbol. */
3247 struct fdpic_global fdpic_cnts;
3248 };
3249
3250 /* Traverse an arm ELF linker hash table. */
3251 #define elf32_arm_link_hash_traverse(table, func, info) \
3252 (elf_link_hash_traverse \
3253 (&(table)->root, \
3254 (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func), \
3255 (info)))
3256
3257 /* Get the ARM elf linker hash table from a link_info structure. */
3258 #define elf32_arm_hash_table(info) \
3259 (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3260 == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3261
3262 #define arm_stub_hash_lookup(table, string, create, copy) \
3263 ((struct elf32_arm_stub_hash_entry *) \
3264 bfd_hash_lookup ((table), (string), (create), (copy)))
3265
3266 /* Array to keep track of which stub sections have been created, and
3267 information on stub grouping. */
3268 struct map_stub
3269 {
3270 /* This is the section to which stubs in the group will be
3271 attached. */
3272 asection *link_sec;
3273 /* The stub section. */
3274 asection *stub_sec;
3275 };
3276
/* Bytes occupied by the TLS descriptor jump table: one 4-byte slot per
   R_ARM_TLS_DESC allocated so far.  */
3277 #define elf32_arm_compute_jump_table_size(htab) \
3278 ((htab)->next_tls_desc_index * 4)
3279
3280 /* ARM ELF linker hash table. */
3281 struct elf32_arm_link_hash_table
3282 {
3283 /* The main hash table. */
3284 struct elf_link_hash_table root;
3285
3286 /* The size in bytes of the section containing the Thumb-to-ARM glue. */
3287 bfd_size_type thumb_glue_size;
3288
3289 /* The size in bytes of the section containing the ARM-to-Thumb glue. */
3290 bfd_size_type arm_glue_size;
3291
3292 /* The size in bytes of section containing the ARMv4 BX veneers. */
3293 bfd_size_type bx_glue_size;
3294
3295 /* Offsets of ARMv4 BX veneers. Bit1 set if present, and Bit0 set when
3296 veneer has been populated. */
3297 bfd_vma bx_glue_offset[15];
3298
3299 /* The size in bytes of the section containing glue for VFP11 erratum
3300 veneers. */
3301 bfd_size_type vfp11_erratum_glue_size;
3302
3303 /* The size in bytes of the section containing glue for STM32L4XX erratum
3304 veneers. */
3305 bfd_size_type stm32l4xx_erratum_glue_size;
3306
3307 /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum. This
3308 holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3309 elf32_arm_write_section(). */
3310 struct a8_erratum_fix *a8_erratum_fixes;
3311 unsigned int num_a8_erratum_fixes;
3312
3313 /* An arbitrary input BFD chosen to hold the glue sections. */
3314 bfd * bfd_of_glue_owner;
3315
3316 /* Nonzero to output a BE8 image. */
3317 int byteswap_code;
3318
3319 /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3320 Nonzero if R_ARM_TARGET1 means R_ARM_REL32. */
3321 int target1_is_rel;
3322
3323 /* The relocation to use for R_ARM_TARGET2 relocations. */
3324 int target2_reloc;
3325
3326 /* 0 = Ignore R_ARM_V4BX.
3327 1 = Convert BX to MOV PC.
3328 2 = Generate v4 interworking stubs. */
3329 int fix_v4bx;
3330
3331 /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum. */
3332 int fix_cortex_a8;
3333
3334 /* Whether we should fix the ARM1176 BLX immediate issue. */
3335 int fix_arm1176;
3336
3337 /* Nonzero if the ARM/Thumb BLX instructions are available for use. */
3338 int use_blx;
3339
3340 /* What sort of code sequences we should look for which may trigger the
3341 VFP11 denorm erratum. */
3342 bfd_arm_vfp11_fix vfp11_fix;
3343
3344 /* Global counter for the number of fixes we have emitted. */
3345 int num_vfp11_fixes;
3346
3347 /* What sort of code sequences we should look for which may trigger the
3348 STM32L4XX erratum. */
3349 bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3350
3351 /* Global counter for the number of fixes we have emitted. */
3352 int num_stm32l4xx_fixes;
3353
3354 /* Nonzero to force PIC branch veneers. */
3355 int pic_veneer;
3356
3357 /* The number of bytes in the initial entry in the PLT. */
3358 bfd_size_type plt_header_size;
3359
3360 /* The number of bytes in the subsequent PLT entries. */
3361 bfd_size_type plt_entry_size;
3362
3363 /* True if the target system is VxWorks. */
3364 int vxworks_p;
3365
3366 /* True if the target system is Symbian OS. */
3367 int symbian_p;
3368
3369 /* True if the target system is Native Client. */
3370 int nacl_p;
3371
3372 /* True if the target uses REL relocations. */
3373 bfd_boolean use_rel;
3374
3375 /* Nonzero if import library must be a secure gateway import library
3376 as per ARMv8-M Security Extensions. */
3377 int cmse_implib;
3378
3379 /* The import library whose symbols' address must remain stable in
3380 the import library generated. */
3381 bfd *in_implib_bfd;
3382
3383 /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt. */
3384 bfd_vma next_tls_desc_index;
3385
3386 /* How many R_ARM_TLS_DESC relocations were generated so far. */
3387 bfd_vma num_tls_desc;
3388
3389 /* The (unloaded but important) VxWorks .rela.plt.unloaded section. */
3390 asection *srelplt2;
3391
3392 /* The offset into splt of the PLT entry for the TLS descriptor
3393 resolver. Special values are 0, if not necessary (or not found
3394 to be necessary yet), and -1 if needed but not determined
3395 yet. */
3396 bfd_vma dt_tlsdesc_plt;
3397
3398 /* The offset into sgot of the GOT entry used by the PLT entry
3399 above. */
3400 bfd_vma dt_tlsdesc_got;
3401
3402 /* Offset in .plt section of tls_arm_trampoline. */
3403 bfd_vma tls_trampoline;
3404
3405 /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations. */
3406 union
3407 {
3408 bfd_signed_vma refcount;
3409 bfd_vma offset;
3410 } tls_ldm_got;
3411
3412 /* Small local sym cache. */
3413 struct sym_cache sym_cache;
3414
3415 /* For convenience in allocate_dynrelocs. */
3416 bfd * obfd;
3417
3418 /* The amount of space used by the reserved portion of the sgotplt
3419 section, plus whatever space is used by the jump slots. */
3420 bfd_vma sgotplt_jump_table_size;
3421
3422 /* The stub hash table. */
3423 struct bfd_hash_table stub_hash_table;
3424
3425 /* Linker stub bfd. */
3426 bfd *stub_bfd;
3427
3428 /* Linker call-backs. */
3429 asection * (*add_stub_section) (const char *, asection *, asection *,
3430 unsigned int);
3431 void (*layout_sections_again) (void);
3432
3433 /* Array to keep track of which stub sections have been created, and
3434 information on stub grouping. */
3435 struct map_stub *stub_group;
3436
3437 /* Input stub section holding secure gateway veneers. */
3438 asection *cmse_stub_sec;
3439
3440 /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3441 start to be allocated. */
3442 bfd_vma new_cmse_stub_offset;
3443
3444 /* Number of elements in stub_group. */
3445 unsigned int top_id;
3446
3447 /* Assorted information used by elf32_arm_size_stubs. */
3448 unsigned int bfd_count;
3449 unsigned int top_index;
3450 asection **input_list;
3451
3452 /* True if the target system uses FDPIC. */
3453 int fdpic_p;
3454
3455 /* Fixup section. Used for FDPIC. */
3456 asection *srofixup;
3457 };
3458
3459 /* Add an FDPIC read-only fixup. */
/* Append OFFSET as the next 32-bit entry of SROFIXUP.  The section's
   reloc_count is (re)used here as the allocation cursor; the assert
   catches overflow of the sized section.  */
3460 static void
3461 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3462 {
3463 bfd_vma fixup_offset;
3464
3465 fixup_offset = srofixup->reloc_count++ * 4;
3466 BFD_ASSERT (fixup_offset < srofixup->size);
3467 bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3468 }
3469
/* Count the trailing zero bits of MASK.  Falls back to a portable loop
   when __builtin_ctz is unavailable; the fallback returns the bit width
   of MASK (32 on typical hosts) for MASK == 0, while the builtin path is
   undefined for zero -- callers must not rely on the zero case.  */
static inline int
ctz (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_ctz (mask);
#else
  unsigned int bit;

  /* Scan upwards from bit 0 until a set bit falls out.  */
  for (bit = 0; bit < 8 * sizeof (mask); bit++)
    {
      if ((mask & 0x1) != 0)
	break;
      mask >>= 1;
    }
  return bit;
#endif
}
3487
/* Return the number of bits set in MASK, using the compiler builtin when
   available and a portable bit-by-bit loop otherwise.  */
static inline int
elf32_arm_popcount (unsigned int mask)
{
#if GCC_VERSION >= 3004
  return __builtin_popcount (mask);
#else
  int count = 0;
  unsigned int bit;

  for (bit = 0; bit < 8 * sizeof (mask); bit++)
    {
      count += mask & 0x1;
      mask >>= 1;
    }
  return count;
#endif
}
3506
3507 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3508 asection *sreloc, Elf_Internal_Rela *rel);
3509
/* Fill in the FDPIC function descriptor (two 32-bit words) at OFFSET in
   .got.  The low bit of *FUNCDESC_OFFSET is used as an "already filled"
   flag, so each descriptor is emitted at most once.  For PIC links, an
   R_ARM_FUNCDESC_VALUE dynamic reloc against DYNINDX is emitted and
   ADDR/SEG are stored; otherwise DYNRELOC_VALUE and the GOT base are
   stored directly and two rofixups are recorded so the loader can
   relocate both words.  */
3510 static void
3511 arm_elf_fill_funcdesc(bfd *output_bfd,
3512 struct bfd_link_info *info,
3513 int *funcdesc_offset,
3514 int dynindx,
3515 int offset,
3516 bfd_vma addr,
3517 bfd_vma dynreloc_value,
3518 bfd_vma seg)
3519 {
3520 if ((*funcdesc_offset & 1) == 0)
3521 {
3522 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3523 asection *sgot = globals->root.sgot;
3524
3525 if (bfd_link_pic(info))
3526 {
3527 asection *srelgot = globals->root.srelgot;
3528 Elf_Internal_Rela outrel;
3529
3530 outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3531 outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3532 outrel.r_addend = 0;
3533
3534 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3535 bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3536 bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3537 }
3538 else
3539 {
/* Static link: resolve the descriptor now against the GOT base symbol.  */
3540 struct elf_link_hash_entry *hgot = globals->root.hgot;
3541 bfd_vma got_value = hgot->root.u.def.value
3542 + hgot->root.u.def.section->output_section->vma
3543 + hgot->root.u.def.section->output_offset;
3544
3545 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3546 sgot->output_section->vma + sgot->output_offset
3547 + offset);
3548 arm_elf_add_rofixup(output_bfd, globals->srofixup,
3549 sgot->output_section->vma + sgot->output_offset
3550 + offset + 4);
3551 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3552 bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3553 }
/* Mark the descriptor as written.  */
3554 *funcdesc_offset |= 1;
3555 }
3556 }
3557
3558 /* Create an entry in an ARM ELF linker hash table. */
/* Follows the standard two-stage bfd_hash newfunc protocol: allocate the
   derived entry if the caller did not, let the superclass initialize its
   part, then initialize the ARM-specific fields.  */
3560 static struct bfd_hash_entry *
3561 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3562 struct bfd_hash_table * table,
3563 const char * string)
3564 {
3565 struct elf32_arm_link_hash_entry * ret =
3566 (struct elf32_arm_link_hash_entry *) entry;
3567
3568 /* Allocate the structure if it has not already been allocated by a
3569 subclass. */
3570 if (ret == NULL)
3571 ret = (struct elf32_arm_link_hash_entry *)
3572 bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3573 if (ret == NULL)
3574 return (struct bfd_hash_entry *) ret;
3575
3576 /* Call the allocation method of the superclass. */
3577 ret = ((struct elf32_arm_link_hash_entry *)
3578 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3579 table, string));
3580 if (ret != NULL)
3581 {
3582 ret->tls_type = GOT_UNKNOWN;
3583 ret->tlsdesc_got = (bfd_vma) -1;
3584 ret->plt.thumb_refcount = 0;
3585 ret->plt.maybe_thumb_refcount = 0;
3586 ret->plt.noncall_refcount = 0;
3587 ret->plt.got_offset = -1;
3588 ret->is_iplt = FALSE;
3589 ret->export_glue = NULL;
3590
3591 ret->stub_cache = NULL;
3592
/* -1 offsets mean "not yet assigned".  */
3593 ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3594 ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3595 ret->fdpic_cnts.funcdesc_cnt = 0;
3596 ret->fdpic_cnts.funcdesc_offset = -1;
3597 ret->fdpic_cnts.gotfuncdesc_offset = -1;
3598 }
3599
3600 return (struct bfd_hash_entry *) ret;
3601 }
3602
3603 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3604 symbols. */
/* All per-local-symbol arrays are carved out of one zeroed allocation;
   elf_local_got_refcounts serving as the "already allocated" marker.
   NOTE(review): the fdpic_local array is carved first -- presumably so the
   most strictly aligned element type leads the block; confirm if the
   layout is changed.  */
3606 static bfd_boolean
3607 elf32_arm_allocate_local_sym_info (bfd *abfd)
3608 {
3609 if (elf_local_got_refcounts (abfd) == NULL)
3610 {
3611 bfd_size_type num_syms;
3612 bfd_size_type size;
3613 char *data;
3614
3615 num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3616 size = num_syms * (sizeof (bfd_signed_vma)
3617 + sizeof (struct arm_local_iplt_info *)
3618 + sizeof (bfd_vma)
3619 + sizeof (char)
3620 + sizeof (struct fdpic_local));
3621 data = bfd_zalloc (abfd, size);
3622 if (data == NULL)
3623 return FALSE;
3624
3625 elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3626 data += num_syms * sizeof (struct fdpic_local);
3627
3628 elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3629 data += num_syms * sizeof (bfd_signed_vma);
3630
3631 elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3632 data += num_syms * sizeof (struct arm_local_iplt_info *);
3633
3634 elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3635 data += num_syms * sizeof (bfd_vma);
3636
3637 elf32_arm_local_got_tls_type (abfd) = data;
3638 }
3639 return TRUE;
3640 }
3641
3642 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3643 to input bfd ABFD. Create the information if it doesn't already exist.
3644 Return null if an allocation fails. */
3646 static struct arm_local_iplt_info *
3647 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3648 {
3649 struct arm_local_iplt_info **ptr;
3650
/* Make sure the per-local-symbol arrays exist before indexing them.  */
3651 if (!elf32_arm_allocate_local_sym_info (abfd))
3652 return NULL;
3653
3654 BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3655 ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3656 if (*ptr == NULL)
3657 *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3658 return *ptr;
3659 }
3660
3661 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3662 in ABFD's symbol table. If the symbol is global, H points to its
3663 hash table entry, otherwise H is null.
3664
3665 Return true if the symbol does have PLT information. When returning
3666 true, point *ROOT_PLT at the target-independent reference count/offset
3667 union and *ARM_PLT at the ARM-specific information. */
3669 static bfd_boolean
3670 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3671 struct elf32_arm_link_hash_entry *h,
3672 unsigned long r_symndx, union gotplt_union **root_plt,
3673 struct arm_plt_info **arm_plt)
3674 {
3675 struct arm_local_iplt_info *local_iplt;
3676
/* No PLT of any kind has been created, so nothing can have an entry.  */
3677 if (globals->root.splt == NULL && globals->root.iplt == NULL)
3678 return FALSE;
3679
/* Global symbols carry their PLT info in the hash entry.  */
3680 if (h != NULL)
3681 {
3682 *root_plt = &h->root.plt;
3683 *arm_plt = &h->plt;
3684 return TRUE;
3685 }
3686
/* Local symbols only have PLT info if an .iplt record was created.  */
3687 if (elf32_arm_local_iplt (abfd) == NULL)
3688 return FALSE;
3689
3690 local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3691 if (local_iplt == NULL)
3692 return FALSE;
3693
3694 *root_plt = &local_iplt->root;
3695 *arm_plt = &local_iplt->arm;
3696 return TRUE;
3697 }
3698
3699 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3700
3701 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3702 before it. */
3703
3704 static bfd_boolean
3705 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3706 struct arm_plt_info *arm_plt)
3707 {
3708 struct elf32_arm_link_hash_table *htab;
3709
3710 htab = elf32_arm_hash_table (info);
3711
3712 return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3713 || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3714 }
3715
3716 /* Return a pointer to the head of the dynamic reloc list that should
3717 be used for local symbol ISYM, which is symbol number R_SYMNDX in
3718 ABFD's symbol table. Return null if an error occurs. */
3720 static struct elf_dyn_relocs **
3721 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3722 Elf_Internal_Sym *isym)
3723 {
/* Local ifuncs track their dynamic relocs in their .iplt record.  */
3724 if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3725 {
3726 struct arm_local_iplt_info *local_iplt;
3727
3728 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3729 if (local_iplt == NULL)
3730 return NULL;
3731 return &local_iplt->dyn_relocs;
3732 }
3733 else
3734 {
3735 /* Track dynamic relocs needed for local syms too.
3736 We really need local syms available to do this
3737 easily. Oh well. */
3738 asection *s;
3739 void *vpp;
3740
/* An invalid section index here indicates corrupt input; treat it as
   fatal rather than returning an error.  */
3741 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3742 if (s == NULL)
3743 abort ();
3744
3745 vpp = &elf_section_data (s)->local_dynrel;
3746 return (struct elf_dyn_relocs **) vpp;
3747 }
3748 }
3749
3750 /* Initialize an entry in the stub hash table. */
/* Standard bfd_hash newfunc: allocate the derived entry if needed, chain
   to the base-class initializer, then reset the stub-specific fields.  */
3752 static struct bfd_hash_entry *
3753 stub_hash_newfunc (struct bfd_hash_entry *entry,
3754 struct bfd_hash_table *table,
3755 const char *string)
3756 {
3757 /* Allocate the structure if it has not already been allocated by a
3758 subclass. */
3759 if (entry == NULL)
3760 {
3761 entry = (struct bfd_hash_entry *)
3762 bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3763 if (entry == NULL)
3764 return entry;
3765 }
3766
3767 /* Call the allocation method of the superclass. */
3768 entry = bfd_hash_newfunc (entry, table, string);
3769 if (entry != NULL)
3770 {
3771 struct elf32_arm_stub_hash_entry *eh;
3772
3773 /* Initialize the local fields. */
3774 eh = (struct elf32_arm_stub_hash_entry *) entry;
3775 eh->stub_sec = NULL;
/* (bfd_vma) -1 marks "offset not assigned yet".  */
3776 eh->stub_offset = (bfd_vma) -1;
3777 eh->source_value = 0;
3778 eh->target_value = 0;
3779 eh->target_section = NULL;
3780 eh->orig_insn = 0;
3781 eh->stub_type = arm_stub_none;
3782 eh->stub_size = 0;
3783 eh->stub_template = NULL;
3784 eh->stub_template_size = -1;
3785 eh->h = NULL;
3786 eh->id_sec = NULL;
3787 eh->output_name = NULL;
3788 }
3789
3790 return entry;
3791 }
3792
3793 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3794 shortcuts to them in our hash table. */
3796 static bfd_boolean
3797 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3798 {
3799 struct elf32_arm_link_hash_table *htab;
3800
3801 htab = elf32_arm_hash_table (info);
3802 if (htab == NULL)
3803 return FALSE;
3804
3805 /* BPABI objects never have a GOT, or associated sections. */
3806 if (htab->symbian_p)
3807 return TRUE;
3808
3809 if (! _bfd_elf_create_got_section (dynobj, info))
3810 return FALSE;
3811
3812 /* Also create .rofixup. */
/* FDPIC targets additionally need the read-only fixup section, aligned
   to 4 bytes (2^2) to hold 32-bit entries.  */
3813 if (htab->fdpic_p)
3814 {
3815 htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3816 (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3817 | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3818 if (htab->srofixup == NULL
3819 || !bfd_set_section_alignment (htab->srofixup, 2))
3820 return FALSE;
3821 }
3822
3823 return TRUE;
3824 }
3825
3826 /* Create the .iplt, .rel(a).iplt and .igot.plt sections. */
3827
3828 static bfd_boolean
3829 create_ifunc_sections (struct bfd_link_info *info)
3830 {
3831 struct elf32_arm_link_hash_table *htab;
3832 const struct elf_backend_data *bed;
3833 bfd *dynobj;
3834 asection *s;
3835 flagword flags;
3836
3837 htab = elf32_arm_hash_table (info);
3838 dynobj = htab->root.dynobj;
3839 bed = get_elf_backend_data (dynobj);
3840 flags = bed->dynamic_sec_flags;
3841
3842 if (htab->root.iplt == NULL)
3843 {
3844 s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3845 flags | SEC_READONLY | SEC_CODE);
3846 if (s == NULL
3847 || !bfd_set_section_alignment (s, bed->plt_alignment))
3848 return FALSE;
3849 htab->root.iplt = s;
3850 }
3851
3852 if (htab->root.irelplt == NULL)
3853 {
3854 s = bfd_make_section_anyway_with_flags (dynobj,
3855 RELOC_SECTION (htab, ".iplt"),
3856 flags | SEC_READONLY);
3857 if (s == NULL
3858 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3859 return FALSE;
3860 htab->root.irelplt = s;
3861 }
3862
3863 if (htab->root.igotplt == NULL)
3864 {
3865 s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3866 if (s == NULL
3867 || !bfd_set_section_alignment (s, bed->s->log_file_align))
3868 return FALSE;
3869 htab->root.igotplt = s;
3870 }
3871 return TRUE;
3872 }
3873
3874 /* Determine if we're dealing with a Thumb only architecture. */
3875
3876 static bfd_boolean
3877 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3878 {
3879 int arch;
3880 int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3881 Tag_CPU_arch_profile);
3882
3883 if (profile)
3884 return profile == 'M';
3885
3886 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3887
3888 /* Force return logic to be reviewed for each new architecture. */
3889 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3890
3891 if (arch == TAG_CPU_ARCH_V6_M
3892 || arch == TAG_CPU_ARCH_V6S_M
3893 || arch == TAG_CPU_ARCH_V7E_M
3894 || arch == TAG_CPU_ARCH_V8M_BASE
3895 || arch == TAG_CPU_ARCH_V8M_MAIN
3896 || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3897 return TRUE;
3898
3899 return FALSE;
3900 }
3901
3902 /* Determine if we're dealing with a Thumb-2 object. */
3903
3904 static bfd_boolean
3905 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3906 {
3907 int arch;
3908 int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3909 Tag_THUMB_ISA_use);
3910
3911 if (thumb_isa)
3912 return thumb_isa == 2;
3913
3914 arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3915
3916 /* Force return logic to be reviewed for each new architecture. */
3917 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3918
3919 return (arch == TAG_CPU_ARCH_V6T2
3920 || arch == TAG_CPU_ARCH_V7
3921 || arch == TAG_CPU_ARCH_V7E_M
3922 || arch == TAG_CPU_ARCH_V8
3923 || arch == TAG_CPU_ARCH_V8R
3924 || arch == TAG_CPU_ARCH_V8M_MAIN
3925 || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3926 }
3927
3928 /* Determine whether Thumb-2 BL instruction is available. */
3929
3930 static bfd_boolean
3931 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3932 {
3933 int arch =
3934 bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3935
3936 /* Force return logic to be reviewed for each new architecture. */
3937 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3938
3939 /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M). */
3940 return (arch == TAG_CPU_ARCH_V6T2
3941 || arch >= TAG_CPU_ARCH_V7);
3942 }
3943
/* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
   .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
   hash table.  Also selects the PLT header/entry sizes appropriate to
   the target variant (VxWorks, Thumb-only, FDPIC).  Returns FALSE on
   failure.  */

static bfd_boolean
elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
{
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* The GOT may already exist (create_got_section is also reachable
     from relocation scanning); only create it if it is missing.  */
  if (!htab->root.sgot && !create_got_section (dynobj, info))
    return FALSE;

  if (!_bfd_elf_create_dynamic_sections (dynobj, info))
    return FALSE;

  if (htab->vxworks_p)
    {
      /* VxWorks needs its extra .rel(a).plt.unloaded section, tracked
	 in srelplt2.  */
      if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
	return FALSE;

      /* Shared VxWorks objects use a headerless PLT with a different
	 per-entry template than executables.  */
      if (bfd_link_pic (info))
	{
	  htab->plt_header_size = 0;
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
	}
      else
	{
	  htab->plt_header_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
	  htab->plt_entry_size
	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
	}

      if (elf_elfheader (dynobj))
	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
    }
  else
    {
      /* PR ld/16017
	 Test for thumb only architectures.  Note - we cannot just call
	 using_thumb_only() as the attributes in the output bfd have not been
	 initialised at this point, so instead we use the input bfd.  */
      bfd * saved_obfd = htab->obfd;

      /* Temporarily point the attribute lookup at DYNOBJ.  */
      htab->obfd = dynobj;
      if (using_thumb_only (htab))
	{
	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
	  htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
	}
      htab->obfd = saved_obfd;
    }

  /* FDPIC PLTs have no header; with DF_BIND_NOW each entry is 5 words
     shorter than the full elf32_arm_fdpic_plt_entry template.  */
  if (htab->fdpic_p) {
    htab->plt_header_size = 0;
    if (info->flags & DF_BIND_NOW)
      htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
    else
      htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
  }

  /* By this point the generic code must have created all of these;
     anything missing is a linker-internal logic error.  */
  if (!htab->root.splt
      || !htab->root.srelplt
      || !htab->root.sdynbss
      || (!bfd_link_pic (info) && !htab->root.srelbss))
    abort ();

  return TRUE;
}
4018
/* Copy the extra info we tack onto an elf_link_hash_entry (dynamic
   reloc lists, PLT reference counts, FDPIC counters and TLS type) from
   the indirect symbol IND to the symbol DIR it now resolves to, then
   let the generic ELF code copy the rest.  */

static void
elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
				struct elf_link_hash_entry *dir,
				struct elf_link_hash_entry *ind)
{
  struct elf32_arm_link_hash_entry *edir, *eind;

  edir = (struct elf32_arm_link_hash_entry *) dir;
  eind = (struct elf32_arm_link_hash_entry *) ind;

  if (ind->dyn_relocs != NULL)
    {
      if (dir->dyn_relocs != NULL)
	{
	  struct elf_dyn_relocs **pp;
	  struct elf_dyn_relocs *p;

	  /* Add reloc counts against the indirect sym to the direct sym
	     list.  Merge any entries against the same section.  */
	  for (pp = &ind->dyn_relocs; (p = *pp) != NULL; )
	    {
	      struct elf_dyn_relocs *q;

	      /* Look for an existing entry for P's section; if found,
		 fold P's counts in and unlink P from IND's list.  */
	      for (q = dir->dyn_relocs; q != NULL; q = q->next)
		if (q->sec == p->sec)
		  {
		    q->pc_count += p->pc_count;
		    q->count += p->count;
		    *pp = p->next;
		    break;
		  }
	      /* No match: keep P and advance.  */
	      if (q == NULL)
		pp = &p->next;
	    }
	  /* Append DIR's old list after IND's remaining entries; the
	     combined list is installed on DIR below.  */
	  *pp = dir->dyn_relocs;
	}

      dir->dyn_relocs = ind->dyn_relocs;
      ind->dyn_relocs = NULL;
    }

  if (ind->root.type == bfd_link_hash_indirect)
    {
      /* Copy over PLT info.  */
      edir->plt.thumb_refcount += eind->plt.thumb_refcount;
      eind->plt.thumb_refcount = 0;
      edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
      eind->plt.maybe_thumb_refcount = 0;
      edir->plt.noncall_refcount += eind->plt.noncall_refcount;
      eind->plt.noncall_refcount = 0;

      /* Copy FDPIC counters.  */
      edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
      edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
      edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;

      /* We should only allocate a function to .iplt once the final
	 symbol information is known.  */
      BFD_ASSERT (!eind->is_iplt);

      /* Only take IND's TLS type if DIR has no live GOT references of
	 its own.  */
      if (dir->got.refcount <= 0)
	{
	  edir->tls_type = eind->tls_type;
	  eind->tls_type = GOT_UNKNOWN;
	}
    }

  _bfd_elf_link_hash_copy_indirect (info, dir, ind);
}
4090
4091 /* Destroy an ARM elf linker hash table. */
4092
4093 static void
4094 elf32_arm_link_hash_table_free (bfd *obfd)
4095 {
4096 struct elf32_arm_link_hash_table *ret
4097 = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4098
4099 bfd_hash_table_free (&ret->stub_hash_table);
4100 _bfd_elf_link_hash_table_free (obfd);
4101 }
4102
4103 /* Create an ARM elf linker hash table. */
4104
4105 static struct bfd_link_hash_table *
4106 elf32_arm_link_hash_table_create (bfd *abfd)
4107 {
4108 struct elf32_arm_link_hash_table *ret;
4109 size_t amt = sizeof (struct elf32_arm_link_hash_table);
4110
4111 ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4112 if (ret == NULL)
4113 return NULL;
4114
4115 if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4116 elf32_arm_link_hash_newfunc,
4117 sizeof (struct elf32_arm_link_hash_entry),
4118 ARM_ELF_DATA))
4119 {
4120 free (ret);
4121 return NULL;
4122 }
4123
4124 ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4125 ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4126 #ifdef FOUR_WORD_PLT
4127 ret->plt_header_size = 16;
4128 ret->plt_entry_size = 16;
4129 #else
4130 ret->plt_header_size = 20;
4131 ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4132 #endif
4133 ret->use_rel = TRUE;
4134 ret->obfd = abfd;
4135 ret->fdpic_p = 0;
4136
4137 if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4138 sizeof (struct elf32_arm_stub_hash_entry)))
4139 {
4140 _bfd_elf_link_hash_table_free (abfd);
4141 return NULL;
4142 }
4143 ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4144
4145 return &ret->root.root;
4146 }
4147
4148 /* Determine what kind of NOPs are available. */
4149
4150 static bfd_boolean
4151 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4152 {
4153 const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4154 Tag_CPU_arch);
4155
4156 /* Force return logic to be reviewed for each new architecture. */
4157 BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4158
4159 return (arch == TAG_CPU_ARCH_V6T2
4160 || arch == TAG_CPU_ARCH_V6K
4161 || arch == TAG_CPU_ARCH_V7
4162 || arch == TAG_CPU_ARCH_V8
4163 || arch == TAG_CPU_ARCH_V8R);
4164 }
4165
4166 static bfd_boolean
4167 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4168 {
4169 switch (stub_type)
4170 {
4171 case arm_stub_long_branch_thumb_only:
4172 case arm_stub_long_branch_thumb2_only:
4173 case arm_stub_long_branch_thumb2_only_pure:
4174 case arm_stub_long_branch_v4t_thumb_arm:
4175 case arm_stub_short_branch_v4t_thumb_arm:
4176 case arm_stub_long_branch_v4t_thumb_arm_pic:
4177 case arm_stub_long_branch_v4t_thumb_tls_pic:
4178 case arm_stub_long_branch_thumb_only_pic:
4179 case arm_stub_cmse_branch_thumb_only:
4180 return TRUE;
4181 case arm_stub_none:
4182 BFD_FAIL ();
4183 return FALSE;
4184 break;
4185 default:
4186 return FALSE;
4187 }
4188 }
4189
/* Determine the type of stub needed, if any, for a call.

   INFO is the link; INPUT_SEC and REL locate the branch instruction;
   ST_TYPE and *ACTUAL_BRANCH_TYPE describe the target symbol (HASH if
   it is global); DESTINATION is the target address, SYM_SEC the target
   section and NAME is used only in diagnostics.  If a stub is needed,
   *ACTUAL_BRANCH_TYPE is updated to the mode actually branched to.
   Returns arm_stub_none when no veneer is required.  */

static enum elf32_arm_stub_type
arm_type_of_stub (struct bfd_link_info *info,
		  asection *input_sec,
		  const Elf_Internal_Rela *rel,
		  unsigned char st_type,
		  enum arm_st_branch_type *actual_branch_type,
		  struct elf32_arm_link_hash_entry *hash,
		  bfd_vma destination,
		  asection *sym_sec,
		  bfd *input_bfd,
		  const char *name)
{
  bfd_vma location;
  bfd_signed_vma branch_offset;
  unsigned int r_type;
  struct elf32_arm_link_hash_table * globals;
  bfd_boolean thumb2, thumb2_bl, thumb_only;
  enum elf32_arm_stub_type stub_type = arm_stub_none;
  int use_plt = 0;
  enum arm_st_branch_type branch_type = *actual_branch_type;
  union gotplt_union *root_plt;
  struct arm_plt_info *arm_plt;
  int arch;
  int thumb2_movw;

  /* ST_BRANCH_LONG branches never need a veneer.  */
  if (branch_type == ST_BRANCH_LONG)
    return stub_type;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return stub_type;

  thumb_only = using_thumb_only (globals);
  thumb2 = using_thumb2 (globals);
  thumb2_bl = using_thumb2_bl (globals);

  arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  /* True for architectures that implement the thumb2 movw instruction.  */
  thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);

  /* Determine where the call point is.  */
  location = (input_sec->output_offset
	      + input_sec->output_section->vma
	      + rel->r_offset);

  r_type = ELF32_R_TYPE (rel->r_info);

  /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
     are considering a function call relocation.  */
  if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
		     || r_type == R_ARM_THM_JUMP19)
      && branch_type == ST_BRANCH_TO_ARM)
    branch_type = ST_BRANCH_TO_THUMB;

  /* For TLS call relocs, it is the caller's responsibility to provide
     the address of the appropriate trampoline.  */
  if (r_type != R_ARM_TLS_CALL
      && r_type != R_ARM_THM_TLS_CALL
      && elf32_arm_get_plt_info (input_bfd, globals, hash,
				 ELF32_R_SYM (rel->r_info), &root_plt,
				 &arm_plt)
      && root_plt->offset != (bfd_vma) -1)
    {
      asection *splt;

      /* IFUNC calls and local IFUNC symbols use the .iplt section.  */
      if (hash == NULL || hash->is_iplt)
	splt = globals->root.iplt;
      else
	splt = globals->root.splt;
      if (splt != NULL)
	{
	  use_plt = 1;

	  /* Note when dealing with PLT entries: the main PLT stub is in
	     ARM mode, so if the branch is in Thumb mode, another
	     Thumb->ARM stub will be inserted later just before the ARM
	     PLT stub.  If a long branch stub is needed, we'll add a
	     Thumb->Arm one and branch directly to the ARM PLT entry.
	     Here, we have to check if a pre-PLT Thumb->ARM stub
	     is needed and if it will be close enough.  */

	  /* Redirect the branch to the PLT entry for range checking.  */
	  destination = (splt->output_section->vma
			 + splt->output_offset
			 + root_plt->offset);
	  st_type = STT_FUNC;

	  /* Thumb branch/call to PLT: it can become a branch to ARM
	     or to Thumb.  We must perform the same checks and
	     corrections as in elf32_arm_final_link_relocate.  */
	  if ((r_type == R_ARM_THM_CALL)
	      || (r_type == R_ARM_THM_JUMP24))
	    {
	      if (globals->use_blx
		  && r_type == R_ARM_THM_CALL
		  && !thumb_only)
		{
		  /* If the Thumb BLX instruction is available, convert
		     the BL to a BLX instruction to call the ARM-mode
		     PLT entry.  */
		  branch_type = ST_BRANCH_TO_ARM;
		}
	      else
		{
		  if (!thumb_only)
		    /* Target the Thumb stub before the ARM PLT entry.  */
		    destination -= PLT_THUMB_STUB_SIZE;
		  branch_type = ST_BRANCH_TO_THUMB;
		}
	    }
	  else
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	    }
	}
    }
  /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
  BFD_ASSERT (st_type != STT_GNU_IFUNC);

  branch_offset = (bfd_signed_vma)(destination - location);

  if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
      || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
    {
      /* Handle cases where:
	 - this call goes too far (different Thumb/Thumb2 max
	   distance)
	 - it's a Thumb->Arm call and blx is not available, or it's a
	   Thumb->Arm branch (not bl).  A stub is needed in this case,
	   but only if this call is not through a PLT entry.  Indeed,
	   PLT stubs handle mode switching already.  */
      if ((!thumb2_bl
	   && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
	       || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2_bl
	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
	  || (thumb2
	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
	      && (r_type == R_ARM_THM_JUMP19))
	  || (branch_type == ST_BRANCH_TO_ARM
	      && (((r_type == R_ARM_THM_CALL
		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
		  || (r_type == R_ARM_THM_JUMP24)
		  || (r_type == R_ARM_THM_JUMP19))
	      && !use_plt))
	{
	  /* If we need to insert a Thumb-Thumb long branch stub to a
	     PLT, use one that branches directly to the ARM PLT
	     stub.  If we pretended we'd use the pre-PLT Thumb->ARM
	     stub, undo this now.  */
	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
	    {
	      branch_type = ST_BRANCH_TO_ARM;
	      branch_offset += PLT_THUMB_STUB_SIZE;
	    }

	  if (branch_type == ST_BRANCH_TO_THUMB)
	    {
	      /* Thumb to thumb.  */
	      if (!thumb_only)
		{
		  if (input_sec->flags & SEC_ELF_PURECODE)
		    _bfd_error_handler
		      (_("%pB(%pA): warning: long branch veneers used in"
			 " section with SHF_ARM_PURECODE section"
			 " attribute is only supported for M-profile"
			 " targets that implement the movw instruction"),
		       input_bfd, input_sec);

		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		    /* PIC stubs.  */
		    ? ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  Stub starts with ARM code, so
			  we must be able to switch mode before
			  reaching it, which is only possible for 'bl'
			  (ie R_ARM_THM_CALL relocation).  */
		       ? arm_stub_long_branch_any_thumb_pic
		       /* On V4T, use Thumb code only.  */
		       : arm_stub_long_branch_v4t_thumb_thumb_pic)

		    /* non-PIC stubs.  */
		    : ((globals->use_blx
			&& (r_type == R_ARM_THM_CALL))
		       /* V5T and above.  */
		       ? arm_stub_long_branch_any_any
		       /* V4T.  */
		       : arm_stub_long_branch_v4t_thumb_thumb);
		}
	      else
		{
		  /* Thumb-only target: prefer the pure-code capable
		     stub when the section forbids literal loads and
		     movw is available.  */
		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
		    stub_type = arm_stub_long_branch_thumb2_only_pure;
		  else
		    {
		      if (input_sec->flags & SEC_ELF_PURECODE)
			_bfd_error_handler
			  (_("%pB(%pA): warning: long branch veneers used in"
			     " section with SHF_ARM_PURECODE section"
			     " attribute is only supported for M-profile"
			     " targets that implement the movw instruction"),
			   input_bfd, input_sec);

		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
			/* PIC stub.  */
			? arm_stub_long_branch_thumb_only_pic
			/* non-PIC stub.  */
			: (thumb2 ? arm_stub_long_branch_thumb2_only
				  : arm_stub_long_branch_thumb_only);
		    }
		}
	    }
	  else
	    {
	      if (input_sec->flags & SEC_ELF_PURECODE)
		_bfd_error_handler
		  (_("%pB(%pA): warning: long branch veneers used in"
		     " section with SHF_ARM_PURECODE section"
		     " attribute is only supported" " for M-profile"
		     " targets that implement the movw instruction"),
		   input_bfd, input_sec);

	      /* Thumb to arm.  */
	      if (sym_sec != NULL
		  && sym_sec->owner != NULL
		  && !INTERWORK_FLAG (sym_sec->owner))
		{
		  _bfd_error_handler
		    (_("%pB(%s): warning: interworking not enabled;"
		       " first occurrence: %pB: %s call to %s"),
		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
		}

	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_THM_TLS_CALL
		   /* TLS PIC stubs.  */
		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
		      : arm_stub_long_branch_v4t_thumb_tls_pic)
		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
		      /* V5T PIC and above.  */
		      ? arm_stub_long_branch_any_arm_pic
		      /* V4T PIC stub.  */
		      : arm_stub_long_branch_v4t_thumb_arm_pic))

		/* non-PIC stubs.  */
		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_thumb_arm);

	      /* Handle v4t short branches.  */
	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
		stub_type = arm_stub_short_branch_v4t_thumb_arm;
	    }
	}
    }
  else if (r_type == R_ARM_CALL
	   || r_type == R_ARM_JUMP24
	   || r_type == R_ARM_PLT32
	   || r_type == R_ARM_TLS_CALL)
    {
      if (input_sec->flags & SEC_ELF_PURECODE)
	_bfd_error_handler
	  (_("%pB(%pA): warning: long branch veneers used in"
	     " section with SHF_ARM_PURECODE section"
	     " attribute is only supported for M-profile"
	     " targets that implement the movw instruction"),
	   input_bfd, input_sec);
      if (branch_type == ST_BRANCH_TO_THUMB)
	{
	  /* Arm to thumb.  */

	  if (sym_sec != NULL
	      && sym_sec->owner != NULL
	      && !INTERWORK_FLAG (sym_sec->owner))
	    {
	      _bfd_error_handler
		(_("%pB(%s): warning: interworking not enabled;"
		   " first occurrence: %pB: %s call to %s"),
		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	    }

	  /* We have an extra 2-bytes reach because of
	     the mode change (bit 24 (H) of BLX encoding).  */
	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
	      || (r_type == R_ARM_CALL && !globals->use_blx)
	      || (r_type == R_ARM_JUMP24)
	      || (r_type == R_ARM_PLT32))
	    {
	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_thumb_pic
		   /* V4T stub.  */
		   : arm_stub_long_branch_v4t_arm_thumb_pic)

		/* non-PIC stubs.  */
		: ((globals->use_blx)
		   /* V5T and above.  */
		   ? arm_stub_long_branch_any_any
		   /* V4T.  */
		   : arm_stub_long_branch_v4t_arm_thumb);
	    }
	}
      else
	{
	  /* Arm to arm.  */
	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
	    {
	      stub_type =
		(bfd_link_pic (info) | globals->pic_veneer)
		/* PIC stubs.  */
		? (r_type == R_ARM_TLS_CALL
		   /* TLS PIC Stub.  */
		   ? arm_stub_long_branch_any_tls_pic
		   : (globals->nacl_p
		      ? arm_stub_long_branch_arm_nacl_pic
		      : arm_stub_long_branch_any_arm_pic))
		/* non-PIC stubs.  */
		: (globals->nacl_p
		   ? arm_stub_long_branch_arm_nacl
		   : arm_stub_long_branch_any_any);
	    }
	}
    }

  /* If a stub is needed, record the actual destination type.  */
  if (stub_type != arm_stub_none)
    *actual_branch_type = branch_type;

  return stub_type;
}
4534
4535 /* Build a name for an entry in the stub hash table. */
4536
4537 static char *
4538 elf32_arm_stub_name (const asection *input_section,
4539 const asection *sym_sec,
4540 const struct elf32_arm_link_hash_entry *hash,
4541 const Elf_Internal_Rela *rel,
4542 enum elf32_arm_stub_type stub_type)
4543 {
4544 char *stub_name;
4545 bfd_size_type len;
4546
4547 if (hash)
4548 {
4549 len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4550 stub_name = (char *) bfd_malloc (len);
4551 if (stub_name != NULL)
4552 sprintf (stub_name, "%08x_%s+%x_%d",
4553 input_section->id & 0xffffffff,
4554 hash->root.root.root.string,
4555 (int) rel->r_addend & 0xffffffff,
4556 (int) stub_type);
4557 }
4558 else
4559 {
4560 len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4561 stub_name = (char *) bfd_malloc (len);
4562 if (stub_name != NULL)
4563 sprintf (stub_name, "%08x_%x:%x+%x_%d",
4564 input_section->id & 0xffffffff,
4565 sym_sec->id & 0xffffffff,
4566 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4567 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4568 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4569 (int) rel->r_addend & 0xffffffff,
4570 (int) stub_type);
4571 }
4572
4573 return stub_name;
4574 }
4575
/* Look up an entry in the stub hash.  Stub entries are cached because
   creating the stub name takes a bit of time.  Returns NULL if the
   input section cannot hold stubs or no matching entry exists; exits
   the linker if a long branch out of the CMSE stub section would be
   required (unsupported, PR ld/24709).  */

static struct elf32_arm_stub_hash_entry *
elf32_arm_get_stub_entry (const asection *input_section,
			  const asection *sym_sec,
			  struct elf_link_hash_entry *hash,
			  const Elf_Internal_Rela *rel,
			  struct elf32_arm_link_hash_table *htab,
			  enum elf32_arm_stub_type stub_type)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
  const asection *id_sec;

  /* Only code sections are grouped for stubs.  */
  if ((input_section->flags & SEC_CODE) == 0)
    return NULL;

  /* If the input section is the CMSE stubs one and it needs a long
     branch stub to reach it's final destination, give up with an
     error message: this is not supported.  See PR ld/24709.  */
  if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen(CMSE_STUB_NAME)))
    {
      bfd *output_bfd = htab->obfd;
      asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);

      _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
			  CMSE_STUB_NAME,
			  (uint64_t)out_sec->output_section->vma
			    + out_sec->output_offset,
			  (uint64_t)sym_sec->output_section->vma
			    + sym_sec->output_offset
			    + h->root.root.u.def.value);
      /* Exit, rather than leave incompletely processed
	 relocations.  */
      xexit(1);
    }

  /* If this input section is part of a group of sections sharing one
     stub section, then use the id of the first section in the group.
     Stub names need to include a section id, as there may well be
     more than one stub used to reach say, printf, and we need to
     distinguish between them.  */
  BFD_ASSERT (input_section->id <= htab->top_id);
  id_sec = htab->stub_group[input_section->id].link_sec;

  /* Fast path: reuse the last stub found for this symbol when the
     grouping section and stub type both match.  */
  if (h != NULL && h->stub_cache != NULL
      && h->stub_cache->h == h
      && h->stub_cache->id_sec == id_sec
      && h->stub_cache->stub_type == stub_type)
    {
      stub_entry = h->stub_cache;
    }
  else
    {
      char *stub_name;

      stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
      if (stub_name == NULL)
	return NULL;

      /* Look up only (create = FALSE); absence yields NULL, which is
	 also cached below.  */
      stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
					 stub_name, FALSE, FALSE);
      if (h != NULL)
	h->stub_cache = stub_entry;

      free (stub_name);
    }

  return stub_entry;
}
4648
4649 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4650 section. */
4651
4652 static bfd_boolean
4653 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4654 {
4655 if (stub_type >= max_stub_type)
4656 abort (); /* Should be unreachable. */
4657
4658 switch (stub_type)
4659 {
4660 case arm_stub_cmse_branch_thumb_only:
4661 return TRUE;
4662
4663 default:
4664 return FALSE;
4665 }
4666
4667 abort (); /* Should be unreachable. */
4668 }
4669
4670 /* Required alignment (as a power of 2) for the dedicated section holding
4671 veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4672 with input sections. */
4673
4674 static int
4675 arm_dedicated_stub_output_section_required_alignment
4676 (enum elf32_arm_stub_type stub_type)
4677 {
4678 if (stub_type >= max_stub_type)
4679 abort (); /* Should be unreachable. */
4680
4681 switch (stub_type)
4682 {
4683 /* Vectors of Secure Gateway veneers must be aligned on 32byte
4684 boundary. */
4685 case arm_stub_cmse_branch_thumb_only:
4686 return 5;
4687
4688 default:
4689 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4690 return 0;
4691 }
4692
4693 abort (); /* Should be unreachable. */
4694 }
4695
4696 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4697 NULL if veneers of this type are interspersed with input sections. */
4698
4699 static const char *
4700 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4701 {
4702 if (stub_type >= max_stub_type)
4703 abort (); /* Should be unreachable. */
4704
4705 switch (stub_type)
4706 {
4707 case arm_stub_cmse_branch_thumb_only:
4708 return CMSE_STUB_NAME;
4709
4710 default:
4711 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4712 return NULL;
4713 }
4714
4715 abort (); /* Should be unreachable. */
4716 }
4717
4718 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4719 returns the address of the hash table field in HTAB holding a pointer to the
4720 corresponding input section. Otherwise, returns NULL. */
4721
4722 static asection **
4723 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4724 enum elf32_arm_stub_type stub_type)
4725 {
4726 if (stub_type >= max_stub_type)
4727 abort (); /* Should be unreachable. */
4728
4729 switch (stub_type)
4730 {
4731 case arm_stub_cmse_branch_thumb_only:
4732 return &htab->cmse_stub_sec;
4733
4734 default:
4735 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4736 return NULL;
4737 }
4738
4739 abort (); /* Should be unreachable. */
4740 }
4741
/* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
   is the section that branch into veneer and can be NULL if stub should go in
   a dedicated output section.  Returns a pointer to the stub section, and the
   section to which the stub section will be attached (in *LINK_SEC_P).
   LINK_SEC_P may be NULL.  */

static asection *
elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
				   struct elf32_arm_link_hash_table *htab,
				   enum elf32_arm_stub_type stub_type)
{
  asection *link_sec, *out_sec, **stub_sec_p;
  const char *stub_sec_prefix;
  bfd_boolean dedicated_output_section =
    arm_dedicated_stub_output_section_required (stub_type);
  int align;

  if (dedicated_output_section)
    {
      /* Dedicated path (CMSE): the stub section lives in a named
	 output section that must already exist in the output bfd.  */
      bfd *output_bfd = htab->obfd;
      const char *out_sec_name =
	arm_dedicated_stub_output_section_name (stub_type);
      link_sec = NULL;
      stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
      stub_sec_prefix = out_sec_name;
      align = arm_dedicated_stub_output_section_required_alignment (stub_type);
      out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
      if (out_sec == NULL)
	{
	  _bfd_error_handler (_("no address assigned to the veneers output "
				"section %s"), out_sec_name);
	  return NULL;
	}
    }
  else
    {
      /* Grouped path: the stub section is shared by the group of input
	 sections SECTION belongs to, keyed by the group leader.  */
      BFD_ASSERT (section->id <= htab->top_id);
      link_sec = htab->stub_group[section->id].link_sec;
      BFD_ASSERT (link_sec != NULL);
      stub_sec_p = &htab->stub_group[section->id].stub_sec;
      if (*stub_sec_p == NULL)
	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
      stub_sec_prefix = link_sec->name;
      out_sec = link_sec->output_section;
      align = htab->nacl_p ? 4 : 3;
    }

  /* Create the stub section on first use, named <prefix><STUB_SUFFIX>.  */
  if (*stub_sec_p == NULL)
    {
      size_t namelen;
      bfd_size_type len;
      char *s_name;

      namelen = strlen (stub_sec_prefix);
      len = namelen + sizeof (STUB_SUFFIX);
      s_name = (char *) bfd_alloc (htab->stub_bfd, len);
      if (s_name == NULL)
	return NULL;

      memcpy (s_name, stub_sec_prefix, namelen);
      memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
      *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
					       align);
      if (*stub_sec_p == NULL)
	return NULL;

      out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
			| SEC_KEEP;
    }

  /* Record the stub section against SECTION itself, so the next lookup
     hits directly instead of going via the group leader.  */
  if (!dedicated_output_section)
    htab->stub_group[section->id].stub_sec = *stub_sec_p;

  if (link_sec_p)
    *link_sec_p = link_sec;

  return *stub_sec_p;
}
4821
4822 /* Add a new stub entry to the stub hash. Not all fields of the new
4823 stub entry are initialised. */
4824
4825 static struct elf32_arm_stub_hash_entry *
4826 elf32_arm_add_stub (const char *stub_name, asection *section,
4827 struct elf32_arm_link_hash_table *htab,
4828 enum elf32_arm_stub_type stub_type)
4829 {
4830 asection *link_sec;
4831 asection *stub_sec;
4832 struct elf32_arm_stub_hash_entry *stub_entry;
4833
4834 stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4835 stub_type);
4836 if (stub_sec == NULL)
4837 return NULL;
4838
4839 /* Enter this entry into the linker stub hash table. */
4840 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4841 TRUE, FALSE);
4842 if (stub_entry == NULL)
4843 {
4844 if (section == NULL)
4845 section = stub_sec;
4846 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4847 section->owner, stub_name);
4848 return NULL;
4849 }
4850
4851 stub_entry->stub_sec = stub_sec;
4852 stub_entry->stub_offset = (bfd_vma) -1;
4853 stub_entry->id_sec = link_sec;
4854
4855 return stub_entry;
4856 }
4857
4858 /* Store an Arm insn into an output section not processed by
4859 elf32_arm_write_section. */
4860
4861 static void
4862 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4863 bfd * output_bfd, bfd_vma val, void * ptr)
4864 {
4865 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4866 bfd_putl32 (val, ptr);
4867 else
4868 bfd_putb32 (val, ptr);
4869 }
4870
4871 /* Store a 16-bit Thumb insn into an output section not processed by
4872 elf32_arm_write_section. */
4873
4874 static void
4875 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4876 bfd * output_bfd, bfd_vma val, void * ptr)
4877 {
4878 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4879 bfd_putl16 (val, ptr);
4880 else
4881 bfd_putb16 (val, ptr);
4882 }
4883
4884 /* Store a Thumb2 insn into an output section not processed by
4885 elf32_arm_write_section. */
4886
4887 static void
4888 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4889 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4890 {
4891 /* T2 instructions are 16-bit streamed. */
4892 if (htab->byteswap_code != bfd_little_endian (output_bfd))
4893 {
4894 bfd_putl16 ((val >> 16) & 0xffff, ptr);
4895 bfd_putl16 ((val & 0xffff), ptr + 2);
4896 }
4897 else
4898 {
4899 bfd_putb16 ((val >> 16) & 0xffff, ptr);
4900 bfd_putb16 ((val & 0xffff), ptr + 2);
4901 }
4902 }
4903
4904 /* If it's possible to change R_TYPE to a more efficient access
4905 model, return the new reloc type. */
4906
4907 static unsigned
4908 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4909 struct elf_link_hash_entry *h)
4910 {
4911 int is_local = (h == NULL);
4912
4913 if (bfd_link_dll (info)
4914 || (h && h->root.type == bfd_link_hash_undefweak))
4915 return r_type;
4916
4917 /* We do not support relaxations for Old TLS models. */
4918 switch (r_type)
4919 {
4920 case R_ARM_TLS_GOTDESC:
4921 case R_ARM_TLS_CALL:
4922 case R_ARM_THM_TLS_CALL:
4923 case R_ARM_TLS_DESCSEQ:
4924 case R_ARM_THM_TLS_DESCSEQ:
4925 return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4926 }
4927
4928 return r_type;
4929 }
4930
4931 static bfd_reloc_status_type elf32_arm_final_link_relocate
4932 (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4933 Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4934 const char *, unsigned char, enum arm_st_branch_type,
4935 struct elf_link_hash_entry *, bfd_boolean *, char **);
4936
4937 static unsigned int
4938 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4939 {
4940 switch (stub_type)
4941 {
4942 case arm_stub_a8_veneer_b_cond:
4943 case arm_stub_a8_veneer_b:
4944 case arm_stub_a8_veneer_bl:
4945 return 2;
4946
4947 case arm_stub_long_branch_any_any:
4948 case arm_stub_long_branch_v4t_arm_thumb:
4949 case arm_stub_long_branch_thumb_only:
4950 case arm_stub_long_branch_thumb2_only:
4951 case arm_stub_long_branch_thumb2_only_pure:
4952 case arm_stub_long_branch_v4t_thumb_thumb:
4953 case arm_stub_long_branch_v4t_thumb_arm:
4954 case arm_stub_short_branch_v4t_thumb_arm:
4955 case arm_stub_long_branch_any_arm_pic:
4956 case arm_stub_long_branch_any_thumb_pic:
4957 case arm_stub_long_branch_v4t_thumb_thumb_pic:
4958 case arm_stub_long_branch_v4t_arm_thumb_pic:
4959 case arm_stub_long_branch_v4t_thumb_arm_pic:
4960 case arm_stub_long_branch_thumb_only_pic:
4961 case arm_stub_long_branch_any_tls_pic:
4962 case arm_stub_long_branch_v4t_thumb_tls_pic:
4963 case arm_stub_cmse_branch_thumb_only:
4964 case arm_stub_a8_veneer_blx:
4965 return 4;
4966
4967 case arm_stub_long_branch_arm_nacl:
4968 case arm_stub_long_branch_arm_nacl_pic:
4969 return 16;
4970
4971 default:
4972 abort (); /* Should be unreachable. */
4973 }
4974 }
4975
4976 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4977 veneering (TRUE) or have their own symbol (FALSE). */
4978
4979 static bfd_boolean
4980 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4981 {
4982 if (stub_type >= max_stub_type)
4983 abort (); /* Should be unreachable. */
4984
4985 switch (stub_type)
4986 {
4987 case arm_stub_cmse_branch_thumb_only:
4988 return TRUE;
4989
4990 default:
4991 return FALSE;
4992 }
4993
4994 abort (); /* Should be unreachable. */
4995 }
4996
4997 /* Returns the padding needed for the dedicated section used stubs of type
4998 STUB_TYPE. */
4999
5000 static int
5001 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
5002 {
5003 if (stub_type >= max_stub_type)
5004 abort (); /* Should be unreachable. */
5005
5006 switch (stub_type)
5007 {
5008 case arm_stub_cmse_branch_thumb_only:
5009 return 32;
5010
5011 default:
5012 return 0;
5013 }
5014
5015 abort (); /* Should be unreachable. */
5016 }
5017
5018 /* If veneers of type STUB_TYPE should go in a dedicated output section,
5019 returns the address of the hash table field in HTAB holding the offset at
5020 which new veneers should be layed out in the stub section. */
5021
5022 static bfd_vma*
5023 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5024 enum elf32_arm_stub_type stub_type)
5025 {
5026 switch (stub_type)
5027 {
5028 case arm_stub_cmse_branch_thumb_only:
5029 return &htab->new_cmse_stub_offset;
5030
5031 default:
5032 BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5033 return NULL;
5034 }
5035 }
5036
/* Build one stub, emitting the template instructions into the stub
   section and applying the template's relocations.  Called via
   bfd_hash_traverse with IN_ARG being the bfd_link_info.  Returns
   FALSE on hard error, TRUE otherwise (including the "skip for now"
   case used to order Cortex-A8 fixes).  */

static bfd_boolean
arm_build_one_stub (struct bfd_hash_entry *gen_entry,
		    void * in_arg)
{
#define MAXRELOCS 3
  bfd_boolean removed_sg_veneer;
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct elf32_arm_link_hash_table *globals;
  struct bfd_link_info *info;
  asection *stub_sec;
  bfd *stub_bfd;
  bfd_byte *loc;
  bfd_vma sym_value;
  int template_size;
  int size;
  const insn_sequence *template_sequence;
  int i;
  /* NOTE(review): only two of the MAXRELOCS (3) elements are listed;
     C zero-fills the remainder, so the third idx is 0, not -1.  All
     reads are bounded by nrelocs, so no stale element is consumed.  */
  int stub_reloc_idx[MAXRELOCS] = {-1, -1};
  int stub_reloc_offset[MAXRELOCS] = {0, 0};
  int nrelocs = 0;
  int just_allocated = 0;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  info = (struct bfd_link_info *) in_arg;

  /* Fail if the target section could not be assigned to an output
     section.  The user should fix his linker script.  */
  if (stub_entry->target_section->output_section == NULL
      && info->non_contiguous_regions)
    info->callbacks->einfo (_("%F%P: Could not assign '%pA' to an output section. "
			      "Retry without --enable-non-contiguous-regions.\n"),
			    stub_entry->target_section);

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  stub_sec = stub_entry->stub_sec;

  /* Two-pass ordering: 2-byte-aligned (Cortex-A8) veneers are built
     in a separate traversal from the rest, selected by the sign of
     fix_cortex_a8.  */
  if ((globals->fix_cortex_a8 < 0)
      != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
    /* We have to do less-strictly-aligned fixes last.  */
    return TRUE;

  /* Assign a slot at the end of section if none assigned yet.  */
  if (stub_entry->stub_offset == (bfd_vma) -1)
    {
      stub_entry->stub_offset = stub_sec->size;
      just_allocated = 1;
    }
  loc = stub_sec->contents + stub_entry->stub_offset;

  stub_bfd = stub_sec->owner;

  /* This is the address of the stub destination.  */
  sym_value = (stub_entry->target_value
	       + stub_entry->target_section->output_offset
	       + stub_entry->target_section->output_section->vma);

  template_sequence = stub_entry->stub_template;
  template_size = stub_entry->stub_template_size;

  /* Emit each template entry, recording which ones need a relocation
     applied afterwards.  */
  size = 0;
  for (i = 0; i < template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case THUMB16_TYPE:
	  {
	    bfd_vma data = (bfd_vma) template_sequence[i].data;
	    if (template_sequence[i].reloc_addend != 0)
	      {
		/* We've borrowed the reloc_addend field to mean we should
		   insert a condition code into this (Thumb-1 branch)
		   instruction.  See THUMB16_BCOND_INSN.  */
		BFD_ASSERT ((data & 0xff00) == 0xd000);
		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
	      }
	    bfd_put_16 (stub_bfd, data, loc + size);
	    size += 2;
	  }
	  break;

	case THUMB32_TYPE:
	  bfd_put_16 (stub_bfd,
		      (template_sequence[i].data >> 16) & 0xffff,
		      loc + size);
	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
		      loc + size + 2);
	  if (template_sequence[i].r_type != R_ARM_NONE)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case ARM_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data,
		      loc + size);
	  /* Handle cases where the target is encoded within the
	     instruction.  */
	  if (template_sequence[i].r_type == R_ARM_JUMP24)
	    {
	      stub_reloc_idx[nrelocs] = i;
	      stub_reloc_offset[nrelocs++] = size;
	    }
	  size += 4;
	  break;

	case DATA_TYPE:
	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
	  stub_reloc_idx[nrelocs] = i;
	  stub_reloc_offset[nrelocs++] = size;
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  if (just_allocated)
    stub_sec->size += size;

  /* Stub size has already been computed in arm_size_one_stub. Check
     consistency.  */
  BFD_ASSERT (size == stub_entry->stub_size);

  /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
  if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
    sym_value |= 1;

  /* Assume non empty slots have at least one and at most MAXRELOCS entries
     to relocate in each stub.  */
  removed_sg_veneer =
    (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
  BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));

  /* Apply the recorded relocations against the freshly-emitted stub.  */
  for (i = 0; i < nrelocs; i++)
    {
      Elf_Internal_Rela rel;
      bfd_boolean unresolved_reloc;
      char *error_message;
      bfd_vma points_to =
	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;

      rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
      rel.r_info = ELF32_R_INFO (0,
				 template_sequence[stub_reloc_idx[i]].r_type);
      rel.r_addend = 0;

      if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
	   template should refer back to the instruction after the original
	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
	   are only generated when both source and target are in the same
	   section.  */
	points_to = stub_entry->target_section->output_section->vma
		    + stub_entry->target_section->output_offset
		    + stub_entry->source_value;

      elf32_arm_final_link_relocate (elf32_arm_howto_from_type
	  (template_sequence[stub_reloc_idx[i]].r_type),
	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
	   points_to, info, stub_entry->target_section, "", STT_FUNC,
	   stub_entry->branch_type,
	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
	   &error_message);
    }

  return TRUE;
#undef MAXRELOCS
}
5213
5214 /* Calculate the template, template size and instruction size for a stub.
5215 Return value is the instruction size. */
5216
5217 static unsigned int
5218 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5219 const insn_sequence **stub_template,
5220 int *stub_template_size)
5221 {
5222 const insn_sequence *template_sequence = NULL;
5223 int template_size = 0, i;
5224 unsigned int size;
5225
5226 template_sequence = stub_definitions[stub_type].template_sequence;
5227 if (stub_template)
5228 *stub_template = template_sequence;
5229
5230 template_size = stub_definitions[stub_type].template_size;
5231 if (stub_template_size)
5232 *stub_template_size = template_size;
5233
5234 size = 0;
5235 for (i = 0; i < template_size; i++)
5236 {
5237 switch (template_sequence[i].type)
5238 {
5239 case THUMB16_TYPE:
5240 size += 2;
5241 break;
5242
5243 case ARM_TYPE:
5244 case THUMB32_TYPE:
5245 case DATA_TYPE:
5246 size += 4;
5247 break;
5248
5249 default:
5250 BFD_FAIL ();
5251 return 0;
5252 }
5253 }
5254
5255 return size;
5256 }
5257
5258 /* As above, but don't actually build the stub. Just bump offset so
5259 we know stub section sizes. */
5260
static bfd_boolean
arm_size_one_stub (struct bfd_hash_entry *gen_entry,
		   void *in_arg ATTRIBUTE_UNUSED)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  const insn_sequence *template_sequence;
  int template_size, size;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;

  BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));

  size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
				      &template_size);

  /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
  if (stub_entry->stub_template_size)
    {
      stub_entry->stub_size = size;
      stub_entry->stub_template = template_sequence;
      stub_entry->stub_template_size = template_size;
    }

  /* Already accounted for.  A stub_offset other than -1 means a slot
     was assigned on a previous sizing pass; don't grow the section
     again.  */
  if (stub_entry->stub_offset != (bfd_vma) -1)
    return TRUE;

  /* Round the stub up to an 8-byte multiple before adding it to the
     stub section's running size.  */
  size = (size + 7) & ~7;
  stub_entry->stub_sec->size += size;

  return TRUE;
}
5295
5296 /* External entry points for sizing and building linker stubs. */
5297
5298 /* Set up various things so that we can make a list of input sections
5299 for each output section included in the link. Returns -1 on error,
5300 0 when no stubs will be needed, and 1 on success. */
5301
int
elf32_arm_setup_section_lists (bfd *output_bfd,
			       struct bfd_link_info *info)
{
  bfd *input_bfd;
  unsigned int bfd_count;
  unsigned int top_id, top_index;
  asection *section;
  asection **input_list, **list;
  size_t amt;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return 0;
  if (! is_elf_hash_table (htab))
    return 0;

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }
  htab->bfd_count = bfd_count;

  /* One zero-initialised stub_group entry per input section id,
     indexed directly by section->id.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return -1;
  htab->top_id = top_id;

  /* We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  htab->top_index = top_index;
  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  htab->input_list = input_list;
  if (input_list == NULL)
    return -1;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Code output sections get a NULL head-of-list marker; their input
     section lists are then filled in by elf32_arm_next_input_section.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  return 1;
}
5376
5377 /* The linker repeatedly calls this function for each input section,
5378 in the order that input sections are linked into output sections.
5379 Build lists of input sections to determine groupings between which
5380 we may insert linker stubs. */
5381
void
elf32_arm_next_input_section (struct bfd_link_info *info,
			      asection *isec)
{
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);

  if (htab == NULL)
    return;

  if (isec->output_section->index <= htab->top_index)
    {
      asection **list = htab->input_list + isec->output_section->index;

      /* Entries marked bfd_abs_section_ptr by
	 elf32_arm_setup_section_lists are non-code output sections
	 we don't track.  */
      if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
	{
	  /* Steal the link_sec pointer for our list.  */
	  /* NOTE: PREV_SEC is also used (and #undef'd) by
	     group_sections below; it must stay defined here.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
	  /* This happens to make the list in reverse order,
	     which we reverse later.  */
	  PREV_SEC (isec) = *list;
	  *list = isec;
	}
    }
}
5406
5407 /* See whether we can group stub sections together. Grouping stub
5408 sections may result in fewer stubs. More importantly, we need to
5409 put all .init* and .fini* stubs at the end of the .init or
5410 .fini output sections respectively, because glibc splits the
5411 _init and _fini functions into multiple parts. Putting a stub in
5412 the middle of a function is not a good idea. */
5413
static void
group_sections (struct elf32_arm_link_hash_table *htab,
		bfd_size_type stub_group_size,
		bfd_boolean stubs_always_after_branch)
{
  asection **list = htab->input_list;

  /* Walk every per-output-section input list built by
     elf32_arm_next_input_section.  */
  do
    {
      asection *tail = *list;
      asection *head;

      /* bfd_abs_section_ptr marks output sections we don't track.  */
      if (tail == bfd_abs_section_ptr)
	continue;

      /* Reverse the list: we must avoid placing stubs at the
	 beginning of the section because the beginning of the text
	 section may be required for an interrupt vector in bare metal
	 code.  */
#define NEXT_SEC PREV_SEC
      head = NULL;
      while (tail != NULL)
	{
	  /* Pop from tail.  */
	  asection *item = tail;
	  tail = PREV_SEC (item);

	  /* Push on head.  */
	  NEXT_SEC (item) = head;
	  head = item;
	}

      while (head != NULL)
	{
	  asection *curr;
	  asection *next;
	  bfd_vma stub_group_start = head->output_offset;
	  bfd_vma end_of_next;

	  /* Extend the group forward while it still fits within
	     stub_group_size bytes of the group start.  */
	  curr = head;
	  while (NEXT_SEC (curr) != NULL)
	    {
	      next = NEXT_SEC (curr);
	      end_of_next = next->output_offset + next->size;
	      if (end_of_next - stub_group_start >= stub_group_size)
		/* End of NEXT is too far from start, so stop.  */
		break;
	      /* Add NEXT to the group.  */
	      curr = next;
	    }

	  /* OK, the size from the start to the start of CURR is less
	     than stub_group_size and thus can be handled by one stub
	     section.  (Or the head section is itself larger than
	     stub_group_size, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  */
	  do
	    {
	      next = NEXT_SEC (head);
	      /* Set up this stub group.  */
	      htab->stub_group[head->id].link_sec = curr;
	    }
	  while (head != curr && (head = next) != NULL);

	  /* But wait, there's more!  Input sections up to stub_group_size
	     bytes after the stub section can be handled by it too.  */
	  if (!stubs_always_after_branch)
	    {
	      stub_group_start = curr->output_offset + curr->size;

	      while (next != NULL)
		{
		  end_of_next = next->output_offset + next->size;
		  if (end_of_next - stub_group_start >= stub_group_size)
		    /* End of NEXT is too far from stubs, so stop.  */
		    break;
		  /* Add NEXT to the stub group.  */
		  head = next;
		  next = NEXT_SEC (head);
		  htab->stub_group[head->id].link_sec = curr;
		}
	    }
	  head = next;
	}
    }
  while (list++ != htab->input_list + htab->top_index);

  free (htab->input_list);
#undef PREV_SEC
#undef NEXT_SEC
}
5507
5508 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5509 erratum fix. */
5510
5511 static int
5512 a8_reloc_compare (const void *a, const void *b)
5513 {
5514 const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5515 const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5516
5517 if (ra->from < rb->from)
5518 return -1;
5519 else if (ra->from > rb->from)
5520 return 1;
5521 else
5522 return 0;
5523 }
5524
5525 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5526 const char *, char **);
5527
5528 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5529 branch/TLB erratum. Fill in the table described by A8_FIXES_P,
5530 NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P. Returns true if an error occurs, false
5531 otherwise. */
5532
static bfd_boolean
cortex_a8_erratum_scan (bfd *input_bfd,
			struct bfd_link_info *info,
			struct a8_erratum_fix **a8_fixes_p,
			unsigned int *num_a8_fixes_p,
			unsigned int *a8_fix_table_size_p,
			struct a8_erratum_reloc *a8_relocs,
			unsigned int num_a8_relocs,
			unsigned prev_num_a8_fixes,
			bfd_boolean *stub_changed_p)
{
  asection *section;
  struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
  struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
  unsigned int num_a8_fixes = *num_a8_fixes_p;
  unsigned int a8_fix_table_size = *a8_fix_table_size_p;

  if (htab == NULL)
    return FALSE;

  for (section = input_bfd->sections;
       section != NULL;
       section = section->next)
    {
      bfd_byte *contents = NULL;
      struct _arm_elf_section_data *sec_data;
      unsigned int span;
      bfd_vma base_vma;

      /* Only executable PROGBITS sections that make it into the
	 output can contain offending branches.  */
      if (elf_section_type (section) != SHT_PROGBITS
	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
	  || (section->flags & SEC_EXCLUDE) != 0
	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
	  || (section->output_section == bfd_abs_section_ptr))
	continue;

      base_vma = section->output_section->vma + section->output_offset;

      /* Reuse cached contents if available; otherwise read (and own)
	 a fresh copy, freed at the bottom of the loop.  */
      if (elf_section_data (section)->this_hdr.contents != NULL)
	contents = elf_section_data (section)->this_hdr.contents;
      else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
	return TRUE;

      sec_data = elf32_arm_section_data (section);

      /* Walk the mapping-symbol spans; only Thumb code ('t') spans
	 are scanned.  */
      for (span = 0; span < sec_data->mapcount; span++)
	{
	  unsigned int span_start = sec_data->map[span].vma;
	  unsigned int span_end = (span == sec_data->mapcount - 1)
	    ? section->size : sec_data->map[span + 1].vma;
	  unsigned int i;
	  char span_type = sec_data->map[span].type;
	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;

	  if (span_type != 't')
	    continue;

	  /* Span is entirely within a single 4KB region: skip scanning.  */
	  if (((base_vma + span_start) & ~0xfff)
	      == ((base_vma + span_end) & ~0xfff))
	    continue;

	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:

	     * The opcode is BLX.W, BL.W, B.W, Bcc.W
	     * The branch target is in the same 4KB region as the
	       first half of the branch.
	     * The instruction before the branch is a 32-bit
	       length non-branch instruction.  */
	  for (i = span_start; i < span_end;)
	    {
	      unsigned int insn = bfd_getl16 (&contents[i]);
	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;

	      /* First halfword 0b111xx with xx != 00 marks a 32-bit
		 Thumb-2 encoding.  */
	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
		insn_32bit = TRUE;

	      if (insn_32bit)
		{
		  /* Load the rest of the insn (in manual-friendly order).  */
		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);

		  /* Encoding T4: B<c>.W.  */
		  is_b = (insn & 0xf800d000) == 0xf0009000;
		  /* Encoding T1: BL<c>.W.  */
		  is_bl = (insn & 0xf800d000) == 0xf000d000;
		  /* Encoding T2: BLX<c>.W.  */
		  is_blx = (insn & 0xf800d000) == 0xf000c000;
		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
		  is_bcc = (insn & 0xf800d000) == 0xf0008000
			   && (insn & 0x07f00000) != 0x03800000;
		}

	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;

	      /* The erratum pattern: a 32-bit branch whose first
		 halfword is the last halfword of a 4KB page, preceded
		 by a 32-bit non-branch instruction.  */
	      if (((base_vma + i) & 0xfff) == 0xffe
		  && insn_32bit
		  && is_32bit_branch
		  && last_was_32bit
		  && ! last_was_branch)
		{
		  bfd_signed_vma offset = 0;
		  bfd_boolean force_target_arm = FALSE;
		  bfd_boolean force_target_thumb = FALSE;
		  bfd_vma target;
		  enum elf32_arm_stub_type stub_type = arm_stub_none;
		  struct a8_erratum_reloc key, *found;
		  bfd_boolean use_plt = FALSE;

		  /* Look up a relocation at this branch's address, if
		     any, in the sorted table.  */
		  key.from = base_vma + i;
		  found = (struct a8_erratum_reloc *)
		      bsearch (&key, a8_relocs, num_a8_relocs,
			       sizeof (struct a8_erratum_reloc),
			       &a8_reloc_compare);

		  if (found)
		    {
		      char *error_message = NULL;
		      struct elf_link_hash_entry *entry;

		      /* We don't care about the error returned from this
			 function, only if there is glue or not.  */
		      entry = find_thumb_glue (info, found->sym_name,
					       &error_message);

		      if (entry)
			found->non_a8_stub = TRUE;

		      /* Keep a simpler condition, for the sake of clarity.  */
		      if (htab->root.splt != NULL && found->hash != NULL
			  && found->hash->root.plt.offset != (bfd_vma) -1)
			use_plt = TRUE;

		      if (found->r_type == R_ARM_THM_CALL)
			{
			  if (found->branch_type == ST_BRANCH_TO_ARM
			      || use_plt)
			    force_target_arm = TRUE;
			  else
			    force_target_thumb = TRUE;
			}
		    }

		  /* Check if we have an offending branch instruction.  */

		  if (found && found->non_a8_stub)
		    /* We've already made a stub for this instruction, e.g.
		       it's a long branch or a Thumb->ARM stub.  Assume that
		       stub will suffice to work around the A8 erratum (see
		       setting of always_after_branch above).  */
		    ;
		  else if (is_bcc)
		    {
		      /* Decode the Bcc.W (encoding T3) signed immediate.  */
		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3f0000) >> 4;
		      offset |= (insn & 0x2000) ? 0x40000 : 0;
		      offset |= (insn & 0x800) ? 0x80000 : 0;
		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
		      if (offset & 0x100000)
			offset |= ~ ((bfd_signed_vma) 0xfffff);
		      stub_type = arm_stub_a8_veneer_b_cond;
		    }
		  else if (is_b || is_bl || is_blx)
		    {
		      /* Decode the B.W/BL/BLX (encoding T4/T1/T2)
			 immediate: I1 = NOT(J1 XOR S), I2 = NOT(J2 XOR S).  */
		      int s = (insn & 0x4000000) != 0;
		      int j1 = (insn & 0x2000) != 0;
		      int j2 = (insn & 0x800) != 0;
		      int i1 = !(j1 ^ s);
		      int i2 = !(j2 ^ s);

		      offset = (insn & 0x7ff) << 1;
		      offset |= (insn & 0x3ff0000) >> 4;
		      offset |= i2 << 22;
		      offset |= i1 << 23;
		      offset |= s << 24;
		      if (offset & 0x1000000)
			offset |= ~ ((bfd_signed_vma) 0xffffff);

		      if (is_blx)
			offset &= ~ ((bfd_signed_vma) 3);

		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
		    }

		  if (stub_type != arm_stub_none)
		    {
		      bfd_vma pc_for_insn = base_vma + i + 4;

		      /* The original instruction is a BL, but the target is
			 an ARM instruction.  If we were not making a stub,
			 the BL would have been converted to a BLX.  Use the
			 BLX stub instead in that case.  */
		      if (htab->use_blx && force_target_arm
			  && stub_type == arm_stub_a8_veneer_bl)
			{
			  stub_type = arm_stub_a8_veneer_blx;
			  is_blx = TRUE;
			  is_bl = FALSE;
			}
		      /* Conversely, if the original instruction was
			 BLX but the target is Thumb mode, use the BL
			 stub.  */
		      else if (force_target_thumb
			       && stub_type == arm_stub_a8_veneer_blx)
			{
			  stub_type = arm_stub_a8_veneer_bl;
			  is_blx = FALSE;
			  is_bl = TRUE;
			}

		      if (is_blx)
			pc_for_insn &= ~ ((bfd_vma) 3);

		      /* If we found a relocation, use the proper destination,
			 not the offset in the (unrelocated) instruction.
			 Note this is always done if we switched the stub type
			 above.  */
		      if (found)
			offset =
			  (bfd_signed_vma) (found->destination - pc_for_insn);

		      /* If the stub will use a Thumb-mode branch to a
			 PLT target, redirect it to the preceding Thumb
			 entry point.  */
		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
			offset -= PLT_THUMB_STUB_SIZE;

		      target = pc_for_insn + offset;

		      /* The BLX stub is ARM-mode code.  Adjust the offset to
			 take the different PC value (+8 instead of +4) into
			 account.  */
		      if (stub_type == arm_stub_a8_veneer_blx)
			offset += 4;

		      /* Only a branch that stays in the same 4KB page
			 as its first halfword triggers the erratum.  */
		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
			{
			  char *stub_name = NULL;

			  /* Grow the fix table geometrically when full.  */
			  if (num_a8_fixes == a8_fix_table_size)
			    {
			      a8_fix_table_size *= 2;
			      a8_fixes = (struct a8_erratum_fix *)
				  bfd_realloc (a8_fixes,
					       sizeof (struct a8_erratum_fix)
					       * a8_fix_table_size);
			    }

			  if (num_a8_fixes < prev_num_a8_fixes)
			    {
			      /* If we're doing a subsequent scan,
				 check if we've found the same fix as
				 before, and try and reuse the stub
				 name.  */
			      stub_name = a8_fixes[num_a8_fixes].stub_name;
			      if ((a8_fixes[num_a8_fixes].section != section)
				  || (a8_fixes[num_a8_fixes].offset != i))
				{
				  free (stub_name);
				  stub_name = NULL;
				  *stub_changed_p = TRUE;
				}
			    }

			  if (!stub_name)
			    {
			      /* "sectionid:offset", each up to 8 hex
				 digits, plus separator and NUL.  */
			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
			      if (stub_name != NULL)
				sprintf (stub_name, "%x:%x", section->id, i);
			    }

			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
			  a8_fixes[num_a8_fixes].section = section;
			  a8_fixes[num_a8_fixes].offset = i;
			  a8_fixes[num_a8_fixes].target_offset =
			    target - base_vma;
			  a8_fixes[num_a8_fixes].orig_insn = insn;
			  a8_fixes[num_a8_fixes].stub_name = stub_name;
			  a8_fixes[num_a8_fixes].stub_type = stub_type;
			  a8_fixes[num_a8_fixes].branch_type =
			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;

			  num_a8_fixes++;
			}
		    }
		}

	      i += insn_32bit ? 4 : 2;
	      last_was_32bit = insn_32bit;
	      last_was_branch = is_32bit_branch;
	    }
	}

      /* Free the contents only if we allocated them above.  */
      if (elf_section_data (section)->this_hdr.contents == NULL)
	free (contents);
    }

  /* Hand the (possibly reallocated) table and counts back.  */
  *a8_fixes_p = a8_fixes;
  *num_a8_fixes_p = num_a8_fixes;
  *a8_fix_table_size_p = a8_fix_table_size;

  return FALSE;
}
5838
5839 /* Create or update a stub entry depending on whether the stub can already be
5840 found in HTAB. The stub is identified by:
5841 - its type STUB_TYPE
5842 - its source branch (note that several can share the same stub) whose
5843 section and relocation (if any) are given by SECTION and IRELA
5844 respectively
5845 - its target symbol whose input section, hash, name, value and branch type
5846 are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5847 respectively
5848
5849 If found, the value of the stub's target symbol is updated from SYM_VALUE
5850 and *NEW_STUB is set to FALSE. Otherwise, *NEW_STUB is set to
5851 TRUE and the stub entry is initialized.
5852
5853 Returns the stub that was created or updated, or NULL if an error
5854 occurred. */
5855
5856 static struct elf32_arm_stub_hash_entry *
5857 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5858 enum elf32_arm_stub_type stub_type, asection *section,
5859 Elf_Internal_Rela *irela, asection *sym_sec,
5860 struct elf32_arm_link_hash_entry *hash, char *sym_name,
5861 bfd_vma sym_value, enum arm_st_branch_type branch_type,
5862 bfd_boolean *new_stub)
5863 {
5864 const asection *id_sec;
5865 char *stub_name;
5866 struct elf32_arm_stub_hash_entry *stub_entry;
5867 unsigned int r_type;
5868 bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5869
5870 BFD_ASSERT (stub_type != arm_stub_none);
5871 *new_stub = FALSE;
5872
5873 if (sym_claimed)
5874 stub_name = sym_name;
5875 else
5876 {
5877 BFD_ASSERT (irela);
5878 BFD_ASSERT (section);
5879 BFD_ASSERT (section->id <= htab->top_id);
5880
5881 /* Support for grouping stub sections. */
5882 id_sec = htab->stub_group[section->id].link_sec;
5883
5884 /* Get the name of this stub. */
5885 stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5886 stub_type);
5887 if (!stub_name)
5888 return NULL;
5889 }
5890
5891 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5892 FALSE);
5893 /* The proper stub has already been created, just update its value. */
5894 if (stub_entry != NULL)
5895 {
5896 if (!sym_claimed)
5897 free (stub_name);
5898 stub_entry->target_value = sym_value;
5899 return stub_entry;
5900 }
5901
5902 stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5903 if (stub_entry == NULL)
5904 {
5905 if (!sym_claimed)
5906 free (stub_name);
5907 return NULL;
5908 }
5909
5910 stub_entry->target_value = sym_value;
5911 stub_entry->target_section = sym_sec;
5912 stub_entry->stub_type = stub_type;
5913 stub_entry->h = hash;
5914 stub_entry->branch_type = branch_type;
5915
5916 if (sym_claimed)
5917 stub_entry->output_name = sym_name;
5918 else
5919 {
5920 if (sym_name == NULL)
5921 sym_name = "unnamed";
5922 stub_entry->output_name = (char *)
5923 bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5924 + strlen (sym_name));
5925 if (stub_entry->output_name == NULL)
5926 {
5927 free (stub_name);
5928 return NULL;
5929 }
5930
5931 /* For historical reasons, use the existing names for ARM-to-Thumb and
5932 Thumb-to-ARM stubs. */
5933 r_type = ELF32_R_TYPE (irela->r_info);
5934 if ((r_type == (unsigned int) R_ARM_THM_CALL
5935 || r_type == (unsigned int) R_ARM_THM_JUMP24
5936 || r_type == (unsigned int) R_ARM_THM_JUMP19)
5937 && branch_type == ST_BRANCH_TO_ARM)
5938 sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5939 else if ((r_type == (unsigned int) R_ARM_CALL
5940 || r_type == (unsigned int) R_ARM_JUMP24)
5941 && branch_type == ST_BRANCH_TO_THUMB)
5942 sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5943 else
5944 sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5945 }
5946
5947 *new_stub = TRUE;
5948 return stub_entry;
5949 }
5950
/* Scan symbols in INPUT_BFD to identify secure entry functions needing a
   gateway veneer to transition from non secure to secure state and create them
   accordingly.

   "ARMv8-M Security Extensions: Requirements on Development Tools" document
   defines the conditions that govern Secure Gateway veneer creation for a
   given symbol <SYM> as follows:
   - it has function type
   - it has non local binding
   - a symbol named __acle_se_<SYM> (called special symbol) exists with the
     same type, binding and value as <SYM> (called normal symbol).
   An entry function can handle secure state transition itself in which case
   its special symbol would have a different value from the normal symbol.

   OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
   entry mapping while HTAB gives the name to hash entry mapping.
   *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
   created.

   The return value gives whether a stub failed to be allocated.  */

static bfd_boolean
cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
	   int *cmse_stub_created)
{
  const struct elf_backend_data *bed;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, j, sym_count, ext_start;
  Elf_Internal_Sym *cmse_sym, *local_syms;
  struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
  enum arm_st_branch_type branch_type;
  char *sym_name, *lsym_name;
  bfd_vma sym_value;
  asection *section;
  struct elf32_arm_stub_hash_entry *stub_entry;
  bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;

  bed = get_elf_backend_data (input_bfd);
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  /* Total number of symbols; indices in [0, ext_start) are locals,
     [ext_start, sym_count) are globals indexed via SYM_HASHES.  */
  sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
  ext_start = symtab_hdr->sh_info;
  is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	    && out_attr[Tag_CPU_arch_profile].i == 'M');

  /* Reuse already-read local symbols if cached, otherwise read them now.  */
  local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
  if (local_syms == NULL)
    local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
				       symtab_hdr->sh_info, 0, NULL, NULL,
				       NULL);
  if (symtab_hdr->sh_info && local_syms == NULL)
    return FALSE;

  /* Scan symbols.  */
  for (i = 0; i < sym_count; i++)
    {
      cmse_invalid = FALSE;

      if (i < ext_start)
	{
	  /* Local symbol: fetch its name from the string table.  */
	  cmse_sym = &local_syms[i];
	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
						      symtab_hdr->sh_link,
						      cmse_sym->st_name);
	  if (!sym_name || !CONST_STRNEQ (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol with local binding.  */
	  cmse_invalid = TRUE;
	}
      else
	{
	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
	  sym_name = (char *) cmse_hash->root.root.root.string;
	  if (!CONST_STRNEQ (sym_name, CMSE_PREFIX))
	    continue;

	  /* Special symbol has incorrect binding or type.  */
	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
	      || cmse_hash->root.type != STT_FUNC)
	    cmse_invalid = TRUE;
	}

      if (!is_v8m)
	{
	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
				"ARMv8-M architecture or later"),
			      input_bfd, sym_name);
	  is_v8m = TRUE; /* Avoid multiple warning.  */
	  ret = FALSE;
	}

      if (cmse_invalid)
	{
	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
				" a global or weak function symbol"),
			      input_bfd, sym_name);
	  ret = FALSE;
	  /* Local special symbols always take this continue, so below
	     this point CMSE_HASH is guaranteed to be set.  */
	  if (i < ext_start)
	    continue;
	}

      /* Skip the __acle_se_ prefix to get the normal symbol's name.  */
      sym_name += strlen (CMSE_PREFIX);
      hash = (struct elf32_arm_link_hash_entry *)
	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);

      /* No associated normal symbol or it is neither global nor weak.  */
      if (!hash
	  || (hash->root.root.type != bfd_link_hash_defined
	      && hash->root.root.type != bfd_link_hash_defweak)
	  || hash->root.type != STT_FUNC)
	{
	  /* Initialize here to avoid warning about use of possibly
	     uninitialized variable.  */
	  j = 0;

	  if (!hash)
	    {
	      /* Searching for a normal symbol with local binding.  */
	      for (; j < ext_start; j++)
		{
		  lsym_name =
		    bfd_elf_string_from_elf_section (input_bfd,
						     symtab_hdr->sh_link,
						     local_syms[j].st_name);
		  if (!strcmp (sym_name, lsym_name))
		    break;
		}
	    }

	  /* Either the normal symbol exists with wrong binding/type, or a
	     local one of the same name was found: complain accordingly.  */
	  if (hash || j < ext_start)
	    {
	      _bfd_error_handler
		(_("%pB: invalid standard symbol `%s'; it must be "
		   "a global or weak function symbol"),
		 input_bfd, sym_name);
	    }
	  else
	    _bfd_error_handler
	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
	  ret = FALSE;
	  if (!hash)
	    continue;
	}

      sym_value = hash->root.root.u.def.value;
      section = hash->root.root.u.def.section;

      if (cmse_hash->root.root.u.def.section != section)
	{
	  _bfd_error_handler
	    (_("%pB: `%s' and its special symbol are in different sections"),
	     input_bfd, sym_name);
	  ret = FALSE;
	}
      if (cmse_hash->root.root.u.def.value != sym_value)
	continue; /* Ignore: could be an entry function starting with SG.  */

      /* If this section is a link-once section that will be discarded, then
	 don't create any stubs.  */
      if (section->output_section == NULL)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
	  continue;
	}

      if (hash->root.size == 0)
	{
	  _bfd_error_handler
	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
	  ret = FALSE;
	}

      /* Do not create stubs once any error has been diagnosed.  */
      if (!ret)
	continue;
      branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
      stub_entry
	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
				 NULL, NULL, section, hash, sym_name,
				 sym_value, branch_type, &new_stub);

      if (stub_entry == NULL)
	ret = FALSE;
      else
	{
	  BFD_ASSERT (new_stub);
	  (*cmse_stub_created)++;
	}
    }

  /* Only free the symbols if we read them ourselves above (a cached copy
     belongs to SYMTAB_HDR).  */
  if (!symtab_hdr->contents)
    free (local_syms);
  return ret;
}
6147
6148 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6149 code entry function, ie can be called from non secure code without using a
6150 veneer. */
6151
6152 static bfd_boolean
6153 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6154 {
6155 bfd_byte contents[4];
6156 uint32_t first_insn;
6157 asection *section;
6158 file_ptr offset;
6159 bfd *abfd;
6160
6161 /* Defined symbol of function type. */
6162 if (hash->root.root.type != bfd_link_hash_defined
6163 && hash->root.root.type != bfd_link_hash_defweak)
6164 return FALSE;
6165 if (hash->root.type != STT_FUNC)
6166 return FALSE;
6167
6168 /* Read first instruction. */
6169 section = hash->root.root.u.def.section;
6170 abfd = section->owner;
6171 offset = hash->root.root.u.def.value - section->vma;
6172 if (!bfd_get_section_contents (abfd, section, contents, offset,
6173 sizeof (contents)))
6174 return FALSE;
6175
6176 first_insn = bfd_get_32 (abfd, contents);
6177
6178 /* Starts by SG instruction. */
6179 return first_insn == 0xe97fe97f;
6180 }
6181
6182 /* Output the name (in symbol table) of the veneer GEN_ENTRY if it is a new
6183 secure gateway veneers (ie. the veneers was not in the input import library)
6184 and there is no output import library (GEN_INFO->out_implib_bfd is NULL. */
6185
6186 static bfd_boolean
6187 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6188 {
6189 struct elf32_arm_stub_hash_entry *stub_entry;
6190 struct bfd_link_info *info;
6191
6192 /* Massage our args to the form they really have. */
6193 stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6194 info = (struct bfd_link_info *) gen_info;
6195
6196 if (info->out_implib_bfd)
6197 return TRUE;
6198
6199 if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6200 return TRUE;
6201
6202 if (stub_entry->stub_offset == (bfd_vma) -1)
6203 _bfd_error_handler (" %s", stub_entry->output_name);
6204
6205 return TRUE;
6206 }
6207
6208 /* Set offset of each secure gateway veneers so that its address remain
6209 identical to the one in the input import library referred by
6210 HTAB->in_implib_bfd. A warning is issued for veneers that disappeared
6211 (present in input import library but absent from the executable being
6212 linked) or if new veneers appeared and there is no output import library
6213 (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6214 number of secure gateway veneers found in the input import library.
6215
6216 The function returns whether an error occurred. If no error occurred,
6217 *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6218 and this function and HTAB->new_cmse_stub_offset is set to the biggest
6219 veneer observed set for new veneers to be layed out after. */
6220
6221 static bfd_boolean
6222 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6223 struct elf32_arm_link_hash_table *htab,
6224 int *cmse_stub_created)
6225 {
6226 long symsize;
6227 char *sym_name;
6228 flagword flags;
6229 long i, symcount;
6230 bfd *in_implib_bfd;
6231 asection *stub_out_sec;
6232 bfd_boolean ret = TRUE;
6233 Elf_Internal_Sym *intsym;
6234 const char *out_sec_name;
6235 bfd_size_type cmse_stub_size;
6236 asymbol **sympp = NULL, *sym;
6237 struct elf32_arm_link_hash_entry *hash;
6238 const insn_sequence *cmse_stub_template;
6239 struct elf32_arm_stub_hash_entry *stub_entry;
6240 int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6241 bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6242 bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6243
6244 /* No input secure gateway import library. */
6245 if (!htab->in_implib_bfd)
6246 return TRUE;
6247
6248 in_implib_bfd = htab->in_implib_bfd;
6249 if (!htab->cmse_implib)
6250 {
6251 _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6252 "Gateway import libraries"), in_implib_bfd);
6253 return FALSE;
6254 }
6255
6256 /* Get symbol table size. */
6257 symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6258 if (symsize < 0)
6259 return FALSE;
6260
6261 /* Read in the input secure gateway import library's symbol table. */
6262 sympp = (asymbol **) bfd_malloc (symsize);
6263 if (sympp == NULL)
6264 return FALSE;
6265
6266 symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6267 if (symcount < 0)
6268 {
6269 ret = FALSE;
6270 goto free_sym_buf;
6271 }
6272
6273 htab->new_cmse_stub_offset = 0;
6274 cmse_stub_size =
6275 find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6276 &cmse_stub_template,
6277 &cmse_stub_template_size);
6278 out_sec_name =
6279 arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6280 stub_out_sec =
6281 bfd_get_section_by_name (htab->obfd, out_sec_name);
6282 if (stub_out_sec != NULL)
6283 cmse_stub_sec_vma = stub_out_sec->vma;
6284
6285 /* Set addresses of veneers mentionned in input secure gateway import
6286 library's symbol table. */
6287 for (i = 0; i < symcount; i++)
6288 {
6289 sym = sympp[i];
6290 flags = sym->flags;
6291 sym_name = (char *) bfd_asymbol_name (sym);
6292 intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6293
6294 if (sym->section != bfd_abs_section_ptr
6295 || !(flags & (BSF_GLOBAL | BSF_WEAK))
6296 || (flags & BSF_FUNCTION) != BSF_FUNCTION
6297 || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6298 != ST_BRANCH_TO_THUMB))
6299 {
6300 _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6301 "symbol should be absolute, global and "
6302 "refer to Thumb functions"),
6303 in_implib_bfd, sym_name);
6304 ret = FALSE;
6305 continue;
6306 }
6307
6308 veneer_value = bfd_asymbol_value (sym);
6309 stub_offset = veneer_value - cmse_stub_sec_vma;
6310 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6311 FALSE, FALSE);
6312 hash = (struct elf32_arm_link_hash_entry *)
6313 elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6314
6315 /* Stub entry should have been created by cmse_scan or the symbol be of
6316 a secure function callable from non secure code. */
6317 if (!stub_entry && !hash)
6318 {
6319 bfd_boolean new_stub;
6320
6321 _bfd_error_handler
6322 (_("entry function `%s' disappeared from secure code"), sym_name);
6323 hash = (struct elf32_arm_link_hash_entry *)
6324 elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6325 stub_entry
6326 = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6327 NULL, NULL, bfd_abs_section_ptr, hash,
6328 sym_name, veneer_value,
6329 ST_BRANCH_TO_THUMB, &new_stub);
6330 if (stub_entry == NULL)
6331 ret = FALSE;
6332 else
6333 {
6334 BFD_ASSERT (new_stub);
6335 new_cmse_stubs_created++;
6336 (*cmse_stub_created)++;
6337 }
6338 stub_entry->stub_template_size = stub_entry->stub_size = 0;
6339 stub_entry->stub_offset = stub_offset;
6340 }
6341 /* Symbol found is not callable from non secure code. */
6342 else if (!stub_entry)
6343 {
6344 if (!cmse_entry_fct_p (hash))
6345 {
6346 _bfd_error_handler (_("`%s' refers to a non entry function"),
6347 sym_name);
6348 ret = FALSE;
6349 }
6350 continue;
6351 }
6352 else
6353 {
6354 /* Only stubs for SG veneers should have been created. */
6355 BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6356
6357 /* Check visibility hasn't changed. */
6358 if (!!(flags & BSF_GLOBAL)
6359 != (hash->root.root.type == bfd_link_hash_defined))
6360 _bfd_error_handler
6361 (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6362 sym_name);
6363
6364 stub_entry->stub_offset = stub_offset;
6365 }
6366
6367 /* Size should match that of a SG veneer. */
6368 if (intsym->st_size != cmse_stub_size)
6369 {
6370 _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6371 in_implib_bfd, sym_name);
6372 ret = FALSE;
6373 }
6374
6375 /* Previous veneer address is before current SG veneer section. */
6376 if (veneer_value < cmse_stub_sec_vma)
6377 {
6378 /* Avoid offset underflow. */
6379 if (stub_entry)
6380 stub_entry->stub_offset = 0;
6381 stub_offset = 0;
6382 ret = FALSE;
6383 }
6384
6385 /* Complain if stub offset not a multiple of stub size. */
6386 if (stub_offset % cmse_stub_size)
6387 {
6388 _bfd_error_handler
6389 (_("offset of veneer for entry function `%s' not a multiple of "
6390 "its size"), sym_name);
6391 ret = FALSE;
6392 }
6393
6394 if (!ret)
6395 continue;
6396
6397 new_cmse_stubs_created--;
6398 if (veneer_value < cmse_stub_array_start)
6399 cmse_stub_array_start = veneer_value;
6400 next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6401 if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6402 htab->new_cmse_stub_offset = next_cmse_stub_offset;
6403 }
6404
6405 if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6406 {
6407 BFD_ASSERT (new_cmse_stubs_created > 0);
6408 _bfd_error_handler
6409 (_("new entry function(s) introduced but no output import library "
6410 "specified:"));
6411 bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6412 }
6413
6414 if (cmse_stub_array_start != cmse_stub_sec_vma)
6415 {
6416 _bfd_error_handler
6417 (_("start address of `%s' is different from previous link"),
6418 out_sec_name);
6419 ret = FALSE;
6420 }
6421
6422 free_sym_buf:
6423 free (sympp);
6424 return ret;
6425 }
6426
6427 /* Determine and set the size of the stub section for a final link.
6428
6429 The basic idea here is to examine all the relocations looking for
6430 PC-relative calls to a target that is unreachable with a "bl"
6431 instruction. */
6432
6433 bfd_boolean
6434 elf32_arm_size_stubs (bfd *output_bfd,
6435 bfd *stub_bfd,
6436 struct bfd_link_info *info,
6437 bfd_signed_vma group_size,
6438 asection * (*add_stub_section) (const char *, asection *,
6439 asection *,
6440 unsigned int),
6441 void (*layout_sections_again) (void))
6442 {
6443 bfd_boolean ret = TRUE;
6444 obj_attribute *out_attr;
6445 int cmse_stub_created = 0;
6446 bfd_size_type stub_group_size;
6447 bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6448 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6449 struct a8_erratum_fix *a8_fixes = NULL;
6450 unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6451 struct a8_erratum_reloc *a8_relocs = NULL;
6452 unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6453
6454 if (htab == NULL)
6455 return FALSE;
6456
6457 if (htab->fix_cortex_a8)
6458 {
6459 a8_fixes = (struct a8_erratum_fix *)
6460 bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6461 a8_relocs = (struct a8_erratum_reloc *)
6462 bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6463 }
6464
6465 /* Propagate mach to stub bfd, because it may not have been
6466 finalized when we created stub_bfd. */
6467 bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6468 bfd_get_mach (output_bfd));
6469
6470 /* Stash our params away. */
6471 htab->stub_bfd = stub_bfd;
6472 htab->add_stub_section = add_stub_section;
6473 htab->layout_sections_again = layout_sections_again;
6474 stubs_always_after_branch = group_size < 0;
6475
6476 out_attr = elf_known_obj_attributes_proc (output_bfd);
6477 m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6478
6479 /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6480 as the first half of a 32-bit branch straddling two 4K pages. This is a
6481 crude way of enforcing that. */
6482 if (htab->fix_cortex_a8)
6483 stubs_always_after_branch = 1;
6484
6485 if (group_size < 0)
6486 stub_group_size = -group_size;
6487 else
6488 stub_group_size = group_size;
6489
6490 if (stub_group_size == 1)
6491 {
6492 /* Default values. */
6493 /* Thumb branch range is +-4MB has to be used as the default
6494 maximum size (a given section can contain both ARM and Thumb
6495 code, so the worst case has to be taken into account).
6496
6497 This value is 24K less than that, which allows for 2025
6498 12-byte stubs. If we exceed that, then we will fail to link.
6499 The user will have to relink with an explicit group size
6500 option. */
6501 stub_group_size = 4170000;
6502 }
6503
6504 group_sections (htab, stub_group_size, stubs_always_after_branch);
6505
6506 /* If we're applying the cortex A8 fix, we need to determine the
6507 program header size now, because we cannot change it later --
6508 that could alter section placements. Notice the A8 erratum fix
6509 ends up requiring the section addresses to remain unchanged
6510 modulo the page size. That's something we cannot represent
6511 inside BFD, and we don't want to force the section alignment to
6512 be the page size. */
6513 if (htab->fix_cortex_a8)
6514 (*htab->layout_sections_again) ();
6515
6516 while (1)
6517 {
6518 bfd *input_bfd;
6519 unsigned int bfd_indx;
6520 asection *stub_sec;
6521 enum elf32_arm_stub_type stub_type;
6522 bfd_boolean stub_changed = FALSE;
6523 unsigned prev_num_a8_fixes = num_a8_fixes;
6524
6525 num_a8_fixes = 0;
6526 for (input_bfd = info->input_bfds, bfd_indx = 0;
6527 input_bfd != NULL;
6528 input_bfd = input_bfd->link.next, bfd_indx++)
6529 {
6530 Elf_Internal_Shdr *symtab_hdr;
6531 asection *section;
6532 Elf_Internal_Sym *local_syms = NULL;
6533
6534 if (!is_arm_elf (input_bfd))
6535 continue;
6536 if ((input_bfd->flags & DYNAMIC) != 0
6537 && (elf_sym_hashes (input_bfd) == NULL
6538 || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6539 continue;
6540
6541 num_a8_relocs = 0;
6542
6543 /* We'll need the symbol table in a second. */
6544 symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6545 if (symtab_hdr->sh_info == 0)
6546 continue;
6547
6548 /* Limit scan of symbols to object file whose profile is
6549 Microcontroller to not hinder performance in the general case. */
6550 if (m_profile && first_veneer_scan)
6551 {
6552 struct elf_link_hash_entry **sym_hashes;
6553
6554 sym_hashes = elf_sym_hashes (input_bfd);
6555 if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6556 &cmse_stub_created))
6557 goto error_ret_free_local;
6558
6559 if (cmse_stub_created != 0)
6560 stub_changed = TRUE;
6561 }
6562
6563 /* Walk over each section attached to the input bfd. */
6564 for (section = input_bfd->sections;
6565 section != NULL;
6566 section = section->next)
6567 {
6568 Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6569
6570 /* If there aren't any relocs, then there's nothing more
6571 to do. */
6572 if ((section->flags & SEC_RELOC) == 0
6573 || section->reloc_count == 0
6574 || (section->flags & SEC_CODE) == 0)
6575 continue;
6576
6577 /* If this section is a link-once section that will be
6578 discarded, then don't create any stubs. */
6579 if (section->output_section == NULL
6580 || section->output_section->owner != output_bfd)
6581 continue;
6582
6583 /* Get the relocs. */
6584 internal_relocs
6585 = _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6586 NULL, info->keep_memory);
6587 if (internal_relocs == NULL)
6588 goto error_ret_free_local;
6589
6590 /* Now examine each relocation. */
6591 irela = internal_relocs;
6592 irelaend = irela + section->reloc_count;
6593 for (; irela < irelaend; irela++)
6594 {
6595 unsigned int r_type, r_indx;
6596 asection *sym_sec;
6597 bfd_vma sym_value;
6598 bfd_vma destination;
6599 struct elf32_arm_link_hash_entry *hash;
6600 const char *sym_name;
6601 unsigned char st_type;
6602 enum arm_st_branch_type branch_type;
6603 bfd_boolean created_stub = FALSE;
6604
6605 r_type = ELF32_R_TYPE (irela->r_info);
6606 r_indx = ELF32_R_SYM (irela->r_info);
6607
6608 if (r_type >= (unsigned int) R_ARM_max)
6609 {
6610 bfd_set_error (bfd_error_bad_value);
6611 error_ret_free_internal:
6612 if (elf_section_data (section)->relocs == NULL)
6613 free (internal_relocs);
6614 /* Fall through. */
6615 error_ret_free_local:
6616 if (symtab_hdr->contents != (unsigned char *) local_syms)
6617 free (local_syms);
6618 return FALSE;
6619 }
6620
6621 hash = NULL;
6622 if (r_indx >= symtab_hdr->sh_info)
6623 hash = elf32_arm_hash_entry
6624 (elf_sym_hashes (input_bfd)
6625 [r_indx - symtab_hdr->sh_info]);
6626
6627 /* Only look for stubs on branch instructions, or
6628 non-relaxed TLSCALL */
6629 if ((r_type != (unsigned int) R_ARM_CALL)
6630 && (r_type != (unsigned int) R_ARM_THM_CALL)
6631 && (r_type != (unsigned int) R_ARM_JUMP24)
6632 && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6633 && (r_type != (unsigned int) R_ARM_THM_XPC22)
6634 && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6635 && (r_type != (unsigned int) R_ARM_PLT32)
6636 && !((r_type == (unsigned int) R_ARM_TLS_CALL
6637 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6638 && r_type == elf32_arm_tls_transition
6639 (info, r_type, &hash->root)
6640 && ((hash ? hash->tls_type
6641 : (elf32_arm_local_got_tls_type
6642 (input_bfd)[r_indx]))
6643 & GOT_TLS_GDESC) != 0))
6644 continue;
6645
6646 /* Now determine the call target, its name, value,
6647 section. */
6648 sym_sec = NULL;
6649 sym_value = 0;
6650 destination = 0;
6651 sym_name = NULL;
6652
6653 if (r_type == (unsigned int) R_ARM_TLS_CALL
6654 || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6655 {
6656 /* A non-relaxed TLS call. The target is the
6657 plt-resident trampoline and nothing to do
6658 with the symbol. */
6659 BFD_ASSERT (htab->tls_trampoline > 0);
6660 sym_sec = htab->root.splt;
6661 sym_value = htab->tls_trampoline;
6662 hash = 0;
6663 st_type = STT_FUNC;
6664 branch_type = ST_BRANCH_TO_ARM;
6665 }
6666 else if (!hash)
6667 {
6668 /* It's a local symbol. */
6669 Elf_Internal_Sym *sym;
6670
6671 if (local_syms == NULL)
6672 {
6673 local_syms
6674 = (Elf_Internal_Sym *) symtab_hdr->contents;
6675 if (local_syms == NULL)
6676 local_syms
6677 = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6678 symtab_hdr->sh_info, 0,
6679 NULL, NULL, NULL);
6680 if (local_syms == NULL)
6681 goto error_ret_free_internal;
6682 }
6683
6684 sym = local_syms + r_indx;
6685 if (sym->st_shndx == SHN_UNDEF)
6686 sym_sec = bfd_und_section_ptr;
6687 else if (sym->st_shndx == SHN_ABS)
6688 sym_sec = bfd_abs_section_ptr;
6689 else if (sym->st_shndx == SHN_COMMON)
6690 sym_sec = bfd_com_section_ptr;
6691 else
6692 sym_sec =
6693 bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6694
6695 if (!sym_sec)
6696 /* This is an undefined symbol. It can never
6697 be resolved. */
6698 continue;
6699
6700 if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6701 sym_value = sym->st_value;
6702 destination = (sym_value + irela->r_addend
6703 + sym_sec->output_offset
6704 + sym_sec->output_section->vma);
6705 st_type = ELF_ST_TYPE (sym->st_info);
6706 branch_type =
6707 ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6708 sym_name
6709 = bfd_elf_string_from_elf_section (input_bfd,
6710 symtab_hdr->sh_link,
6711 sym->st_name);
6712 }
6713 else
6714 {
6715 /* It's an external symbol. */
6716 while (hash->root.root.type == bfd_link_hash_indirect
6717 || hash->root.root.type == bfd_link_hash_warning)
6718 hash = ((struct elf32_arm_link_hash_entry *)
6719 hash->root.root.u.i.link);
6720
6721 if (hash->root.root.type == bfd_link_hash_defined
6722 || hash->root.root.type == bfd_link_hash_defweak)
6723 {
6724 sym_sec = hash->root.root.u.def.section;
6725 sym_value = hash->root.root.u.def.value;
6726
6727 struct elf32_arm_link_hash_table *globals =
6728 elf32_arm_hash_table (info);
6729
6730 /* For a destination in a shared library,
6731 use the PLT stub as target address to
6732 decide whether a branch stub is
6733 needed. */
6734 if (globals != NULL
6735 && globals->root.splt != NULL
6736 && hash != NULL
6737 && hash->root.plt.offset != (bfd_vma) -1)
6738 {
6739 sym_sec = globals->root.splt;
6740 sym_value = hash->root.plt.offset;
6741 if (sym_sec->output_section != NULL)
6742 destination = (sym_value
6743 + sym_sec->output_offset
6744 + sym_sec->output_section->vma);
6745 }
6746 else if (sym_sec->output_section != NULL)
6747 destination = (sym_value + irela->r_addend
6748 + sym_sec->output_offset
6749 + sym_sec->output_section->vma);
6750 }
6751 else if ((hash->root.root.type == bfd_link_hash_undefined)
6752 || (hash->root.root.type == bfd_link_hash_undefweak))
6753 {
6754 /* For a shared library, use the PLT stub as
6755 target address to decide whether a long
6756 branch stub is needed.
6757 For absolute code, they cannot be handled. */
6758 struct elf32_arm_link_hash_table *globals =
6759 elf32_arm_hash_table (info);
6760
6761 if (globals != NULL
6762 && globals->root.splt != NULL
6763 && hash != NULL
6764 && hash->root.plt.offset != (bfd_vma) -1)
6765 {
6766 sym_sec = globals->root.splt;
6767 sym_value = hash->root.plt.offset;
6768 if (sym_sec->output_section != NULL)
6769 destination = (sym_value
6770 + sym_sec->output_offset
6771 + sym_sec->output_section->vma);
6772 }
6773 else
6774 continue;
6775 }
6776 else
6777 {
6778 bfd_set_error (bfd_error_bad_value);
6779 goto error_ret_free_internal;
6780 }
6781 st_type = hash->root.type;
6782 branch_type =
6783 ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6784 sym_name = hash->root.root.root.string;
6785 }
6786
6787 do
6788 {
6789 bfd_boolean new_stub;
6790 struct elf32_arm_stub_hash_entry *stub_entry;
6791
6792 /* Determine what (if any) linker stub is needed. */
6793 stub_type = arm_type_of_stub (info, section, irela,
6794 st_type, &branch_type,
6795 hash, destination, sym_sec,
6796 input_bfd, sym_name);
6797 if (stub_type == arm_stub_none)
6798 break;
6799
6800 /* We've either created a stub for this reloc already,
6801 or we are about to. */
6802 stub_entry =
6803 elf32_arm_create_stub (htab, stub_type, section, irela,
6804 sym_sec, hash,
6805 (char *) sym_name, sym_value,
6806 branch_type, &new_stub);
6807
6808 created_stub = stub_entry != NULL;
6809 if (!created_stub)
6810 goto error_ret_free_internal;
6811 else if (!new_stub)
6812 break;
6813 else
6814 stub_changed = TRUE;
6815 }
6816 while (0);
6817
6818 /* Look for relocations which might trigger Cortex-A8
6819 erratum. */
6820 if (htab->fix_cortex_a8
6821 && (r_type == (unsigned int) R_ARM_THM_JUMP24
6822 || r_type == (unsigned int) R_ARM_THM_JUMP19
6823 || r_type == (unsigned int) R_ARM_THM_CALL
6824 || r_type == (unsigned int) R_ARM_THM_XPC22))
6825 {
6826 bfd_vma from = section->output_section->vma
6827 + section->output_offset
6828 + irela->r_offset;
6829
6830 if ((from & 0xfff) == 0xffe)
6831 {
6832 /* Found a candidate. Note we haven't checked the
6833 destination is within 4K here: if we do so (and
6834 don't create an entry in a8_relocs) we can't tell
6835 that a branch should have been relocated when
6836 scanning later. */
6837 if (num_a8_relocs == a8_reloc_table_size)
6838 {
6839 a8_reloc_table_size *= 2;
6840 a8_relocs = (struct a8_erratum_reloc *)
6841 bfd_realloc (a8_relocs,
6842 sizeof (struct a8_erratum_reloc)
6843 * a8_reloc_table_size);
6844 }
6845
6846 a8_relocs[num_a8_relocs].from = from;
6847 a8_relocs[num_a8_relocs].destination = destination;
6848 a8_relocs[num_a8_relocs].r_type = r_type;
6849 a8_relocs[num_a8_relocs].branch_type = branch_type;
6850 a8_relocs[num_a8_relocs].sym_name = sym_name;
6851 a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6852 a8_relocs[num_a8_relocs].hash = hash;
6853
6854 num_a8_relocs++;
6855 }
6856 }
6857 }
6858
6859 /* We're done with the internal relocs, free them. */
6860 if (elf_section_data (section)->relocs == NULL)
6861 free (internal_relocs);
6862 }
6863
6864 if (htab->fix_cortex_a8)
6865 {
6866 /* Sort relocs which might apply to Cortex-A8 erratum. */
6867 qsort (a8_relocs, num_a8_relocs,
6868 sizeof (struct a8_erratum_reloc),
6869 &a8_reloc_compare);
6870
6871 /* Scan for branches which might trigger Cortex-A8 erratum. */
6872 if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6873 &num_a8_fixes, &a8_fix_table_size,
6874 a8_relocs, num_a8_relocs,
6875 prev_num_a8_fixes, &stub_changed)
6876 != 0)
6877 goto error_ret_free_local;
6878 }
6879
6880 if (local_syms != NULL
6881 && symtab_hdr->contents != (unsigned char *) local_syms)
6882 {
6883 if (!info->keep_memory)
6884 free (local_syms);
6885 else
6886 symtab_hdr->contents = (unsigned char *) local_syms;
6887 }
6888 }
6889
6890 if (first_veneer_scan
6891 && !set_cmse_veneer_addr_from_implib (info, htab,
6892 &cmse_stub_created))
6893 ret = FALSE;
6894
6895 if (prev_num_a8_fixes != num_a8_fixes)
6896 stub_changed = TRUE;
6897
6898 if (!stub_changed)
6899 break;
6900
6901 /* OK, we've added some stubs. Find out the new size of the
6902 stub sections. */
6903 for (stub_sec = htab->stub_bfd->sections;
6904 stub_sec != NULL;
6905 stub_sec = stub_sec->next)
6906 {
6907 /* Ignore non-stub sections. */
6908 if (!strstr (stub_sec->name, STUB_SUFFIX))
6909 continue;
6910
6911 stub_sec->size = 0;
6912 }
6913
6914 /* Add new SG veneers after those already in the input import
6915 library. */
6916 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6917 stub_type++)
6918 {
6919 bfd_vma *start_offset_p;
6920 asection **stub_sec_p;
6921
6922 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6923 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6924 if (start_offset_p == NULL)
6925 continue;
6926
6927 BFD_ASSERT (stub_sec_p != NULL);
6928 if (*stub_sec_p != NULL)
6929 (*stub_sec_p)->size = *start_offset_p;
6930 }
6931
6932 /* Compute stub section size, considering padding. */
6933 bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6934 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6935 stub_type++)
6936 {
6937 int size, padding;
6938 asection **stub_sec_p;
6939
6940 padding = arm_dedicated_stub_section_padding (stub_type);
6941 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6942 /* Skip if no stub input section or no stub section padding
6943 required. */
6944 if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6945 continue;
6946 /* Stub section padding required but no dedicated section. */
6947 BFD_ASSERT (stub_sec_p);
6948
6949 size = (*stub_sec_p)->size;
6950 size = (size + padding - 1) & ~(padding - 1);
6951 (*stub_sec_p)->size = size;
6952 }
6953
6954 /* Add Cortex-A8 erratum veneers to stub section sizes too. */
6955 if (htab->fix_cortex_a8)
6956 for (i = 0; i < num_a8_fixes; i++)
6957 {
6958 stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6959 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6960
6961 if (stub_sec == NULL)
6962 return FALSE;
6963
6964 stub_sec->size
6965 += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6966 NULL);
6967 }
6968
6969
6970 /* Ask the linker to do its stuff. */
6971 (*htab->layout_sections_again) ();
6972 first_veneer_scan = FALSE;
6973 }
6974
6975 /* Add stubs for Cortex-A8 erratum fixes now. */
6976 if (htab->fix_cortex_a8)
6977 {
6978 for (i = 0; i < num_a8_fixes; i++)
6979 {
6980 struct elf32_arm_stub_hash_entry *stub_entry;
6981 char *stub_name = a8_fixes[i].stub_name;
6982 asection *section = a8_fixes[i].section;
6983 unsigned int section_id = a8_fixes[i].section->id;
6984 asection *link_sec = htab->stub_group[section_id].link_sec;
6985 asection *stub_sec = htab->stub_group[section_id].stub_sec;
6986 const insn_sequence *template_sequence;
6987 int template_size, size = 0;
6988
6989 stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6990 TRUE, FALSE);
6991 if (stub_entry == NULL)
6992 {
6993 _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6994 section->owner, stub_name);
6995 return FALSE;
6996 }
6997
6998 stub_entry->stub_sec = stub_sec;
6999 stub_entry->stub_offset = (bfd_vma) -1;
7000 stub_entry->id_sec = link_sec;
7001 stub_entry->stub_type = a8_fixes[i].stub_type;
7002 stub_entry->source_value = a8_fixes[i].offset;
7003 stub_entry->target_section = a8_fixes[i].section;
7004 stub_entry->target_value = a8_fixes[i].target_offset;
7005 stub_entry->orig_insn = a8_fixes[i].orig_insn;
7006 stub_entry->branch_type = a8_fixes[i].branch_type;
7007
7008 size = find_stub_size_and_template (a8_fixes[i].stub_type,
7009 &template_sequence,
7010 &template_size);
7011
7012 stub_entry->stub_size = size;
7013 stub_entry->stub_template = template_sequence;
7014 stub_entry->stub_template_size = template_size;
7015 }
7016
7017 /* Stash the Cortex-A8 erratum fix array for use later in
7018 elf32_arm_write_section(). */
7019 htab->a8_erratum_fixes = a8_fixes;
7020 htab->num_a8_erratum_fixes = num_a8_fixes;
7021 }
7022 else
7023 {
7024 htab->a8_erratum_fixes = NULL;
7025 htab->num_a8_erratum_fixes = 0;
7026 }
7027 return ret;
7028 }
7029
7030 /* Build all the stubs associated with the current output file. The
7031 stubs are kept in a hash table attached to the main linker hash
7032 table. We also set up the .plt entries for statically linked PIC
7033 functions here. This function is called via arm_elf_finish in the
7034 linker. */
7035
7036 bfd_boolean
7037 elf32_arm_build_stubs (struct bfd_link_info *info)
7038 {
7039 asection *stub_sec;
7040 struct bfd_hash_table *table;
7041 enum elf32_arm_stub_type stub_type;
7042 struct elf32_arm_link_hash_table *htab;
7043
7044 htab = elf32_arm_hash_table (info);
7045 if (htab == NULL)
7046 return FALSE;
7047
7048 for (stub_sec = htab->stub_bfd->sections;
7049 stub_sec != NULL;
7050 stub_sec = stub_sec->next)
7051 {
7052 bfd_size_type size;
7053
7054 /* Ignore non-stub sections. */
7055 if (!strstr (stub_sec->name, STUB_SUFFIX))
7056 continue;
7057
7058 /* Allocate memory to hold the linker stubs. Zeroing the stub sections
7059 must at least be done for stub section requiring padding and for SG
7060 veneers to ensure that a non secure code branching to a removed SG
7061 veneer causes an error. */
7062 size = stub_sec->size;
7063 stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7064 if (stub_sec->contents == NULL && size != 0)
7065 return FALSE;
7066
7067 stub_sec->size = 0;
7068 }
7069
7070 /* Add new SG veneers after those already in the input import library. */
7071 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7072 {
7073 bfd_vma *start_offset_p;
7074 asection **stub_sec_p;
7075
7076 start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7077 stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7078 if (start_offset_p == NULL)
7079 continue;
7080
7081 BFD_ASSERT (stub_sec_p != NULL);
7082 if (*stub_sec_p != NULL)
7083 (*stub_sec_p)->size = *start_offset_p;
7084 }
7085
7086 /* Build the stubs as directed by the stub hash table. */
7087 table = &htab->stub_hash_table;
7088 bfd_hash_traverse (table, arm_build_one_stub, info);
7089 if (htab->fix_cortex_a8)
7090 {
7091 /* Place the cortex a8 stubs last. */
7092 htab->fix_cortex_a8 = -1;
7093 bfd_hash_traverse (table, arm_build_one_stub, info);
7094 }
7095
7096 return TRUE;
7097 }
7098
7099 /* Locate the Thumb encoded calling stub for NAME. */
7100
7101 static struct elf_link_hash_entry *
7102 find_thumb_glue (struct bfd_link_info *link_info,
7103 const char *name,
7104 char **error_message)
7105 {
7106 char *tmp_name;
7107 struct elf_link_hash_entry *hash;
7108 struct elf32_arm_link_hash_table *hash_table;
7109
7110 /* We need a pointer to the armelf specific hash table. */
7111 hash_table = elf32_arm_hash_table (link_info);
7112 if (hash_table == NULL)
7113 return NULL;
7114
7115 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7116 + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7117
7118 BFD_ASSERT (tmp_name);
7119
7120 sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7121
7122 hash = elf_link_hash_lookup
7123 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7124
7125 if (hash == NULL
7126 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7127 "Thumb", tmp_name, name) == -1)
7128 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7129
7130 free (tmp_name);
7131
7132 return hash;
7133 }
7134
7135 /* Locate the ARM encoded calling stub for NAME. */
7136
7137 static struct elf_link_hash_entry *
7138 find_arm_glue (struct bfd_link_info *link_info,
7139 const char *name,
7140 char **error_message)
7141 {
7142 char *tmp_name;
7143 struct elf_link_hash_entry *myh;
7144 struct elf32_arm_link_hash_table *hash_table;
7145
7146 /* We need a pointer to the elfarm specific hash table. */
7147 hash_table = elf32_arm_hash_table (link_info);
7148 if (hash_table == NULL)
7149 return NULL;
7150
7151 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7152 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7153 BFD_ASSERT (tmp_name);
7154
7155 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7156
7157 myh = elf_link_hash_lookup
7158 (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7159
7160 if (myh == NULL
7161 && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7162 "ARM", tmp_name, name) == -1)
7163 *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7164
7165 free (tmp_name);
7166
7167 return myh;
7168 }
7169
7170 /* ARM->Thumb glue (static images):
7171
7172 .arm
7173 __func_from_arm:
7174 ldr r12, __func_addr
7175 bx r12
7176 __func_addr:
7177 .word func @ behave as if you saw a ARM_32 reloc.
7178
7179 (v5t static images)
7180 .arm
7181 __func_from_arm:
7182 ldr pc, __func_addr
7183 __func_addr:
7184 .word func @ behave as if you saw a ARM_32 reloc.
7185
7186 (relocatable images)
7187 .arm
7188 __func_from_arm:
7189 ldr r12, __func_offset
7190 add r12, r12, pc
7191 bx r12
7192 __func_offset:
7193 .word func - . */
7194
/* Opcodes for the ARM->Thumb static glue sketched above.  */
#define ARM2THUMB_STATIC_GLUE_SIZE 12
static const insn32 a2t1_ldr_insn = 0xe59fc000;		/* ldr r12, __func_addr */
static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;	/* bx r12 */
static const insn32 a2t3_func_addr_insn = 0x00000001;	/* .word func */

/* v5T variant: BLX-capable cores can load straight into PC.  */
#define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
static const insn32 a2t1v5_ldr_insn = 0xe51ff004;	/* ldr pc, __func_addr */
static const insn32 a2t2v5_func_addr_insn = 0x00000001;	/* .word func */

/* Position-independent variant for relocatable images.  */
#define ARM2THUMB_PIC_GLUE_SIZE 16
static const insn32 a2t1p_ldr_insn = 0xe59fc004;	/* ldr r12, __func_offset */
static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;	/* add r12, r12, pc */
static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;	/* bx r12 */

/* Thumb->ARM: Thumb->(non-interworking aware) ARM

     .thumb				.thumb
     .align 2				.align 2
  __func_from_thumb:		     __func_from_thumb:
     bx pc				push {r6, lr}
     nop				ldr  r6, __func_addr
     .arm				mov  lr, pc
     b func				bx   r6
					.arm
				     ;; back_to_thumb
					ldmia r13! {r6, lr}
					bx    lr
				     __func_addr:
					.word	     func  */

/* Opcodes for the Thumb->ARM glue sketched above.  */
#define THUMB2ARM_GLUE_SIZE 8
static const insn16 t2a1_bx_pc_insn = 0x4778;		/* bx pc */
static const insn16 t2a2_noop_insn = 0x46c0;		/* nop */
static const insn32 t2a3_b_insn = 0xea000000;		/* b func */

/* Sizes of the erratum-workaround veneers emitted by the recorders
   below.  */
#define VFP11_ERRATUM_VENEER_SIZE 8
#define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
#define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24

/* Opcodes for the ARMv4 BX veneer (see record_arm_bx_glue).  */
#define ARM_BX_VENEER_SIZE 12
static const insn32 armbx1_tst_insn = 0xe3100001;	/* tst insn */
static const insn32 armbx2_moveq_insn = 0x01a0f000;	/* moveq insn */
static const insn32 armbx3_bx_insn = 0xe12fff10;	/* bx insn */
7238
7239 #ifndef ELFARM_NABI_C_INCLUDED
7240 static void
7241 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7242 {
7243 asection * s;
7244 bfd_byte * contents;
7245
7246 if (size == 0)
7247 {
7248 /* Do not include empty glue sections in the output. */
7249 if (abfd != NULL)
7250 {
7251 s = bfd_get_linker_section (abfd, name);
7252 if (s != NULL)
7253 s->flags |= SEC_EXCLUDE;
7254 }
7255 return;
7256 }
7257
7258 BFD_ASSERT (abfd != NULL);
7259
7260 s = bfd_get_linker_section (abfd, name);
7261 BFD_ASSERT (s != NULL);
7262
7263 contents = (bfd_byte *) bfd_zalloc (abfd, size);
7264
7265 BFD_ASSERT (s->size == size);
7266 s->contents = contents;
7267 }
7268
7269 bfd_boolean
7270 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7271 {
7272 struct elf32_arm_link_hash_table * globals;
7273
7274 globals = elf32_arm_hash_table (info);
7275 BFD_ASSERT (globals != NULL);
7276
7277 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7278 globals->arm_glue_size,
7279 ARM2THUMB_GLUE_SECTION_NAME);
7280
7281 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7282 globals->thumb_glue_size,
7283 THUMB2ARM_GLUE_SECTION_NAME);
7284
7285 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7286 globals->vfp11_erratum_glue_size,
7287 VFP11_ERRATUM_VENEER_SECTION_NAME);
7288
7289 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7290 globals->stm32l4xx_erratum_glue_size,
7291 STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7292
7293 arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7294 globals->bx_glue_size,
7295 ARM_BX_GLUE_SECTION_NAME);
7296
7297 return TRUE;
7298 }
7299
7300 /* Allocate space and symbols for calling a Thumb function from Arm mode.
7301 returns the symbol identifying the stub. */
7302
7303 static struct elf_link_hash_entry *
7304 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7305 struct elf_link_hash_entry * h)
7306 {
7307 const char * name = h->root.root.string;
7308 asection * s;
7309 char * tmp_name;
7310 struct elf_link_hash_entry * myh;
7311 struct bfd_link_hash_entry * bh;
7312 struct elf32_arm_link_hash_table * globals;
7313 bfd_vma val;
7314 bfd_size_type size;
7315
7316 globals = elf32_arm_hash_table (link_info);
7317 BFD_ASSERT (globals != NULL);
7318 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7319
7320 s = bfd_get_linker_section
7321 (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7322
7323 BFD_ASSERT (s != NULL);
7324
7325 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7326 + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7327 BFD_ASSERT (tmp_name);
7328
7329 sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7330
7331 myh = elf_link_hash_lookup
7332 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7333
7334 if (myh != NULL)
7335 {
7336 /* We've already seen this guy. */
7337 free (tmp_name);
7338 return myh;
7339 }
7340
7341 /* The only trick here is using hash_table->arm_glue_size as the value.
7342 Even though the section isn't allocated yet, this is where we will be
7343 putting it. The +1 on the value marks that the stub has not been
7344 output yet - not that it is a Thumb function. */
7345 bh = NULL;
7346 val = globals->arm_glue_size + 1;
7347 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7348 tmp_name, BSF_GLOBAL, s, val,
7349 NULL, TRUE, FALSE, &bh);
7350
7351 myh = (struct elf_link_hash_entry *) bh;
7352 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7353 myh->forced_local = 1;
7354
7355 free (tmp_name);
7356
7357 if (bfd_link_pic (link_info)
7358 || globals->root.is_relocatable_executable
7359 || globals->pic_veneer)
7360 size = ARM2THUMB_PIC_GLUE_SIZE;
7361 else if (globals->use_blx)
7362 size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7363 else
7364 size = ARM2THUMB_STATIC_GLUE_SIZE;
7365
7366 s->size += size;
7367 globals->arm_glue_size += size;
7368
7369 return myh;
7370 }
7371
7372 /* Allocate space for ARMv4 BX veneers. */
7373
7374 static void
7375 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7376 {
7377 asection * s;
7378 struct elf32_arm_link_hash_table *globals;
7379 char *tmp_name;
7380 struct elf_link_hash_entry *myh;
7381 struct bfd_link_hash_entry *bh;
7382 bfd_vma val;
7383
7384 /* BX PC does not need a veneer. */
7385 if (reg == 15)
7386 return;
7387
7388 globals = elf32_arm_hash_table (link_info);
7389 BFD_ASSERT (globals != NULL);
7390 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7391
7392 /* Check if this veneer has already been allocated. */
7393 if (globals->bx_glue_offset[reg])
7394 return;
7395
7396 s = bfd_get_linker_section
7397 (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7398
7399 BFD_ASSERT (s != NULL);
7400
7401 /* Add symbol for veneer. */
7402 tmp_name = (char *)
7403 bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7404 BFD_ASSERT (tmp_name);
7405
7406 sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7407
7408 myh = elf_link_hash_lookup
7409 (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7410
7411 BFD_ASSERT (myh == NULL);
7412
7413 bh = NULL;
7414 val = globals->bx_glue_size;
7415 _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7416 tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7417 NULL, TRUE, FALSE, &bh);
7418
7419 myh = (struct elf_link_hash_entry *) bh;
7420 myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7421 myh->forced_local = 1;
7422
7423 s->size += ARM_BX_VENEER_SIZE;
7424 globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7425 globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7426 }
7427
7428
7429 /* Add an entry to the code/data map for section SEC. */
7430
7431 static void
7432 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7433 {
7434 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7435 unsigned int newidx;
7436
7437 if (sec_data->map == NULL)
7438 {
7439 sec_data->map = (elf32_arm_section_map *)
7440 bfd_malloc (sizeof (elf32_arm_section_map));
7441 sec_data->mapcount = 0;
7442 sec_data->mapsize = 1;
7443 }
7444
7445 newidx = sec_data->mapcount++;
7446
7447 if (sec_data->mapcount > sec_data->mapsize)
7448 {
7449 sec_data->mapsize *= 2;
7450 sec_data->map = (elf32_arm_section_map *)
7451 bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7452 * sizeof (elf32_arm_section_map));
7453 }
7454
7455 if (sec_data->map)
7456 {
7457 sec_data->map[newidx].vma = vma;
7458 sec_data->map[newidx].type = type;
7459 }
7460 }
7461
7462
7463 /* Record information about a VFP11 denorm-erratum veneer. Only ARM-mode
7464 veneers are handled for now. */
7465
static bfd_vma
record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
			     elf32_vfp11_erratum_list *branch,
			     bfd *branch_bfd,
			     asection *branch_sec,
			     unsigned int offset)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_vfp11_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  /* All VFP11 veneers live in the dedicated glue section owned by the
     glue-owner bfd.  */
  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);

  /* NOTE(review): S is dereferenced here before the S != NULL assertion
     below; harmless only if the glue section is guaranteed to exist by
     this point.  The STM32L4XX recorder asserts first.  */
  sec_data = elf32_arm_section_data (s);

  BFD_ASSERT (s != NULL);

  /* Build the veneer symbol name from its sequence number; +10 leaves
     room for the printed number.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_vfp11_fixes);

  /* The veneer symbol must not exist yet.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->vfp11_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->erratumcount += 1;
  newerr = (elf32_vfp11_erratum_list *)
      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));

  newerr->type = VFP11_ERRATUM_ARM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_vfp11_fixes;
  branch->u.b.veneer = newerr;

  /* Push the new record onto the section's erratum list.  */
  newerr->next = sec_data->erratumlist;
  sec_data->erratumlist = newerr;

  /* A symbol for the return from the veneer: defined in the *calling*
     section, just past the offending instruction.  */
  sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_vfp11_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->vfp11_erratum_glue_size == 0)
    {
      bh = NULL;
      /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
	 ever requires this erratum fix.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$a",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 'a', 0);
    }

  /* Account for the veneer just recorded.  */
  s->size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
  hash_table->num_vfp11_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was last assigned
     OFFSET + 4 (the return-symbol value in BRANCH_SEC), not the veneer's
     glue offset - confirm against callers which meaning is intended.  */
  return val;
}
7580
7581 /* Record information about a STM32L4XX STM erratum veneer. Only THUMB-mode
7582 veneers need to be handled because used only in Cortex-M. */
7583
static bfd_vma
record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
				 elf32_stm32l4xx_erratum_list *branch,
				 bfd *branch_bfd,
				 asection *branch_sec,
				 unsigned int offset,
				 bfd_size_type veneer_size)
{
  asection *s;
  struct elf32_arm_link_hash_table *hash_table;
  char *tmp_name;
  struct elf_link_hash_entry *myh;
  struct bfd_link_hash_entry *bh;
  bfd_vma val;
  struct _arm_elf_section_data *sec_data;
  elf32_stm32l4xx_erratum_list *newerr;

  hash_table = elf32_arm_hash_table (link_info);
  BFD_ASSERT (hash_table != NULL);
  BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);

  /* All STM32L4XX veneers live in the dedicated glue section owned by
     the glue-owner bfd.  */
  s = bfd_get_linker_section
    (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);

  BFD_ASSERT (s != NULL);

  sec_data = elf32_arm_section_data (s);

  /* Build the veneer symbol name from its sequence number; +10 leaves
     room for the printed number.  */
  tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
  BFD_ASSERT (tmp_name);

  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
	   hash_table->num_stm32l4xx_fixes);

  /* The veneer symbol must not exist yet.  */
  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  BFD_ASSERT (myh == NULL);

  /* Define the veneer symbol at the current end of the glue.  */
  bh = NULL;
  val = hash_table->stm32l4xx_erratum_glue_size;
  _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
				    NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  /* Link veneer back to calling location.  */
  sec_data->stm32l4xx_erratumcount += 1;
  newerr = (elf32_stm32l4xx_erratum_list *)
      bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));

  newerr->type = STM32L4XX_ERRATUM_VENEER;
  newerr->vma = -1;
  newerr->u.v.branch = branch;
  newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
  branch->u.b.veneer = newerr;

  /* Push the new record onto the section's erratum list.  */
  newerr->next = sec_data->stm32l4xx_erratumlist;
  sec_data->stm32l4xx_erratumlist = newerr;

  /* A symbol for the return from the veneer: defined in the *calling*
     section, just past the offending instruction.  */
  sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
	   hash_table->num_stm32l4xx_fixes);

  myh = elf_link_hash_lookup
    (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);

  if (myh != NULL)
    abort ();

  bh = NULL;
  val = offset + 4;
  _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
				    branch_sec, val, NULL, TRUE, FALSE, &bh);

  myh = (struct elf_link_hash_entry *) bh;
  myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
  myh->forced_local = 1;

  free (tmp_name);

  /* Generate a mapping symbol for the veneer section, and explicitly add an
     entry for that symbol to the code/data map for the section.  */
  if (hash_table->stm32l4xx_erratum_glue_size == 0)
    {
      bh = NULL;
      /* Creates a THUMB symbol since there is no other choice.  */
      _bfd_generic_link_add_one_symbol (link_info,
					hash_table->bfd_of_glue_owner, "$t",
					BSF_LOCAL, s, 0, NULL,
					TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
      myh->forced_local = 1;

      /* The elf32_arm_init_maps function only cares about symbols from input
	 BFDs.  We must make a note of this generated mapping symbol
	 ourselves so that code byteswapping works properly in
	 elf32_arm_write_section.  */
      elf32_arm_section_map_add (s, 't', 0);
    }

  /* Account for the veneer just recorded (size varies with the kind of
     LDM/VLDM being worked around, hence the VENEER_SIZE parameter).  */
  s->size += veneer_size;
  hash_table->stm32l4xx_erratum_glue_size += veneer_size;
  hash_table->num_stm32l4xx_fixes++;

  /* The offset of the veneer.  NOTE(review): VAL was last assigned
     OFFSET + 4 (the return-symbol value in BRANCH_SEC), not the veneer's
     glue offset - confirm against callers which meaning is intended.  */
  return val;
}
7698
/* Section flags for the linker-created glue sections: allocated,
   loadable, read-only code whose contents are supplied by BFD.  */
#define ARM_GLUE_SECTION_FLAGS \
  (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
   | SEC_READONLY | SEC_LINKER_CREATED)
7702
7703 /* Create a fake section for use by the ARM backend of the linker. */
7704
7705 static bfd_boolean
7706 arm_make_glue_section (bfd * abfd, const char * name)
7707 {
7708 asection * sec;
7709
7710 sec = bfd_get_linker_section (abfd, name);
7711 if (sec != NULL)
7712 /* Already made. */
7713 return TRUE;
7714
7715 sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7716
7717 if (sec == NULL
7718 || !bfd_set_section_alignment (sec, 2))
7719 return FALSE;
7720
7721 /* Set the gc mark to prevent the section from being removed by garbage
7722 collection, despite the fact that no relocs refer to this section. */
7723 sec->gc_mark = 1;
7724
7725 return TRUE;
7726 }
7727
7728 /* Set size of .plt entries. This function is called from the
7729 linker scripts in ld/emultempl/{armelf}.em. */
7730
void
bfd_elf32_arm_use_long_plt (void)
{
  /* Record globally that the long variant of .plt entries is wanted.  */
  elf32_arm_use_long_plt_entry = TRUE;
}
7736
7737 /* Add the glue sections to ABFD. This function is called from the
7738 linker scripts in ld/emultempl/{armelf}.em. */
7739
7740 bfd_boolean
7741 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7742 struct bfd_link_info *info)
7743 {
7744 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7745 bfd_boolean dostm32l4xx = globals
7746 && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7747 bfd_boolean addglue;
7748
7749 /* If we are only performing a partial
7750 link do not bother adding the glue. */
7751 if (bfd_link_relocatable (info))
7752 return TRUE;
7753
7754 addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7755 && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7756 && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7757 && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7758
7759 if (!dostm32l4xx)
7760 return addglue;
7761
7762 return addglue
7763 && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7764 }
7765
7766 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP. This
7767 ensures they are not marked for deletion by
7768 strip_excluded_output_sections () when veneers are going to be created
7769 later. Not doing so would trigger assert on empty section size in
7770 lang_size_sections_1 (). */
7771
7772 void
7773 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7774 {
7775 enum elf32_arm_stub_type stub_type;
7776
7777 /* If we are only performing a partial
7778 link do not bother adding the glue. */
7779 if (bfd_link_relocatable (info))
7780 return;
7781
7782 for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7783 {
7784 asection *out_sec;
7785 const char *out_sec_name;
7786
7787 if (!arm_dedicated_stub_output_section_required (stub_type))
7788 continue;
7789
7790 out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7791 out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7792 if (out_sec != NULL)
7793 out_sec->flags |= SEC_KEEP;
7794 }
7795 }
7796
7797 /* Select a BFD to be used to hold the sections used by the glue code.
7798 This function is called from the linker scripts in ld/emultempl/
7799 {armelf/pe}.em. */
7800
7801 bfd_boolean
7802 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7803 {
7804 struct elf32_arm_link_hash_table *globals;
7805
7806 /* If we are only performing a partial link
7807 do not bother getting a bfd to hold the glue. */
7808 if (bfd_link_relocatable (info))
7809 return TRUE;
7810
7811 /* Make sure we don't attach the glue sections to a dynamic object. */
7812 BFD_ASSERT (!(abfd->flags & DYNAMIC));
7813
7814 globals = elf32_arm_hash_table (info);
7815 BFD_ASSERT (globals != NULL);
7816
7817 if (globals->bfd_of_glue_owner != NULL)
7818 return TRUE;
7819
7820 /* Save the bfd for later use. */
7821 globals->bfd_of_glue_owner = abfd;
7822
7823 return TRUE;
7824 }
7825
7826 static void
7827 check_use_blx (struct elf32_arm_link_hash_table *globals)
7828 {
7829 int cpu_arch;
7830
7831 cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7832 Tag_CPU_arch);
7833
7834 if (globals->fix_arm1176)
7835 {
7836 if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7837 globals->use_blx = 1;
7838 }
7839 else
7840 {
7841 if (cpu_arch > TAG_CPU_ARCH_V4T)
7842 globals->use_blx = 1;
7843 }
7844 }
7845
/* Scan the relocations of every section of ABFD (an input bfd of the link
   described by LINK_INFO) before section sizes are fixed, and record any
   interworking glue that will be needed: ARM->Thumb glue for R_ARM_PC24
   calls to Thumb symbols, and BX-substitution glue for R_ARM_V4BX.
   Returns TRUE on success (including the no-work cases), FALSE on error.  */
bfd_boolean
bfd_elf32_arm_process_before_allocation (bfd *abfd,
					 struct bfd_link_info *link_info)
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Rela *internal_relocs = NULL;
  Elf_Internal_Rela *irel, *irelend;
  bfd_byte *contents = NULL;

  asection *sec;
  struct elf32_arm_link_hash_table *globals;

  /* If we are only performing a partial link do not bother
     to construct any glue.  */
  if (bfd_link_relocatable (link_info))
    return TRUE;

  /* Here we have a bfd that is to be included on the link.  We have a
     hook to do reloc rummaging, before section sizes are nailed down.  */
  globals = elf32_arm_hash_table (link_info);
  BFD_ASSERT (globals != NULL);

  check_use_blx (globals);

  if (globals->byteswap_code && !bfd_big_endian (abfd))
    {
      _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
			  abfd);
      return FALSE;
    }

  /* PR 5398: If we have not decided to include any loadable sections in
     the output then we will not have a glue owner bfd.  This is OK, it
     just means that there is nothing else for us to do here.  */
  if (globals->bfd_of_glue_owner == NULL)
    return TRUE;

  /* Rummage around all the relocs and map the glue vectors.  */
  sec = abfd->sections;

  if (sec == NULL)
    return TRUE;

  for (; sec != NULL; sec = sec->next)
    {
      if (sec->reloc_count == 0)
	continue;

      if ((sec->flags & SEC_EXCLUDE) != 0)
	continue;

      symtab_hdr = & elf_symtab_hdr (abfd);

      /* Load the relocs.  */
      internal_relocs
	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);

      if (internal_relocs == NULL)
	goto error_return;

      irelend = internal_relocs + sec->reloc_count;
      for (irel = internal_relocs; irel < irelend; irel++)
	{
	  long r_type;
	  unsigned long r_index;

	  struct elf_link_hash_entry *h;

	  r_type = ELF32_R_TYPE (irel->r_info);
	  r_index = ELF32_R_SYM (irel->r_info);

	  /* These are the only relocation types we care about.
	     R_ARM_V4BX only matters when fix_v4bx >= 2 (replace the BX
	     with a veneer rather than patching it in place).  */
	  if (	 r_type != R_ARM_PC24
	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
	    continue;

	  /* Get the section contents if we haven't done so already.  */
	  if (contents == NULL)
	    {
	      /* Get cached copy if it exists.  */
	      if (elf_section_data (sec)->this_hdr.contents != NULL)
		contents = elf_section_data (sec)->this_hdr.contents;
	      else
		{
		  /* Go get them off disk.  */
		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
		    goto error_return;
		}
	    }

	  if (r_type == R_ARM_V4BX)
	    {
	      int reg;

	      /* Low four bits of a BX instruction encode the register
		 being branched through; record it for glue creation.  */
	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
	      record_arm_bx_glue (link_info, reg);
	      continue;
	    }

	  /* If the relocation is not against a symbol it cannot concern us.  */
	  h = NULL;

	  /* We don't care about local symbols.  */
	  if (r_index < symtab_hdr->sh_info)
	    continue;

	  /* This is an external symbol.  */
	  r_index -= symtab_hdr->sh_info;
	  h = (struct elf_link_hash_entry *)
	    elf_sym_hashes (abfd)[r_index];

	  /* If the relocation is against a static symbol it must be within
	     the current section and so cannot be a cross ARM/Thumb relocation.  */
	  if (h == NULL)
	    continue;

	  /* If the call will go through a PLT entry then we do not need
	     glue.  */
	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
	    continue;

	  switch (r_type)
	    {
	    case R_ARM_PC24:
	      /* This one is a call from arm code.  We need to look up
		 the target of the call.  If it is a thumb target, we
		 insert glue.  */
	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
		  == ST_BRANCH_TO_THUMB)
		record_arm_to_thumb_glue (link_info, h);
	      break;

	    default:
	      abort ();
	    }
	}

      /* Only free contents if it was read from disk, not the cached copy.  */
      if (elf_section_data (sec)->this_hdr.contents != contents)
	free (contents);
      contents = NULL;

      if (elf_section_data (sec)->relocs != internal_relocs)
	free (internal_relocs);
      internal_relocs = NULL;
    }

  return TRUE;

 error_return:
  if (elf_section_data (sec)->this_hdr.contents != contents)
    free (contents);
  if (elf_section_data (sec)->relocs != internal_relocs)
    free (internal_relocs);

  return FALSE;
}
8002 #endif
8003
8004
8005 /* Initialise maps of ARM/Thumb/data for input BFDs. */
8006
/* Build the ARM/Thumb/data mapping-symbol tables ($a/$t/$d) for every
   section of ABFD, by scanning its local symbols.  Non-ARM and dynamic
   bfds are ignored.  */
void
bfd_elf32_arm_init_maps (bfd *abfd)
{
  Elf_Internal_Sym *isymbuf;
  Elf_Internal_Shdr *hdr;
  unsigned int i, localsyms;

  /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
  if (! is_arm_elf (abfd))
    return;

  if ((abfd->flags & DYNAMIC) != 0)
    return;

  hdr = & elf_symtab_hdr (abfd);
  localsyms = hdr->sh_info;

  /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
     should contain the number of local symbols, which should come before any
     global symbols.  Mapping symbols are always local.  */
  isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
				  NULL);

  /* No internal symbols read?  Skip this BFD.  */
  if (isymbuf == NULL)
    return;

  /* NOTE(review): ISYMBUF looks heap-allocated by bfd_elf_get_elf_syms
     (no caller-supplied buffer) and is not freed here — confirm whether
     it is cached elsewhere or this is a leak.  */
  for (i = 0; i < localsyms; i++)
    {
      Elf_Internal_Sym *isym = &isymbuf[i];
      asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
      const char *name;

      if (sec != NULL
	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
	{
	  name = bfd_elf_string_from_elf_section (abfd,
						  hdr->sh_link, isym->st_name);

	  /* Mapping symbols are named $a, $t or $d (optionally with a
	     dotted suffix); name[1] is the type character recorded.  */
	  if (bfd_is_arm_special_symbol_name (name,
					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
	}
    }
}
8052
8053
8054 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8055 say what they wanted. */
8056
8057 void
8058 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8059 {
8060 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8061 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8062
8063 if (globals == NULL)
8064 return;
8065
8066 if (globals->fix_cortex_a8 == -1)
8067 {
8068 /* Turn on Cortex-A8 erratum workaround for ARMv7-A. */
8069 if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8070 && (out_attr[Tag_CPU_arch_profile].i == 'A'
8071 || out_attr[Tag_CPU_arch_profile].i == 0))
8072 globals->fix_cortex_a8 = 1;
8073 else
8074 globals->fix_cortex_a8 = 0;
8075 }
8076 }
8077
8078
8079 void
8080 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8081 {
8082 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8083 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8084
8085 if (globals == NULL)
8086 return;
8087 /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix. */
8088 if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8089 {
8090 switch (globals->vfp11_fix)
8091 {
8092 case BFD_ARM_VFP11_FIX_DEFAULT:
8093 case BFD_ARM_VFP11_FIX_NONE:
8094 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8095 break;
8096
8097 default:
8098 /* Give a warning, but do as the user requests anyway. */
8099 _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8100 "workaround is not necessary for target architecture"), obfd);
8101 }
8102 }
8103 else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8104 /* For earlier architectures, we might need the workaround, but do not
8105 enable it by default. If users is running with broken hardware, they
8106 must enable the erratum fix explicitly. */
8107 globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8108 }
8109
8110 void
8111 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8112 {
8113 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8114 obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8115
8116 if (globals == NULL)
8117 return;
8118
8119 /* We assume only Cortex-M4 may require the fix. */
8120 if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8121 || out_attr[Tag_CPU_arch_profile].i != 'M')
8122 {
8123 if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8124 /* Give a warning, but do as the user requests anyway. */
8125 _bfd_error_handler
8126 (_("%pB: warning: selected STM32L4XX erratum "
8127 "workaround is not necessary for target architecture"), obfd);
8128 }
8129 }
8130
/* The VFP11 execution pipeline an instruction is issued to, as
   classified by bfd_arm_vfp11_insn_decode below.  */
enum bfd_arm_vfp11_pipe
{
  VFP11_FMAC,	/* Multiply-accumulate pipeline.  */
  VFP11_LS,	/* Load/store pipeline.  */
  VFP11_DS,	/* Divide/sqrt pipeline.  */
  VFP11_BAD	/* Not a recognised VFP instruction.  */
};
8138
8139 /* Return a VFP register number. This is encoded as RX:X for single-precision
8140 registers, or X:RX for double-precision registers, where RX is the group of
8141 four bits in the instruction encoding and X is the single extension bit.
8142 RX and X fields are specified using their lowest (starting) bit. The return
8143 value is:
8144
8145 0...31: single-precision registers s0...s31
8146 32...63: double-precision registers d0...d31.
8147
8148 Although X should be zero for VFP11 (encoding d0...d15 only), we might
8149 encounter VFP3 instructions, so we allow the full range for DP registers. */
8150
8151 static unsigned int
8152 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8153 unsigned int x)
8154 {
8155 if (is_double)
8156 return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8157 else
8158 return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8159 }
8160
8161 /* Set bits in *WMASK according to a register number REG as encoded by
8162 bfd_arm_vfp11_regno(). Ignore d16-d31. */
8163
/* Set bits in *WMASK according to a register number REG as encoded by
   bfd_arm_vfp11_regno().  Single-precision registers s0..s31 (REG 0..31)
   occupy one bit each; double-precision d0..d15 (REG 32..47) set the two
   overlapping single-precision bits.  d16..d31 are ignored.

   Use unsigned shifts: the previous `1 << reg' / `3 << ...' forms shift a
   signed int into (or past) the sign bit for reg == 31 or reg == 47,
   which is undefined behavior in C.  */

static void
bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
{
  if (reg < 32)
    *wmask |= 1u << reg;
  else if (reg < 48)
    *wmask |= 3u << ((reg - 32) * 2);
}
8172
8173 /* Return TRUE if WMASK overwrites anything in REGS. */
8174
8175 static bfd_boolean
8176 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8177 {
8178 int i;
8179
8180 for (i = 0; i < numregs; i++)
8181 {
8182 unsigned int reg = regs[i];
8183
8184 if (reg < 32 && (wmask & (1 << reg)) != 0)
8185 return TRUE;
8186
8187 reg -= 32;
8188
8189 if (reg >= 16)
8190 continue;
8191
8192 if ((wmask & (3 << (reg * 2))) != 0)
8193 return TRUE;
8194 }
8195
8196 return FALSE;
8197 }
8198
8199 /* In this function, we're interested in two things: finding input registers
8200 for VFP data-processing instructions, and finding the set of registers which
8201 arbitrary VFP instructions may write to. We use a 32-bit unsigned int to
8202 hold the written set, so FLDM etc. are easy to deal with (we're only
8203 interested in 32 SP registers or 16 dp registers, due to the VFP version
8204 implemented by the chip in question). DP registers are marked by setting
8205 both SP registers in the write mask). */
8206
/* Decode the ARM-mode VFP instruction INSN.  On return, *DESTMASK holds
   the set of SP registers written (DP registers set both overlapping SP
   bits), REGS[0..*NUMREGS-1] holds the input registers of FMAC/DS-pipe
   data-processing instructions, and the return value identifies the
   pipeline the instruction issues to (VFP11_BAD if unrecognised).  */
static enum bfd_arm_vfp11_pipe
bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
			   int *numregs)
{
  enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
  /* Bits 11:8 == 0xb select the double-precision coprocessor (cp11).  */
  bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;

  if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
    {
      unsigned int pqrs;
      unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* p:q:r:s opcode bits gathered from bits 23, 21:20 and 6.  */
      pqrs = ((insn & 0x00800000) >> 20)
	   | ((insn & 0x00300000) >> 19)
	   | ((insn & 0x00000040) >> 6);

      switch (pqrs)
	{
	case 0: /* fmac[sd].  */
	case 1: /* fnmac[sd].  */
	case 2: /* fmsc[sd].  */
	case 3: /* fnmsc[sd].  */
	  /* Three-operand multiply-accumulate: Fd is both read and
	     written, so it counts as an input too.  */
	  vpipe = VFP11_FMAC;
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = fd;
	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
	  regs[2] = fm;
	  *numregs = 3;
	  break;

	case 4: /* fmul[sd].  */
	case 5: /* fnmul[sd].  */
	case 6: /* fadd[sd].  */
	case 7: /* fsub[sd].  */
	  vpipe = VFP11_FMAC;
	  goto vfp_binop;

	case 8: /* fdiv[sd].  */
	  vpipe = VFP11_DS;
	  /* Fall through to the shared two-operand handling.  */
	  vfp_binop:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
	  regs[1] = fm;
	  *numregs = 2;
	  break;

	case 15: /* extended opcode.  */
	  {
	    /* Extension opcode lives in bits 19:16 and 7.  */
	    unsigned int extn = ((insn >> 15) & 0x1e)
			      | ((insn >> 7) & 1);

	    switch (extn)
	      {
	      case 0: /* fcpy[sd].  */
	      case 1: /* fabs[sd].  */
	      case 2: /* fneg[sd].  */
	      case 8: /* fcmp[sd].  */
	      case 9: /* fcmpe[sd].  */
	      case 10: /* fcmpz[sd].  */
	      case 11: /* fcmpez[sd].  */
	      case 16: /* fuito[sd].  */
	      case 17: /* fsito[sd].  */
	      case 24: /* ftoui[sd].  */
	      case 25: /* ftouiz[sd].  */
	      case 26: /* ftosi[sd].  */
	      case 27: /* ftosiz[sd].  */
		/* These instructions will not bounce due to underflow.  */
		*numregs = 0;
		vpipe = VFP11_FMAC;
		break;

	      case 3: /* fsqrt[sd].  */
		/* fsqrt cannot underflow, but it can (perhaps) overwrite
		   registers to cause the erratum in previous instructions.  */
		bfd_arm_vfp11_write_mask (destmask, fd);
		vpipe = VFP11_DS;
		break;

	      case 15: /* fcvt{ds,sd}.  */
		{
		  int rnum = 0;

		  bfd_arm_vfp11_write_mask (destmask, fd);

		  /* Only FCVTSD can underflow.  */
		  if ((insn & 0x100) != 0)
		    regs[rnum++] = fm;

		  *numregs = rnum;

		  vpipe = VFP11_FMAC;
		}
		break;

	      default:
		return VFP11_BAD;
	      }
	  }
	  break;

	default:
	  return VFP11_BAD;
	}
    }
  /* Two-register transfer.  */
  else if ((insn & 0x0fe00ed0) == 0x0c400a10)
    {
      unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);

      /* Bit 20 clear means the transfer is ARM->VFP, i.e. a write.  */
      if ((insn & 0x100000) == 0)
	{
	  if (is_double)
	    bfd_arm_vfp11_write_mask (destmask, fm);
	  else
	    {
	      /* FMSRR writes two consecutive SP registers.  */
	      bfd_arm_vfp11_write_mask (destmask, fm);
	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
	    }
	}

      vpipe = VFP11_LS;
    }
  else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
    {
      int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
      /* Addressing-mode bits P (23), U (not used here) and W (21).  */
      unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);

      switch (puw)
	{
	case 0: /* Two-reg transfer.  We should catch these above.  */
	  abort ();

	case 2: /* fldm[sdx].  */
	case 3:
	case 5:
	  {
	    /* Multiple load: mark every register in the list.  */
	    unsigned int i, offset = insn & 0xff;

	    if (is_double)
	      offset >>= 1;

	    for (i = fd; i < fd + offset; i++)
	      bfd_arm_vfp11_write_mask (destmask, i);
	  }
	  break;

	case 4: /* fld[sd].  */
	case 6:
	  bfd_arm_vfp11_write_mask (destmask, fd);
	  break;

	default:
	  return VFP11_BAD;
	}

      vpipe = VFP11_LS;
    }
  /* Single-register transfer. Note L==0.  */
  else if ((insn & 0x0f100e10) == 0x0e000a10)
    {
      unsigned int opcode = (insn >> 21) & 7;
      unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);

      switch (opcode)
	{
	case 0: /* fmsr/fmdlr.  */
	case 1: /* fmdhr.  */
	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
	     destination register.  I don't know if this is exactly right,
	     but it is the conservative choice.  */
	  bfd_arm_vfp11_write_mask (destmask, fn);
	  break;

	case 7: /* fmxr.  */
	  break;
	}

      vpipe = VFP11_LS;
    }

  return vpipe;
}
8390
8391
8392 static int elf32_arm_compare_mapping (const void * a, const void * b);
8393
8394
8395 /* Look for potentially-troublesome code sequences which might trigger the
8396 VFP11 denormal/antidependency erratum. See, e.g., the ARM1136 errata sheet
8397 (available from ARM) for details of the erratum. A short version is
8398 described in ld.texinfo. */
8399
8400 bfd_boolean
8401 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8402 {
8403 asection *sec;
8404 bfd_byte *contents = NULL;
8405 int state = 0;
8406 int regs[3], numregs = 0;
8407 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8408 int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8409
8410 if (globals == NULL)
8411 return FALSE;
8412
8413 /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8414 The states transition as follows:
8415
8416 0 -> 1 (vector) or 0 -> 2 (scalar)
8417 A VFP FMAC-pipeline instruction has been seen. Fill
8418 regs[0]..regs[numregs-1] with its input operands. Remember this
8419 instruction in 'first_fmac'.
8420
8421 1 -> 2
8422 Any instruction, except for a VFP instruction which overwrites
8423 regs[*].
8424
8425 1 -> 3 [ -> 0 ] or
8426 2 -> 3 [ -> 0 ]
8427 A VFP instruction has been seen which overwrites any of regs[*].
8428 We must make a veneer! Reset state to 0 before examining next
8429 instruction.
8430
8431 2 -> 0
8432 If we fail to match anything in state 2, reset to state 0 and reset
8433 the instruction pointer to the instruction after 'first_fmac'.
8434
8435 If the VFP11 vector mode is in use, there must be at least two unrelated
8436 instructions between anti-dependent VFP11 instructions to properly avoid
8437 triggering the erratum, hence the use of the extra state 1. */
8438
8439 /* If we are only performing a partial link do not bother
8440 to construct any glue. */
8441 if (bfd_link_relocatable (link_info))
8442 return TRUE;
8443
8444 /* Skip if this bfd does not correspond to an ELF image. */
8445 if (! is_arm_elf (abfd))
8446 return TRUE;
8447
8448 /* We should have chosen a fix type by the time we get here. */
8449 BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8450
8451 if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8452 return TRUE;
8453
8454 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8455 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8456 return TRUE;
8457
8458 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8459 {
8460 unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8461 struct _arm_elf_section_data *sec_data;
8462
8463 /* If we don't have executable progbits, we're not interested in this
8464 section. Also skip if section is to be excluded. */
8465 if (elf_section_type (sec) != SHT_PROGBITS
8466 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8467 || (sec->flags & SEC_EXCLUDE) != 0
8468 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8469 || sec->output_section == bfd_abs_section_ptr
8470 || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8471 continue;
8472
8473 sec_data = elf32_arm_section_data (sec);
8474
8475 if (sec_data->mapcount == 0)
8476 continue;
8477
8478 if (elf_section_data (sec)->this_hdr.contents != NULL)
8479 contents = elf_section_data (sec)->this_hdr.contents;
8480 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8481 goto error_return;
8482
8483 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8484 elf32_arm_compare_mapping);
8485
8486 for (span = 0; span < sec_data->mapcount; span++)
8487 {
8488 unsigned int span_start = sec_data->map[span].vma;
8489 unsigned int span_end = (span == sec_data->mapcount - 1)
8490 ? sec->size : sec_data->map[span + 1].vma;
8491 char span_type = sec_data->map[span].type;
8492
8493 /* FIXME: Only ARM mode is supported at present. We may need to
8494 support Thumb-2 mode also at some point. */
8495 if (span_type != 'a')
8496 continue;
8497
8498 for (i = span_start; i < span_end;)
8499 {
8500 unsigned int next_i = i + 4;
8501 unsigned int insn = bfd_big_endian (abfd)
8502 ? (((unsigned) contents[i] << 24)
8503 | (contents[i + 1] << 16)
8504 | (contents[i + 2] << 8)
8505 | contents[i + 3])
8506 : (((unsigned) contents[i + 3] << 24)
8507 | (contents[i + 2] << 16)
8508 | (contents[i + 1] << 8)
8509 | contents[i]);
8510 unsigned int writemask = 0;
8511 enum bfd_arm_vfp11_pipe vpipe;
8512
8513 switch (state)
8514 {
8515 case 0:
8516 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8517 &numregs);
8518 /* I'm assuming the VFP11 erratum can trigger with denorm
8519 operands on either the FMAC or the DS pipeline. This might
8520 lead to slightly overenthusiastic veneer insertion. */
8521 if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8522 {
8523 state = use_vector ? 1 : 2;
8524 first_fmac = i;
8525 veneer_of_insn = insn;
8526 }
8527 break;
8528
8529 case 1:
8530 {
8531 int other_regs[3], other_numregs;
8532 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8533 other_regs,
8534 &other_numregs);
8535 if (vpipe != VFP11_BAD
8536 && bfd_arm_vfp11_antidependency (writemask, regs,
8537 numregs))
8538 state = 3;
8539 else
8540 state = 2;
8541 }
8542 break;
8543
8544 case 2:
8545 {
8546 int other_regs[3], other_numregs;
8547 vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8548 other_regs,
8549 &other_numregs);
8550 if (vpipe != VFP11_BAD
8551 && bfd_arm_vfp11_antidependency (writemask, regs,
8552 numregs))
8553 state = 3;
8554 else
8555 {
8556 state = 0;
8557 next_i = first_fmac + 4;
8558 }
8559 }
8560 break;
8561
8562 case 3:
8563 abort (); /* Should be unreachable. */
8564 }
8565
8566 if (state == 3)
8567 {
8568 elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8569 bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8570
8571 elf32_arm_section_data (sec)->erratumcount += 1;
8572
8573 newerr->u.b.vfp_insn = veneer_of_insn;
8574
8575 switch (span_type)
8576 {
8577 case 'a':
8578 newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8579 break;
8580
8581 default:
8582 abort ();
8583 }
8584
8585 record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8586 first_fmac);
8587
8588 newerr->vma = -1;
8589
8590 newerr->next = sec_data->erratumlist;
8591 sec_data->erratumlist = newerr;
8592
8593 state = 0;
8594 }
8595
8596 i = next_i;
8597 }
8598 }
8599
8600 if (elf_section_data (sec)->this_hdr.contents != contents)
8601 free (contents);
8602 contents = NULL;
8603 }
8604
8605 return TRUE;
8606
8607 error_return:
8608 if (elf_section_data (sec)->this_hdr.contents != contents)
8609 free (contents);
8610
8611 return FALSE;
8612 }
8613
8614 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8615 after sections have been laid out, using specially-named symbols. */
8616
8617 void
8618 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8619 struct bfd_link_info *link_info)
8620 {
8621 asection *sec;
8622 struct elf32_arm_link_hash_table *globals;
8623 char *tmp_name;
8624
8625 if (bfd_link_relocatable (link_info))
8626 return;
8627
8628 /* Skip if this bfd does not correspond to an ELF image. */
8629 if (! is_arm_elf (abfd))
8630 return;
8631
8632 globals = elf32_arm_hash_table (link_info);
8633 if (globals == NULL)
8634 return;
8635
8636 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8637 (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8638 BFD_ASSERT (tmp_name);
8639
8640 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8641 {
8642 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8643 elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8644
8645 for (; errnode != NULL; errnode = errnode->next)
8646 {
8647 struct elf_link_hash_entry *myh;
8648 bfd_vma vma;
8649
8650 switch (errnode->type)
8651 {
8652 case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8653 case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8654 /* Find veneer symbol. */
8655 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8656 errnode->u.b.veneer->u.v.id);
8657
8658 myh = elf_link_hash_lookup
8659 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8660
8661 if (myh == NULL)
8662 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8663 abfd, "VFP11", tmp_name);
8664
8665 vma = myh->root.u.def.section->output_section->vma
8666 + myh->root.u.def.section->output_offset
8667 + myh->root.u.def.value;
8668
8669 errnode->u.b.veneer->vma = vma;
8670 break;
8671
8672 case VFP11_ERRATUM_ARM_VENEER:
8673 case VFP11_ERRATUM_THUMB_VENEER:
8674 /* Find return location. */
8675 sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8676 errnode->u.v.id);
8677
8678 myh = elf_link_hash_lookup
8679 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8680
8681 if (myh == NULL)
8682 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8683 abfd, "VFP11", tmp_name);
8684
8685 vma = myh->root.u.def.section->output_section->vma
8686 + myh->root.u.def.section->output_offset
8687 + myh->root.u.def.value;
8688
8689 errnode->u.v.branch->vma = vma;
8690 break;
8691
8692 default:
8693 abort ();
8694 }
8695 }
8696 }
8697
8698 free (tmp_name);
8699 }
8700
8701 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8702 return locations after sections have been laid out, using
8703 specially-named symbols. */
8704
8705 void
8706 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8707 struct bfd_link_info *link_info)
8708 {
8709 asection *sec;
8710 struct elf32_arm_link_hash_table *globals;
8711 char *tmp_name;
8712
8713 if (bfd_link_relocatable (link_info))
8714 return;
8715
8716 /* Skip if this bfd does not correspond to an ELF image. */
8717 if (! is_arm_elf (abfd))
8718 return;
8719
8720 globals = elf32_arm_hash_table (link_info);
8721 if (globals == NULL)
8722 return;
8723
8724 tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8725 (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8726 BFD_ASSERT (tmp_name);
8727
8728 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8729 {
8730 struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8731 elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8732
8733 for (; errnode != NULL; errnode = errnode->next)
8734 {
8735 struct elf_link_hash_entry *myh;
8736 bfd_vma vma;
8737
8738 switch (errnode->type)
8739 {
8740 case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8741 /* Find veneer symbol. */
8742 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8743 errnode->u.b.veneer->u.v.id);
8744
8745 myh = elf_link_hash_lookup
8746 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8747
8748 if (myh == NULL)
8749 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8750 abfd, "STM32L4XX", tmp_name);
8751
8752 vma = myh->root.u.def.section->output_section->vma
8753 + myh->root.u.def.section->output_offset
8754 + myh->root.u.def.value;
8755
8756 errnode->u.b.veneer->vma = vma;
8757 break;
8758
8759 case STM32L4XX_ERRATUM_VENEER:
8760 /* Find return location. */
8761 sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8762 errnode->u.v.id);
8763
8764 myh = elf_link_hash_lookup
8765 (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8766
8767 if (myh == NULL)
8768 _bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8769 abfd, "STM32L4XX", tmp_name);
8770
8771 vma = myh->root.u.def.section->output_section->vma
8772 + myh->root.u.def.section->output_offset
8773 + myh->root.u.def.value;
8774
8775 errnode->u.v.branch->vma = vma;
8776 break;
8777
8778 default:
8779 abort ();
8780 }
8781 }
8782 }
8783
8784 free (tmp_name);
8785 }
8786
8787 static inline bfd_boolean
8788 is_thumb2_ldmia (const insn32 insn)
8789 {
8790 /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8791 1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll. */
8792 return (insn & 0xffd02000) == 0xe8900000;
8793 }
8794
8795 static inline bfd_boolean
8796 is_thumb2_ldmdb (const insn32 insn)
8797 {
8798 /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8799 1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll. */
8800 return (insn & 0xffd02000) == 0xe9100000;
8801 }
8802
8803 static inline bfd_boolean
8804 is_thumb2_vldm (const insn32 insn)
8805 {
8806 /* A6.5 Extension register load or store instruction
8807 A7.7.229
8808 We look for SP 32-bit and DP 64-bit registers.
8809 Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8810 <list> is consecutive 64-bit registers
8811 1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8812 Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8813 <list> is consecutive 32-bit registers
8814 1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8815 if P==0 && U==1 && W==1 && Rn=1101 VPOP
8816 if PUW=010 || PUW=011 || PUW=101 VLDM. */
8817 return
8818 (((insn & 0xfe100f00) == 0xec100b00) ||
8819 ((insn & 0xfe100f00) == 0xec100a00))
8820 && /* (IA without !). */
8821 (((((insn << 7) >> 28) & 0xd) == 0x4)
8822 /* (IA with !), includes VPOP (when reg number is SP). */
8823 || ((((insn << 7) >> 28) & 0xd) == 0x5)
8824 /* (DB with !). */
8825 || ((((insn << 7) >> 28) & 0xd) == 0x9));
8826 }
8827
8828 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
8829 VLDM opcode and:
8830 - computes the number and the mode of memory accesses
8831 - decides if the replacement should be done:
8832 . replaces only if > 8-word accesses
8833 . or (testing purposes only) replaces all accesses. */
8834
8835 static bfd_boolean
8836 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8837 bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8838 {
8839 int nb_words = 0;
8840
8841 /* The field encoding the register list is the same for both LDMIA
8842 and LDMDB encodings. */
8843 if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8844 nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8845 else if (is_thumb2_vldm (insn))
8846 nb_words = (insn & 0xff);
8847
8848 /* DEFAULT mode accounts for the real bug condition situation,
8849 ALL mode inserts stubs for each LDM/VLDM instruction (testing). */
8850 return
8851 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8852 (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8853 }
8854
8855 /* Look for potentially-troublesome code sequences which might trigger
8856 the STM STM32L4XX erratum. */
8857
8858 bfd_boolean
8859 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8860 struct bfd_link_info *link_info)
8861 {
8862 asection *sec;
8863 bfd_byte *contents = NULL;
8864 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8865
8866 if (globals == NULL)
8867 return FALSE;
8868
8869 /* If we are only performing a partial link do not bother
8870 to construct any glue. */
8871 if (bfd_link_relocatable (link_info))
8872 return TRUE;
8873
8874 /* Skip if this bfd does not correspond to an ELF image. */
8875 if (! is_arm_elf (abfd))
8876 return TRUE;
8877
8878 if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8879 return TRUE;
8880
8881 /* Skip this BFD if it corresponds to an executable or dynamic object. */
8882 if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8883 return TRUE;
8884
8885 for (sec = abfd->sections; sec != NULL; sec = sec->next)
8886 {
8887 unsigned int i, span;
8888 struct _arm_elf_section_data *sec_data;
8889
8890 /* If we don't have executable progbits, we're not interested in this
8891 section. Also skip if section is to be excluded. */
8892 if (elf_section_type (sec) != SHT_PROGBITS
8893 || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8894 || (sec->flags & SEC_EXCLUDE) != 0
8895 || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8896 || sec->output_section == bfd_abs_section_ptr
8897 || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8898 continue;
8899
8900 sec_data = elf32_arm_section_data (sec);
8901
8902 if (sec_data->mapcount == 0)
8903 continue;
8904
8905 if (elf_section_data (sec)->this_hdr.contents != NULL)
8906 contents = elf_section_data (sec)->this_hdr.contents;
8907 else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8908 goto error_return;
8909
8910 qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8911 elf32_arm_compare_mapping);
8912
8913 for (span = 0; span < sec_data->mapcount; span++)
8914 {
8915 unsigned int span_start = sec_data->map[span].vma;
8916 unsigned int span_end = (span == sec_data->mapcount - 1)
8917 ? sec->size : sec_data->map[span + 1].vma;
8918 char span_type = sec_data->map[span].type;
8919 int itblock_current_pos = 0;
8920
8921 /* Only Thumb2 mode need be supported with this CM4 specific
8922 code, we should not encounter any arm mode eg span_type
8923 != 'a'. */
8924 if (span_type != 't')
8925 continue;
8926
8927 for (i = span_start; i < span_end;)
8928 {
8929 unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8930 bfd_boolean insn_32bit = FALSE;
8931 bfd_boolean is_ldm = FALSE;
8932 bfd_boolean is_vldm = FALSE;
8933 bfd_boolean is_not_last_in_it_block = FALSE;
8934
8935 /* The first 16-bits of all 32-bit thumb2 instructions start
8936 with opcode[15..13]=0b111 and the encoded op1 can be anything
8937 except opcode[12..11]!=0b00.
8938 See 32-bit Thumb instruction encoding. */
8939 if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8940 insn_32bit = TRUE;
8941
8942 /* Compute the predicate that tells if the instruction
8943 is concerned by the IT block
8944 - Creates an error if there is a ldm that is not
8945 last in the IT block thus cannot be replaced
8946 - Otherwise we can create a branch at the end of the
8947 IT block, it will be controlled naturally by IT
8948 with the proper pseudo-predicate
8949 - So the only interesting predicate is the one that
8950 tells that we are not on the last item of an IT
8951 block. */
8952 if (itblock_current_pos != 0)
8953 is_not_last_in_it_block = !!--itblock_current_pos;
8954
8955 if (insn_32bit)
8956 {
8957 /* Load the rest of the insn (in manual-friendly order). */
8958 insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8959 is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8960 is_vldm = is_thumb2_vldm (insn);
8961
8962 /* Veneers are created for (v)ldm depending on
8963 option flags and memory accesses conditions; but
8964 if the instruction is not the last instruction of
8965 an IT block, we cannot create a jump there, so we
8966 bail out. */
8967 if ((is_ldm || is_vldm)
8968 && stm32l4xx_need_create_replacing_stub
8969 (insn, globals->stm32l4xx_fix))
8970 {
8971 if (is_not_last_in_it_block)
8972 {
8973 _bfd_error_handler
8974 /* xgettext:c-format */
8975 (_("%pB(%pA+%#x): error: multiple load detected"
8976 " in non-last IT block instruction:"
8977 " STM32L4XX veneer cannot be generated; "
8978 "use gcc option -mrestrict-it to generate"
8979 " only one instruction per IT block"),
8980 abfd, sec, i);
8981 }
8982 else
8983 {
8984 elf32_stm32l4xx_erratum_list *newerr =
8985 (elf32_stm32l4xx_erratum_list *)
8986 bfd_zmalloc
8987 (sizeof (elf32_stm32l4xx_erratum_list));
8988
8989 elf32_arm_section_data (sec)
8990 ->stm32l4xx_erratumcount += 1;
8991 newerr->u.b.insn = insn;
8992 /* We create only thumb branches. */
8993 newerr->type =
8994 STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8995 record_stm32l4xx_erratum_veneer
8996 (link_info, newerr, abfd, sec,
8997 i,
8998 is_ldm ?
8999 STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
9000 STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
9001 newerr->vma = -1;
9002 newerr->next = sec_data->stm32l4xx_erratumlist;
9003 sec_data->stm32l4xx_erratumlist = newerr;
9004 }
9005 }
9006 }
9007 else
9008 {
9009 /* A7.7.37 IT p208
9010 IT blocks are only encoded in T1
9011 Encoding T1: IT{x{y{z}}} <firstcond>
9012 1 0 1 1 - 1 1 1 1 - firstcond - mask
9013 if mask = '0000' then see 'related encodings'
9014 We don't deal with UNPREDICTABLE, just ignore these.
9015 There can be no nested IT blocks so an IT block
9016 is naturally a new one for which it is worth
9017 computing its size. */
9018 bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
9019 && ((insn & 0x000f) != 0x0000);
9020 /* If we have a new IT block we compute its size. */
9021 if (is_newitblock)
9022 {
9023 /* Compute the number of instructions controlled
9024 by the IT block, it will be used to decide
9025 whether we are inside an IT block or not. */
9026 unsigned int mask = insn & 0x000f;
9027 itblock_current_pos = 4 - ctz (mask);
9028 }
9029 }
9030
9031 i += insn_32bit ? 4 : 2;
9032 }
9033 }
9034
9035 if (elf_section_data (sec)->this_hdr.contents != contents)
9036 free (contents);
9037 contents = NULL;
9038 }
9039
9040 return TRUE;
9041
9042 error_return:
9043 if (elf_section_data (sec)->this_hdr.contents != contents)
9044 free (contents);
9045
9046 return FALSE;
9047 }
9048
9049 /* Set target relocation values needed during linking. */
9050
9051 void
9052 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9053 struct bfd_link_info *link_info,
9054 struct elf32_arm_params *params)
9055 {
9056 struct elf32_arm_link_hash_table *globals;
9057
9058 globals = elf32_arm_hash_table (link_info);
9059 if (globals == NULL)
9060 return;
9061
9062 globals->target1_is_rel = params->target1_is_rel;
9063 if (globals->fdpic_p)
9064 globals->target2_reloc = R_ARM_GOT32;
9065 else if (strcmp (params->target2_type, "rel") == 0)
9066 globals->target2_reloc = R_ARM_REL32;
9067 else if (strcmp (params->target2_type, "abs") == 0)
9068 globals->target2_reloc = R_ARM_ABS32;
9069 else if (strcmp (params->target2_type, "got-rel") == 0)
9070 globals->target2_reloc = R_ARM_GOT_PREL;
9071 else
9072 {
9073 _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9074 params->target2_type);
9075 }
9076 globals->fix_v4bx = params->fix_v4bx;
9077 globals->use_blx |= params->use_blx;
9078 globals->vfp11_fix = params->vfp11_denorm_fix;
9079 globals->stm32l4xx_fix = params->stm32l4xx_fix;
9080 if (globals->fdpic_p)
9081 globals->pic_veneer = 1;
9082 else
9083 globals->pic_veneer = params->pic_veneer;
9084 globals->fix_cortex_a8 = params->fix_cortex_a8;
9085 globals->fix_arm1176 = params->fix_arm1176;
9086 globals->cmse_implib = params->cmse_implib;
9087 globals->in_implib_bfd = params->in_implib_bfd;
9088
9089 BFD_ASSERT (is_arm_elf (output_bfd));
9090 elf_arm_tdata (output_bfd)->no_enum_size_warning
9091 = params->no_enum_size_warning;
9092 elf_arm_tdata (output_bfd)->no_wchar_size_warning
9093 = params->no_wchar_size_warning;
9094 }
9095
9096 /* Replace the target offset of a Thumb bl or b.w instruction. */
9097
9098 static void
9099 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9100 {
9101 bfd_vma upper;
9102 bfd_vma lower;
9103 int reloc_sign;
9104
9105 BFD_ASSERT ((offset & 1) == 0);
9106
9107 upper = bfd_get_16 (abfd, insn);
9108 lower = bfd_get_16 (abfd, insn + 2);
9109 reloc_sign = (offset < 0) ? 1 : 0;
9110 upper = (upper & ~(bfd_vma) 0x7ff)
9111 | ((offset >> 12) & 0x3ff)
9112 | (reloc_sign << 10);
9113 lower = (lower & ~(bfd_vma) 0x2fff)
9114 | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9115 | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9116 | ((offset >> 1) & 0x7ff);
9117 bfd_put_16 (abfd, upper, insn);
9118 bfd_put_16 (abfd, lower, insn + 2);
9119 }
9120
/* Thumb code calling an ARM function.  Populate (on first use) the
   Thumb-to-ARM interworking stub for NAME, then retarget the Thumb BL
   at HIT_DATA so it branches to the stub.  Returns TRUE on success,
   FALSE if interworking is not enabled for the callee's BFD or the
   glue symbol cannot be found.  */

static int
elf32_thumb_to_arm_stub (struct bfd_link_info * info,
			 const char * name,
			 bfd * input_bfd,
			 bfd * output_bfd,
			 asection * input_section,
			 bfd_byte * hit_data,
			 asection * sym_sec,
			 bfd_vma offset,
			 bfd_signed_vma addend,
			 bfd_vma val,
			 char **error_message)
{
  asection * s = 0;
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created for NAME earlier in the link.  */
  myh = find_thumb_glue (info, name, error_message);
  if (myh == NULL)
    return FALSE;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  s = bfd_get_linker_section (globals->bfd_of_glue_owner,
			      THUMB2ARM_GLUE_SECTION_NAME);

  BFD_ASSERT (s != NULL);
  BFD_ASSERT (s->contents != NULL);
  BFD_ASSERT (s->output_section != NULL);

  /* An odd value flags a stub whose contents have not been written
     yet; the low bit is cleared below once the stub is emitted.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");

	  return FALSE;
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      /* Emit the stub body: two Thumb insns followed by an ARM branch.  */
      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
		      s->contents + my_offset);

      put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
		      s->contents + my_offset + 2);

      ret_offset =
	/* Address of destination of the stub.  */
	((bfd_signed_vma) val)
	- ((bfd_signed_vma)
	   /* Offset from the start of the current section
	      to the start of the stubs.  */
	   (s->output_offset
	    /* Offset of the start of this stub from the start of the stubs.  */
	    + my_offset
	    /* Address of the start of the current section.  */
	    + s->output_section->vma)
	   /* The branch instruction is 4 bytes into the stub.  */
	   + 4
	   /* ARM branches work from the pc of the instruction + 8.  */
	   + 8);

      put_arm_insn (globals, output_bfd,
		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
		    s->contents + my_offset + 4);
    }

  BFD_ASSERT (my_offset <= globals->thumb_glue_size);

  /* Now go back and fix up the original BL insn to point to here.  */
  ret_offset =
    /* Address of where the stub is located.  */
    (s->output_section->vma + s->output_offset + my_offset)
    /* Address of where the BL is located.  */
    - (input_section->output_section->vma + input_section->output_offset
       + offset)
    /* Addend in the relocation.  */
    - addend
    /* Biassing for PC-relative addressing.  */
    - 8;

  insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);

  return TRUE;
}
9221
/* Populate an Arm to Thumb stub.  Returns the stub symbol.  The stub
   jumps to VAL (the Thumb destination); one of three stub flavours is
   emitted depending on whether the output is position independent and
   whether BLX is available.  */

static struct elf_link_hash_entry *
elf32_arm_create_thumb_stub (struct bfd_link_info * info,
			     const char * name,
			     bfd * input_bfd,
			     bfd * output_bfd,
			     asection * sym_sec,
			     bfd_vma val,
			     asection * s,
			     char ** error_message)
{
  bfd_vma my_offset;
  long int ret_offset;
  struct elf_link_hash_entry * myh;
  struct elf32_arm_link_hash_table * globals;

  /* Look up the glue symbol created for NAME earlier in the link.  */
  myh = find_arm_glue (info, name, error_message);
  if (myh == NULL)
    return NULL;

  globals = elf32_arm_hash_table (info);
  BFD_ASSERT (globals != NULL);
  BFD_ASSERT (globals->bfd_of_glue_owner != NULL);

  my_offset = myh->root.u.def.value;

  /* An odd value flags a stub whose contents have not been written
     yet; the low bit is cleared below once the stub is emitted.  */
  if ((my_offset & 0x01) == 0x01)
    {
      if (sym_sec != NULL
	  && sym_sec->owner != NULL
	  && !INTERWORK_FLAG (sym_sec->owner))
	{
	  /* Warn but carry on, unlike the Thumb->ARM case.  */
	  _bfd_error_handler
	    (_("%pB(%s): warning: interworking not enabled;"
	       " first occurrence: %pB: %s call to %s"),
	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
	}

      --my_offset;
      myh->root.u.def.value = my_offset;

      if (bfd_link_pic (info)
	  || globals->root.is_relocatable_executable
	  || globals->pic_veneer)
	{
	  /* For relocatable objects we can't use absolute addresses,
	     so construct the address from a relative offset.  */
	  /* TODO: If the offset is small it's probably worth
	     constructing the address with adds.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
			s->contents + my_offset);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
			s->contents + my_offset + 4);
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
			s->contents + my_offset + 8);
	  /* Adjust the offset by 4 for the position of the add,
	     and 8 for the pipeline offset.  */
	  ret_offset = (val - (s->output_offset
			       + s->output_section->vma
			       + my_offset + 12))
		       | 1;
	  bfd_put_32 (output_bfd, ret_offset,
		      s->contents + my_offset + 12);
	}
      else if (globals->use_blx)
	{
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
			s->contents + my_offset);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
		      s->contents + my_offset + 4);
	}
      else
	{
	  /* Pre-v5 absolute stub: load address into r12 and bx.  */
	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
			s->contents + my_offset);

	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
			s->contents + my_offset + 4);

	  /* It's a thumb address.  Add the low order bit.  */
	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
		      s->contents + my_offset + 8);

	  my_offset += 12;
	}
    }

  BFD_ASSERT (my_offset <= globals->arm_glue_size);

  return myh;
}
9316
9317 /* Arm code calling a Thumb function. */
9318
9319 static int
9320 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9321 const char * name,
9322 bfd * input_bfd,
9323 bfd * output_bfd,
9324 asection * input_section,
9325 bfd_byte * hit_data,
9326 asection * sym_sec,
9327 bfd_vma offset,
9328 bfd_signed_vma addend,
9329 bfd_vma val,
9330 char **error_message)
9331 {
9332 unsigned long int tmp;
9333 bfd_vma my_offset;
9334 asection * s;
9335 long int ret_offset;
9336 struct elf_link_hash_entry * myh;
9337 struct elf32_arm_link_hash_table * globals;
9338
9339 globals = elf32_arm_hash_table (info);
9340 BFD_ASSERT (globals != NULL);
9341 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9342
9343 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9344 ARM2THUMB_GLUE_SECTION_NAME);
9345 BFD_ASSERT (s != NULL);
9346 BFD_ASSERT (s->contents != NULL);
9347 BFD_ASSERT (s->output_section != NULL);
9348
9349 myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9350 sym_sec, val, s, error_message);
9351 if (!myh)
9352 return FALSE;
9353
9354 my_offset = myh->root.u.def.value;
9355 tmp = bfd_get_32 (input_bfd, hit_data);
9356 tmp = tmp & 0xFF000000;
9357
9358 /* Somehow these are both 4 too far, so subtract 8. */
9359 ret_offset = (s->output_offset
9360 + my_offset
9361 + s->output_section->vma
9362 - (input_section->output_offset
9363 + input_section->output_section->vma
9364 + offset + addend)
9365 - 8);
9366
9367 tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9368
9369 bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9370
9371 return TRUE;
9372 }
9373
9374 /* Populate Arm stub for an exported Thumb function. */
9375
9376 static bfd_boolean
9377 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9378 {
9379 struct bfd_link_info * info = (struct bfd_link_info *) inf;
9380 asection * s;
9381 struct elf_link_hash_entry * myh;
9382 struct elf32_arm_link_hash_entry *eh;
9383 struct elf32_arm_link_hash_table * globals;
9384 asection *sec;
9385 bfd_vma val;
9386 char *error_message;
9387
9388 eh = elf32_arm_hash_entry (h);
9389 /* Allocate stubs for exported Thumb functions on v4t. */
9390 if (eh->export_glue == NULL)
9391 return TRUE;
9392
9393 globals = elf32_arm_hash_table (info);
9394 BFD_ASSERT (globals != NULL);
9395 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9396
9397 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9398 ARM2THUMB_GLUE_SECTION_NAME);
9399 BFD_ASSERT (s != NULL);
9400 BFD_ASSERT (s->contents != NULL);
9401 BFD_ASSERT (s->output_section != NULL);
9402
9403 sec = eh->export_glue->root.u.def.section;
9404
9405 BFD_ASSERT (sec->output_section != NULL);
9406
9407 val = eh->export_glue->root.u.def.value + sec->output_offset
9408 + sec->output_section->vma;
9409
9410 myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9411 h->root.u.def.section->owner,
9412 globals->obfd, sec, val, s,
9413 &error_message);
9414 BFD_ASSERT (myh);
9415 return TRUE;
9416 }
9417
9418 /* Populate ARMv4 BX veneers. Returns the absolute adress of the veneer. */
9419
9420 static bfd_vma
9421 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9422 {
9423 bfd_byte *p;
9424 bfd_vma glue_addr;
9425 asection *s;
9426 struct elf32_arm_link_hash_table *globals;
9427
9428 globals = elf32_arm_hash_table (info);
9429 BFD_ASSERT (globals != NULL);
9430 BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9431
9432 s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9433 ARM_BX_GLUE_SECTION_NAME);
9434 BFD_ASSERT (s != NULL);
9435 BFD_ASSERT (s->contents != NULL);
9436 BFD_ASSERT (s->output_section != NULL);
9437
9438 BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9439
9440 glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9441
9442 if ((globals->bx_glue_offset[reg] & 1) == 0)
9443 {
9444 p = s->contents + glue_addr;
9445 bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9446 bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9447 bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9448 globals->bx_glue_offset[reg] |= 1;
9449 }
9450
9451 return glue_addr + s->output_section->vma + s->output_offset;
9452 }
9453
9454 /* Generate Arm stubs for exported Thumb symbols. */
9455 static void
9456 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9457 struct bfd_link_info *link_info)
9458 {
9459 struct elf32_arm_link_hash_table * globals;
9460
9461 if (link_info == NULL)
9462 /* Ignore this if we are not called by the ELF backend linker. */
9463 return;
9464
9465 globals = elf32_arm_hash_table (link_info);
9466 if (globals == NULL)
9467 return;
9468
9469 /* If blx is available then exported Thumb symbols are OK and there is
9470 nothing to do. */
9471 if (globals->use_blx)
9472 return;
9473
9474 elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9475 link_info);
9476 }
9477
9478 /* Reserve space for COUNT dynamic relocations in relocation selection
9479 SRELOC. */
9480
9481 static void
9482 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9483 bfd_size_type count)
9484 {
9485 struct elf32_arm_link_hash_table *htab;
9486
9487 htab = elf32_arm_hash_table (info);
9488 BFD_ASSERT (htab->root.dynamic_sections_created);
9489 if (sreloc == NULL)
9490 abort ();
9491 sreloc->size += RELOC_SIZE (htab) * count;
9492 }
9493
9494 /* Reserve space for COUNT R_ARM_IRELATIVE relocations. If the link is
9495 dynamic, the relocations should go in SRELOC, otherwise they should
9496 go in the special .rel.iplt section. */
9497
9498 static void
9499 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9500 bfd_size_type count)
9501 {
9502 struct elf32_arm_link_hash_table *htab;
9503
9504 htab = elf32_arm_hash_table (info);
9505 if (!htab->root.dynamic_sections_created)
9506 htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9507 else
9508 {
9509 BFD_ASSERT (sreloc != NULL);
9510 sreloc->size += RELOC_SIZE (htab) * count;
9511 }
9512 }
9513
9514 /* Add relocation REL to the end of relocation section SRELOC. */
9515
9516 static void
9517 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9518 asection *sreloc, Elf_Internal_Rela *rel)
9519 {
9520 bfd_byte *loc;
9521 struct elf32_arm_link_hash_table *htab;
9522
9523 htab = elf32_arm_hash_table (info);
9524 if (!htab->root.dynamic_sections_created
9525 && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9526 sreloc = htab->root.irelplt;
9527 if (sreloc == NULL)
9528 abort ();
9529 loc = sreloc->contents;
9530 loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9531 if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9532 abort ();
9533 SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9534 }
9535
/* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
   to .plt.  Also reserves the matching dynamic relocation and (except
   on Symbian) the .got.plt slot.  */

static void
elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
			      bfd_boolean is_iplt_entry,
			      union gotplt_union *root_plt,
			      struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  asection *splt;
  asection *sgotplt;

  htab = elf32_arm_hash_table (info);

  if (is_iplt_entry)
    {
      splt = htab->root.iplt;
      sgotplt = htab->root.igotplt;

      /* NaCl uses a special first entry in .iplt too.  */
      if (htab->nacl_p && splt->size == 0)
	splt->size += htab->plt_header_size;

      /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
      elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
    }
  else
    {
      splt = htab->root.splt;
      sgotplt = htab->root.sgotplt;

      if (htab->fdpic_p)
	{
	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
	  /* For lazy binding, relocations will be put into .rel.plt, in
	     .rel.got otherwise.  */
	  /* FIXME: today we don't support lazy binding so put it in .rel.got */
	  if (info->flags & DF_BIND_NOW)
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}
      else
	{
	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
	}

      /* If this is the first .plt entry, make room for the special
	 first entry.  */
      if (splt->size == 0)
	splt->size += htab->plt_header_size;

      htab->next_tls_desc_index++;
    }

  /* Allocate the PLT entry itself, including any leading Thumb stub.
     The recorded offset points at the ARM entry proper; the Thumb
     stub (if any) sits immediately before it.  */
  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
    splt->size += PLT_THUMB_STUB_SIZE;
  root_plt->offset = splt->size;
  splt->size += htab->plt_entry_size;

  if (!htab->symbian_p)
    {
      /* We also need to make an entry in the .got.plt section, which
	 will be placed in the .got section by the linker script.  */
      if (is_iplt_entry)
	arm_plt->got_offset = sgotplt->size;
      else
	/* Discount the 8-byte TLS descriptor slots already counted
	   in sgotplt->size.  */
	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
      if (htab->fdpic_p)
	/* Function descriptor takes 64 bits in GOT.  */
	sgotplt->size += 8;
      else
	sgotplt->size += 4;
    }
}
9615
9616 static bfd_vma
9617 arm_movw_immediate (bfd_vma value)
9618 {
9619 return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9620 }
9621
9622 static bfd_vma
9623 arm_movt_immediate (bfd_vma value)
9624 {
9625 return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9626 }
9627
9628 /* Fill in a PLT entry and its associated GOT slot. If DYNINDX == -1,
9629 the entry lives in .iplt and resolves to (*SYM_VALUE)().
9630 Otherwise, DYNINDX is the index of the symbol in the dynamic
9631 symbol table and SYM_VALUE is undefined.
9632
9633 ROOT_PLT points to the offset of the PLT entry from the start of its
9634 section (.iplt or .plt). ARM_PLT points to the symbol's ARM-specific
9635 bookkeeping information.
9636
9637 Returns FALSE if there was a problem. */
9638
9639 static bfd_boolean
9640 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9641 union gotplt_union *root_plt,
9642 struct arm_plt_info *arm_plt,
9643 int dynindx, bfd_vma sym_value)
9644 {
9645 struct elf32_arm_link_hash_table *htab;
9646 asection *sgot;
9647 asection *splt;
9648 asection *srel;
9649 bfd_byte *loc;
9650 bfd_vma plt_index;
9651 Elf_Internal_Rela rel;
9652 bfd_vma plt_header_size;
9653 bfd_vma got_header_size;
9654
9655 htab = elf32_arm_hash_table (info);
9656
9657 /* Pick the appropriate sections and sizes. */
9658 if (dynindx == -1)
9659 {
9660 splt = htab->root.iplt;
9661 sgot = htab->root.igotplt;
9662 srel = htab->root.irelplt;
9663
9664 /* There are no reserved entries in .igot.plt, and no special
9665 first entry in .iplt. */
9666 got_header_size = 0;
9667 plt_header_size = 0;
9668 }
9669 else
9670 {
9671 splt = htab->root.splt;
9672 sgot = htab->root.sgotplt;
9673 srel = htab->root.srelplt;
9674
9675 got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9676 plt_header_size = htab->plt_header_size;
9677 }
9678 BFD_ASSERT (splt != NULL && srel != NULL);
9679
9680 /* Fill in the entry in the procedure linkage table. */
9681 if (htab->symbian_p)
9682 {
9683 BFD_ASSERT (dynindx >= 0);
9684 put_arm_insn (htab, output_bfd,
9685 elf32_arm_symbian_plt_entry[0],
9686 splt->contents + root_plt->offset);
9687 bfd_put_32 (output_bfd,
9688 elf32_arm_symbian_plt_entry[1],
9689 splt->contents + root_plt->offset + 4);
9690
9691 /* Fill in the entry in the .rel.plt section. */
9692 rel.r_offset = (splt->output_section->vma
9693 + splt->output_offset
9694 + root_plt->offset + 4);
9695 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
9696
9697 /* Get the index in the procedure linkage table which
9698 corresponds to this symbol. This is the index of this symbol
9699 in all the symbols for which we are making plt entries. The
9700 first entry in the procedure linkage table is reserved. */
9701 plt_index = ((root_plt->offset - plt_header_size)
9702 / htab->plt_entry_size);
9703 }
9704 else
9705 {
9706 bfd_vma got_offset, got_address, plt_address;
9707 bfd_vma got_displacement, initial_got_entry;
9708 bfd_byte * ptr;
9709
9710 BFD_ASSERT (sgot != NULL);
9711
9712 /* Get the offset into the .(i)got.plt table of the entry that
9713 corresponds to this function. */
9714 got_offset = (arm_plt->got_offset & -2);
9715
9716 /* Get the index in the procedure linkage table which
9717 corresponds to this symbol. This is the index of this symbol
9718 in all the symbols for which we are making plt entries.
9719 After the reserved .got.plt entries, all symbols appear in
9720 the same order as in .plt. */
9721 if (htab->fdpic_p)
9722 /* Function descriptor takes 8 bytes. */
9723 plt_index = (got_offset - got_header_size) / 8;
9724 else
9725 plt_index = (got_offset - got_header_size) / 4;
9726
9727 /* Calculate the address of the GOT entry. */
9728 got_address = (sgot->output_section->vma
9729 + sgot->output_offset
9730 + got_offset);
9731
9732 /* ...and the address of the PLT entry. */
9733 plt_address = (splt->output_section->vma
9734 + splt->output_offset
9735 + root_plt->offset);
9736
9737 ptr = splt->contents + root_plt->offset;
9738 if (htab->vxworks_p && bfd_link_pic (info))
9739 {
9740 unsigned int i;
9741 bfd_vma val;
9742
9743 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9744 {
9745 val = elf32_arm_vxworks_shared_plt_entry[i];
9746 if (i == 2)
9747 val |= got_address - sgot->output_section->vma;
9748 if (i == 5)
9749 val |= plt_index * RELOC_SIZE (htab);
9750 if (i == 2 || i == 5)
9751 bfd_put_32 (output_bfd, val, ptr);
9752 else
9753 put_arm_insn (htab, output_bfd, val, ptr);
9754 }
9755 }
9756 else if (htab->vxworks_p)
9757 {
9758 unsigned int i;
9759 bfd_vma val;
9760
9761 for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9762 {
9763 val = elf32_arm_vxworks_exec_plt_entry[i];
9764 if (i == 2)
9765 val |= got_address;
9766 if (i == 4)
9767 val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9768 if (i == 5)
9769 val |= plt_index * RELOC_SIZE (htab);
9770 if (i == 2 || i == 5)
9771 bfd_put_32 (output_bfd, val, ptr);
9772 else
9773 put_arm_insn (htab, output_bfd, val, ptr);
9774 }
9775
9776 loc = (htab->srelplt2->contents
9777 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9778
9779 /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9780 referencing the GOT for this PLT entry. */
9781 rel.r_offset = plt_address + 8;
9782 rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9783 rel.r_addend = got_offset;
9784 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9785 loc += RELOC_SIZE (htab);
9786
9787 /* Create the R_ARM_ABS32 relocation referencing the
9788 beginning of the PLT for this GOT entry. */
9789 rel.r_offset = got_address;
9790 rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9791 rel.r_addend = 0;
9792 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9793 }
9794 else if (htab->nacl_p)
9795 {
9796 /* Calculate the displacement between the PLT slot and the
9797 common tail that's part of the special initial PLT slot. */
9798 int32_t tail_displacement
9799 = ((splt->output_section->vma + splt->output_offset
9800 + ARM_NACL_PLT_TAIL_OFFSET)
9801 - (plt_address + htab->plt_entry_size + 4));
9802 BFD_ASSERT ((tail_displacement & 3) == 0);
9803 tail_displacement >>= 2;
9804
9805 BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9806 || (-tail_displacement & 0xff000000) == 0);
9807
9808 /* Calculate the displacement between the PLT slot and the entry
9809 in the GOT. The offset accounts for the value produced by
9810 adding to pc in the penultimate instruction of the PLT stub. */
9811 got_displacement = (got_address
9812 - (plt_address + htab->plt_entry_size));
9813
9814 /* NaCl does not support interworking at all. */
9815 BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9816
9817 put_arm_insn (htab, output_bfd,
9818 elf32_arm_nacl_plt_entry[0]
9819 | arm_movw_immediate (got_displacement),
9820 ptr + 0);
9821 put_arm_insn (htab, output_bfd,
9822 elf32_arm_nacl_plt_entry[1]
9823 | arm_movt_immediate (got_displacement),
9824 ptr + 4);
9825 put_arm_insn (htab, output_bfd,
9826 elf32_arm_nacl_plt_entry[2],
9827 ptr + 8);
9828 put_arm_insn (htab, output_bfd,
9829 elf32_arm_nacl_plt_entry[3]
9830 | (tail_displacement & 0x00ffffff),
9831 ptr + 12);
9832 }
9833 else if (htab->fdpic_p)
9834 {
9835 const bfd_vma *plt_entry = using_thumb_only(htab)
9836 ? elf32_arm_fdpic_thumb_plt_entry
9837 : elf32_arm_fdpic_plt_entry;
9838
9839 /* Fill-up Thumb stub if needed. */
9840 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9841 {
9842 put_thumb_insn (htab, output_bfd,
9843 elf32_arm_plt_thumb_stub[0], ptr - 4);
9844 put_thumb_insn (htab, output_bfd,
9845 elf32_arm_plt_thumb_stub[1], ptr - 2);
9846 }
9847 /* As we are using 32 bit instructions even for the Thumb
9848 version, we have to use 'put_arm_insn' instead of
9849 'put_thumb_insn'. */
9850 put_arm_insn(htab, output_bfd, plt_entry[0], ptr + 0);
9851 put_arm_insn(htab, output_bfd, plt_entry[1], ptr + 4);
9852 put_arm_insn(htab, output_bfd, plt_entry[2], ptr + 8);
9853 put_arm_insn(htab, output_bfd, plt_entry[3], ptr + 12);
9854 bfd_put_32 (output_bfd, got_offset, ptr + 16);
9855
9856 if (!(info->flags & DF_BIND_NOW))
9857 {
9858 /* funcdesc_value_reloc_offset. */
9859 bfd_put_32 (output_bfd,
9860 htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9861 ptr + 20);
9862 put_arm_insn(htab, output_bfd, plt_entry[6], ptr + 24);
9863 put_arm_insn(htab, output_bfd, plt_entry[7], ptr + 28);
9864 put_arm_insn(htab, output_bfd, plt_entry[8], ptr + 32);
9865 put_arm_insn(htab, output_bfd, plt_entry[9], ptr + 36);
9866 }
9867 }
9868 else if (using_thumb_only (htab))
9869 {
9870 /* PR ld/16017: Generate thumb only PLT entries. */
9871 if (!using_thumb2 (htab))
9872 {
9873 /* FIXME: We ought to be able to generate thumb-1 PLT
9874 instructions... */
9875 _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9876 output_bfd);
9877 return FALSE;
9878 }
9879
9880 /* Calculate the displacement between the PLT slot and the entry in
9881 the GOT. The 12-byte offset accounts for the value produced by
9882 adding to pc in the 3rd instruction of the PLT stub. */
9883 got_displacement = got_address - (plt_address + 12);
9884
9885 /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9886 instead of 'put_thumb_insn'. */
9887 put_arm_insn (htab, output_bfd,
9888 elf32_thumb2_plt_entry[0]
9889 | ((got_displacement & 0x000000ff) << 16)
9890 | ((got_displacement & 0x00000700) << 20)
9891 | ((got_displacement & 0x00000800) >> 1)
9892 | ((got_displacement & 0x0000f000) >> 12),
9893 ptr + 0);
9894 put_arm_insn (htab, output_bfd,
9895 elf32_thumb2_plt_entry[1]
9896 | ((got_displacement & 0x00ff0000) )
9897 | ((got_displacement & 0x07000000) << 4)
9898 | ((got_displacement & 0x08000000) >> 17)
9899 | ((got_displacement & 0xf0000000) >> 28),
9900 ptr + 4);
9901 put_arm_insn (htab, output_bfd,
9902 elf32_thumb2_plt_entry[2],
9903 ptr + 8);
9904 put_arm_insn (htab, output_bfd,
9905 elf32_thumb2_plt_entry[3],
9906 ptr + 12);
9907 }
9908 else
9909 {
9910 /* Calculate the displacement between the PLT slot and the
9911 entry in the GOT. The eight-byte offset accounts for the
9912 value produced by adding to pc in the first instruction
9913 of the PLT stub. */
9914 got_displacement = got_address - (plt_address + 8);
9915
9916 if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9917 {
9918 put_thumb_insn (htab, output_bfd,
9919 elf32_arm_plt_thumb_stub[0], ptr - 4);
9920 put_thumb_insn (htab, output_bfd,
9921 elf32_arm_plt_thumb_stub[1], ptr - 2);
9922 }
9923
9924 if (!elf32_arm_use_long_plt_entry)
9925 {
9926 BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9927
9928 put_arm_insn (htab, output_bfd,
9929 elf32_arm_plt_entry_short[0]
9930 | ((got_displacement & 0x0ff00000) >> 20),
9931 ptr + 0);
9932 put_arm_insn (htab, output_bfd,
9933 elf32_arm_plt_entry_short[1]
9934 | ((got_displacement & 0x000ff000) >> 12),
9935 ptr+ 4);
9936 put_arm_insn (htab, output_bfd,
9937 elf32_arm_plt_entry_short[2]
9938 | (got_displacement & 0x00000fff),
9939 ptr + 8);
9940 #ifdef FOUR_WORD_PLT
9941 bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9942 #endif
9943 }
9944 else
9945 {
9946 put_arm_insn (htab, output_bfd,
9947 elf32_arm_plt_entry_long[0]
9948 | ((got_displacement & 0xf0000000) >> 28),
9949 ptr + 0);
9950 put_arm_insn (htab, output_bfd,
9951 elf32_arm_plt_entry_long[1]
9952 | ((got_displacement & 0x0ff00000) >> 20),
9953 ptr + 4);
9954 put_arm_insn (htab, output_bfd,
9955 elf32_arm_plt_entry_long[2]
9956 | ((got_displacement & 0x000ff000) >> 12),
9957 ptr+ 8);
9958 put_arm_insn (htab, output_bfd,
9959 elf32_arm_plt_entry_long[3]
9960 | (got_displacement & 0x00000fff),
9961 ptr + 12);
9962 }
9963 }
9964
9965 /* Fill in the entry in the .rel(a).(i)plt section. */
9966 rel.r_offset = got_address;
9967 rel.r_addend = 0;
9968 if (dynindx == -1)
9969 {
9970 /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9971 The dynamic linker or static executable then calls SYM_VALUE
9972 to determine the correct run-time value of the .igot.plt entry. */
9973 rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9974 initial_got_entry = sym_value;
9975 }
9976 else
9977 {
9978 /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9979 used by PLT entry. */
9980 if (htab->fdpic_p)
9981 {
9982 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9983 initial_got_entry = 0;
9984 }
9985 else
9986 {
9987 rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9988 initial_got_entry = (splt->output_section->vma
9989 + splt->output_offset);
9990
9991 /* PR ld/16017
9992 When thumb only we need to set the LSB for any address that
9993 will be used with an interworking branch instruction. */
9994 if (using_thumb_only (htab))
9995 initial_got_entry |= 1;
9996 }
9997 }
9998
9999 /* Fill in the entry in the global offset table. */
10000 bfd_put_32 (output_bfd, initial_got_entry,
10001 sgot->contents + got_offset);
10002
10003 if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
10004 {
10005 /* Setup initial funcdesc value. */
10006 /* FIXME: we don't support lazy binding because there is a
10007 race condition between both words getting written and
10008 some other thread attempting to read them. The ARM
10009 architecture does not have an atomic 64 bit load/store
10010 instruction that could be used to prevent it; it is
10011 recommended that threaded FDPIC applications run with the
10012 LD_BIND_NOW environment variable set. */
10013 bfd_put_32(output_bfd, plt_address + 0x18,
10014 sgot->contents + got_offset);
10015 bfd_put_32(output_bfd, -1 /*TODO*/,
10016 sgot->contents + got_offset + 4);
10017 }
10018 }
10019
10020 if (dynindx == -1)
10021 elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
10022 else
10023 {
10024 if (htab->fdpic_p)
10025 {
10026 /* For FDPIC we put PLT relocationss into .rel.got when not
10027 lazy binding otherwise we put them in .rel.plt. For now,
10028 we don't support lazy binding so put it in .rel.got. */
10029 if (info->flags & DF_BIND_NOW)
10030 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelgot, &rel);
10031 else
10032 elf32_arm_add_dynreloc(output_bfd, info, htab->root.srelplt, &rel);
10033 }
10034 else
10035 {
10036 loc = srel->contents + plt_index * RELOC_SIZE (htab);
10037 SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
10038 }
10039 }
10040
10041 return TRUE;
10042 }
10043
10044 /* Some relocations map to different relocations depending on the
10045 target. Return the real relocation. */
10046
10047 static int
10048 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10049 int r_type)
10050 {
10051 switch (r_type)
10052 {
10053 case R_ARM_TARGET1:
10054 if (globals->target1_is_rel)
10055 return R_ARM_REL32;
10056 else
10057 return R_ARM_ABS32;
10058
10059 case R_ARM_TARGET2:
10060 return globals->target2_reloc;
10061
10062 default:
10063 return r_type;
10064 }
10065 }
10066
10067 /* Return the base VMA address which should be subtracted from real addresses
10068 when resolving @dtpoff relocation.
10069 This is PT_TLS segment p_vaddr. */
10070
10071 static bfd_vma
10072 dtpoff_base (struct bfd_link_info *info)
10073 {
10074 /* If tls_sec is NULL, we should have signalled an error already. */
10075 if (elf_hash_table (info)->tls_sec == NULL)
10076 return 0;
10077 return elf_hash_table (info)->tls_sec->vma;
10078 }
10079
10080 /* Return the relocation value for @tpoff relocation
10081 if STT_TLS virtual address is ADDRESS. */
10082
10083 static bfd_vma
10084 tpoff (struct bfd_link_info *info, bfd_vma address)
10085 {
10086 struct elf_link_hash_table *htab = elf_hash_table (info);
10087 bfd_vma base;
10088
10089 /* If tls_sec is NULL, we should have signalled an error already. */
10090 if (htab->tls_sec == NULL)
10091 return 0;
10092 base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10093 return address - htab->tls_sec->vma + base;
10094 }
10095
10096 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10097 VALUE is the relocation value. */
10098
10099 static bfd_reloc_status_type
10100 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10101 {
10102 if (value > 0xfff)
10103 return bfd_reloc_overflow;
10104
10105 value |= bfd_get_32 (abfd, data) & 0xfffff000;
10106 bfd_put_32 (abfd, value, data);
10107 return bfd_reloc_ok;
10108 }
10109
10110 /* Handle TLS relaxations. Relaxing is possible for symbols that use
10111 R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10112 R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10113
10114 Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10115 is to then call final_link_relocate. Return other values in the
10116 case of error.
10117
10118 FIXME:When --emit-relocs is in effect, we'll emit relocs describing
10119 the pre-relaxed code. It would be nice if the relocs were updated
10120 to match the optimization. */
10121
static bfd_reloc_status_type
elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
		     Elf_Internal_Rela *rel, unsigned long is_local)
{
  unsigned long insn;

  switch (ELF32_R_TYPE (rel->r_info))
    {
    default:
      return bfd_reloc_notsupported;

    case R_ARM_TLS_GOTDESC:
      /* Rewrite the stored word; a local symbol needs no PC bias at
	 all.  For others, bit 0 of the stored value distinguishes a
	 Thumb site (subtract 5) from an ARM site (subtract 8) —
	 NOTE(review): presumably compensating for the PC-read offset
	 of the referencing instruction; confirm against the TLS
	 descriptor sequence emitted by the compiler.  */
      if (is_local)
	insn = 0;
      else
	{
	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
	  if (insn & 1)
	    insn -= 5; /* THUMB */
	  else
	    insn -= 8; /* ARM */
	}
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      /* Caller must still run the normal relocation machinery.  */
      return bfd_reloc_continue;

    case R_ARM_THM_TLS_DESCSEQ:
      /* Thumb insn.  Recognise each instruction of the descriptor
	 sequence and patch it in place.  */
      insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xff78) == 0x4478) /* add rx, pc */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	}
      else if ((insn & 0xffc0) == 0x6840) /* ldr rx,[ry,#4] */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
	}
      else if ((insn & 0xff87) == 0x4780) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
			contents + rel->r_offset);
	}
      else
	{
	  /* Not part of a recognised descriptor sequence: report it,
	     widening to 32 bits first when the encoding says this is
	     the first halfword of a 32-bit Thumb instruction.  */
	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
	    /* It's a 32 bit instruction, fetch the rest of it for
	       error generation.  */
	    insn = (insn << 16)
	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "Thumb", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_DESCSEQ:
      /* arm insn.  Same idea as the Thumb case above, with 32-bit
	 ARM encodings.  */
      insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
      if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
	{
	  if (is_local)
	    /* mov rx, ry */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
			contents + rel->r_offset);
	}
      else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* ldr rx,[ry] */
	    bfd_put_32 (input_bfd, insn & 0xfffff000,
			contents + rel->r_offset);
	}
      else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
	{
	  if (is_local)
	    /* nop */
	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
	  else
	    /* mov r0, rx */
	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
			contents + rel->r_offset);
	}
      else
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unexpected %s instruction '%#lx' in TLS trampoline"),
	     input_bfd, input_sec, (uint64_t) rel->r_offset,
	     "ARM", insn);
	  return bfd_reloc_notsupported;
	}
      break;

    case R_ARM_TLS_CALL:
      /* GD->IE relaxation, turn the instruction into 'nop' or
	 'ldr r0, [pc,r0]' */
      insn = is_local ? 0xe1a00000 : 0xe79f0000;
      bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
      break;

    case R_ARM_THM_TLS_CALL:
      /* GD->IE relaxation.  Replace the 32-bit call site with two
	 halfwords chosen by locality and Thumb-2 availability.  */
      if (!is_local)
	/* add r0,pc; ldr r0, [r0] */
	insn = 0x44786800;
      else if (using_thumb2 (globals))
	/* nop.w */
	insn = 0xf3af8000;
      else
	/* nop; nop */
	insn = 0xbf00bf00;

      bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
      bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
      break;
    }
  return bfd_reloc_ok;
}
10260
10261 /* For a given value of n, calculate the value of G_n as required to
10262 deal with group relocations. We return it in the form of an
10263 encoded constant-and-rotation, together with the final residual. If n is
10264 specified as less than zero, then final_residual is filled with the
10265 input value and no further action is performed. */
10266
10267 static bfd_vma
10268 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10269 {
10270 int current_n;
10271 bfd_vma g_n;
10272 bfd_vma encoded_g_n = 0;
10273 bfd_vma residual = value; /* Also known as Y_n. */
10274
10275 for (current_n = 0; current_n <= n; current_n++)
10276 {
10277 int shift;
10278
10279 /* Calculate which part of the value to mask. */
10280 if (residual == 0)
10281 shift = 0;
10282 else
10283 {
10284 int msb;
10285
10286 /* Determine the most significant bit in the residual and
10287 align the resulting value to a 2-bit boundary. */
10288 for (msb = 30; msb >= 0; msb -= 2)
10289 if (residual & (3 << msb))
10290 break;
10291
10292 /* The desired shift is now (msb - 6), or zero, whichever
10293 is the greater. */
10294 shift = msb - 6;
10295 if (shift < 0)
10296 shift = 0;
10297 }
10298
10299 /* Calculate g_n in 32-bit as well as encoded constant+rotation form. */
10300 g_n = residual & (0xff << shift);
10301 encoded_g_n = (g_n >> shift)
10302 | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10303
10304 /* Calculate the residual for the next time around. */
10305 residual &= ~g_n;
10306 }
10307
10308 *final_residual = residual;
10309
10310 return encoded_g_n;
10311 }
10312
10313 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10314 Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise. */
10315
10316 static int
10317 identify_add_or_sub (bfd_vma insn)
10318 {
10319 int opcode = insn & 0x1e00000;
10320
10321 if (opcode == 1 << 23) /* ADD */
10322 return 1;
10323
10324 if (opcode == 1 << 22) /* SUB */
10325 return -1;
10326
10327 return 0;
10328 }
10329
10330 /* Perform a relocation as part of a final link. */
10331
10332 static bfd_reloc_status_type
10333 elf32_arm_final_link_relocate (reloc_howto_type * howto,
10334 bfd * input_bfd,
10335 bfd * output_bfd,
10336 asection * input_section,
10337 bfd_byte * contents,
10338 Elf_Internal_Rela * rel,
10339 bfd_vma value,
10340 struct bfd_link_info * info,
10341 asection * sym_sec,
10342 const char * sym_name,
10343 unsigned char st_type,
10344 enum arm_st_branch_type branch_type,
10345 struct elf_link_hash_entry * h,
10346 bfd_boolean * unresolved_reloc_p,
10347 char ** error_message)
10348 {
10349 unsigned long r_type = howto->type;
10350 unsigned long r_symndx;
10351 bfd_byte * hit_data = contents + rel->r_offset;
10352 bfd_vma * local_got_offsets;
10353 bfd_vma * local_tlsdesc_gotents;
10354 asection * sgot;
10355 asection * splt;
10356 asection * sreloc = NULL;
10357 asection * srelgot;
10358 bfd_vma addend;
10359 bfd_signed_vma signed_addend;
10360 unsigned char dynreloc_st_type;
10361 bfd_vma dynreloc_value;
10362 struct elf32_arm_link_hash_table * globals;
10363 struct elf32_arm_link_hash_entry *eh;
10364 union gotplt_union *root_plt;
10365 struct arm_plt_info *arm_plt;
10366 bfd_vma plt_offset;
10367 bfd_vma gotplt_offset;
10368 bfd_boolean has_iplt_entry;
10369 bfd_boolean resolved_to_zero;
10370
10371 globals = elf32_arm_hash_table (info);
10372 if (globals == NULL)
10373 return bfd_reloc_notsupported;
10374
10375 BFD_ASSERT (is_arm_elf (input_bfd));
10376 BFD_ASSERT (howto != NULL);
10377
10378 /* Some relocation types map to different relocations depending on the
10379 target. We pick the right one here. */
10380 r_type = arm_real_reloc_type (globals, r_type);
10381
10382 /* It is possible to have linker relaxations on some TLS access
10383 models. Update our information here. */
10384 r_type = elf32_arm_tls_transition (info, r_type, h);
10385
10386 if (r_type != howto->type)
10387 howto = elf32_arm_howto_from_type (r_type);
10388
10389 eh = (struct elf32_arm_link_hash_entry *) h;
10390 sgot = globals->root.sgot;
10391 local_got_offsets = elf_local_got_offsets (input_bfd);
10392 local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10393
10394 if (globals->root.dynamic_sections_created)
10395 srelgot = globals->root.srelgot;
10396 else
10397 srelgot = NULL;
10398
10399 r_symndx = ELF32_R_SYM (rel->r_info);
10400
10401 if (globals->use_rel)
10402 {
10403 addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10404
10405 if (addend & ((howto->src_mask + 1) >> 1))
10406 {
10407 signed_addend = -1;
10408 signed_addend &= ~ howto->src_mask;
10409 signed_addend |= addend;
10410 }
10411 else
10412 signed_addend = addend;
10413 }
10414 else
10415 addend = signed_addend = rel->r_addend;
10416
10417 /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
10418 are resolving a function call relocation. */
10419 if (using_thumb_only (globals)
10420 && (r_type == R_ARM_THM_CALL
10421 || r_type == R_ARM_THM_JUMP24)
10422 && branch_type == ST_BRANCH_TO_ARM)
10423 branch_type = ST_BRANCH_TO_THUMB;
10424
10425 /* Record the symbol information that should be used in dynamic
10426 relocations. */
10427 dynreloc_st_type = st_type;
10428 dynreloc_value = value;
10429 if (branch_type == ST_BRANCH_TO_THUMB)
10430 dynreloc_value |= 1;
10431
10432 /* Find out whether the symbol has a PLT. Set ST_VALUE, BRANCH_TYPE and
10433 VALUE appropriately for relocations that we resolve at link time. */
10434 has_iplt_entry = FALSE;
10435 if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10436 &arm_plt)
10437 && root_plt->offset != (bfd_vma) -1)
10438 {
10439 plt_offset = root_plt->offset;
10440 gotplt_offset = arm_plt->got_offset;
10441
10442 if (h == NULL || eh->is_iplt)
10443 {
10444 has_iplt_entry = TRUE;
10445 splt = globals->root.iplt;
10446
10447 /* Populate .iplt entries here, because not all of them will
10448 be seen by finish_dynamic_symbol. The lower bit is set if
10449 we have already populated the entry. */
10450 if (plt_offset & 1)
10451 plt_offset--;
10452 else
10453 {
10454 if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10455 -1, dynreloc_value))
10456 root_plt->offset |= 1;
10457 else
10458 return bfd_reloc_notsupported;
10459 }
10460
10461 /* Static relocations always resolve to the .iplt entry. */
10462 st_type = STT_FUNC;
10463 value = (splt->output_section->vma
10464 + splt->output_offset
10465 + plt_offset);
10466 branch_type = ST_BRANCH_TO_ARM;
10467
10468 /* If there are non-call relocations that resolve to the .iplt
10469 entry, then all dynamic ones must too. */
10470 if (arm_plt->noncall_refcount != 0)
10471 {
10472 dynreloc_st_type = st_type;
10473 dynreloc_value = value;
10474 }
10475 }
10476 else
10477 /* We populate the .plt entry in finish_dynamic_symbol. */
10478 splt = globals->root.splt;
10479 }
10480 else
10481 {
10482 splt = NULL;
10483 plt_offset = (bfd_vma) -1;
10484 gotplt_offset = (bfd_vma) -1;
10485 }
10486
10487 resolved_to_zero = (h != NULL
10488 && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10489
10490 switch (r_type)
10491 {
10492 case R_ARM_NONE:
10493 /* We don't need to find a value for this symbol. It's just a
10494 marker. */
10495 *unresolved_reloc_p = FALSE;
10496 return bfd_reloc_ok;
10497
10498 case R_ARM_ABS12:
10499 if (!globals->vxworks_p)
10500 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10501 /* Fall through. */
10502
10503 case R_ARM_PC24:
10504 case R_ARM_ABS32:
10505 case R_ARM_ABS32_NOI:
10506 case R_ARM_REL32:
10507 case R_ARM_REL32_NOI:
10508 case R_ARM_CALL:
10509 case R_ARM_JUMP24:
10510 case R_ARM_XPC25:
10511 case R_ARM_PREL31:
10512 case R_ARM_PLT32:
10513 /* Handle relocations which should use the PLT entry. ABS32/REL32
10514 will use the symbol's value, which may point to a PLT entry, but we
10515 don't need to handle that here. If we created a PLT entry, all
10516 branches in this object should go to it, except if the PLT is too
10517 far away, in which case a long branch stub should be inserted. */
10518 if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10519 && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10520 && r_type != R_ARM_CALL
10521 && r_type != R_ARM_JUMP24
10522 && r_type != R_ARM_PLT32)
10523 && plt_offset != (bfd_vma) -1)
10524 {
10525 /* If we've created a .plt section, and assigned a PLT entry
10526 to this function, it must either be a STT_GNU_IFUNC reference
10527 or not be known to bind locally. In other cases, we should
10528 have cleared the PLT entry by now. */
10529 BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10530
10531 value = (splt->output_section->vma
10532 + splt->output_offset
10533 + plt_offset);
10534 *unresolved_reloc_p = FALSE;
10535 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10536 contents, rel->r_offset, value,
10537 rel->r_addend);
10538 }
10539
10540 /* When generating a shared object or relocatable executable, these
10541 relocations are copied into the output file to be resolved at
10542 run time. */
10543 if ((bfd_link_pic (info)
10544 || globals->root.is_relocatable_executable
10545 || globals->fdpic_p)
10546 && (input_section->flags & SEC_ALLOC)
10547 && !(globals->vxworks_p
10548 && strcmp (input_section->output_section->name,
10549 ".tls_vars") == 0)
10550 && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10551 || !SYMBOL_CALLS_LOCAL (info, h))
10552 && !(input_bfd == globals->stub_bfd
10553 && strstr (input_section->name, STUB_SUFFIX))
10554 && (h == NULL
10555 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10556 && !resolved_to_zero)
10557 || h->root.type != bfd_link_hash_undefweak)
10558 && r_type != R_ARM_PC24
10559 && r_type != R_ARM_CALL
10560 && r_type != R_ARM_JUMP24
10561 && r_type != R_ARM_PREL31
10562 && r_type != R_ARM_PLT32)
10563 {
10564 Elf_Internal_Rela outrel;
10565 bfd_boolean skip, relocate;
10566 int isrofixup = 0;
10567
10568 if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10569 && !h->def_regular)
10570 {
10571 char *v = _("shared object");
10572
10573 if (bfd_link_executable (info))
10574 v = _("PIE executable");
10575
10576 _bfd_error_handler
10577 (_("%pB: relocation %s against external or undefined symbol `%s'"
10578 " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10579 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10580 return bfd_reloc_notsupported;
10581 }
10582
10583 *unresolved_reloc_p = FALSE;
10584
10585 if (sreloc == NULL && globals->root.dynamic_sections_created)
10586 {
10587 sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10588 ! globals->use_rel);
10589
10590 if (sreloc == NULL)
10591 return bfd_reloc_notsupported;
10592 }
10593
10594 skip = FALSE;
10595 relocate = FALSE;
10596
10597 outrel.r_addend = addend;
10598 outrel.r_offset =
10599 _bfd_elf_section_offset (output_bfd, info, input_section,
10600 rel->r_offset);
10601 if (outrel.r_offset == (bfd_vma) -1)
10602 skip = TRUE;
10603 else if (outrel.r_offset == (bfd_vma) -2)
10604 skip = TRUE, relocate = TRUE;
10605 outrel.r_offset += (input_section->output_section->vma
10606 + input_section->output_offset);
10607
10608 if (skip)
10609 memset (&outrel, 0, sizeof outrel);
10610 else if (h != NULL
10611 && h->dynindx != -1
10612 && (!bfd_link_pic (info)
10613 || !(bfd_link_pie (info)
10614 || SYMBOLIC_BIND (info, h))
10615 || !h->def_regular))
10616 outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10617 else
10618 {
10619 int symbol;
10620
10621 /* This symbol is local, or marked to become local. */
10622 BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10623 || (globals->fdpic_p && !bfd_link_pic(info)));
10624 if (globals->symbian_p)
10625 {
10626 asection *osec;
10627
10628 /* On Symbian OS, the data segment and text segement
10629 can be relocated independently. Therefore, we
10630 must indicate the segment to which this
10631 relocation is relative. The BPABI allows us to
10632 use any symbol in the right segment; we just use
10633 the section symbol as it is convenient. (We
10634 cannot use the symbol given by "h" directly as it
10635 will not appear in the dynamic symbol table.)
10636
10637 Note that the dynamic linker ignores the section
10638 symbol value, so we don't subtract osec->vma
10639 from the emitted reloc addend. */
10640 if (sym_sec)
10641 osec = sym_sec->output_section;
10642 else
10643 osec = input_section->output_section;
10644 symbol = elf_section_data (osec)->dynindx;
10645 if (symbol == 0)
10646 {
10647 struct elf_link_hash_table *htab = elf_hash_table (info);
10648
10649 if ((osec->flags & SEC_READONLY) == 0
10650 && htab->data_index_section != NULL)
10651 osec = htab->data_index_section;
10652 else
10653 osec = htab->text_index_section;
10654 symbol = elf_section_data (osec)->dynindx;
10655 }
10656 BFD_ASSERT (symbol != 0);
10657 }
10658 else
10659 /* On SVR4-ish systems, the dynamic loader cannot
10660 relocate the text and data segments independently,
10661 so the symbol does not matter. */
10662 symbol = 0;
10663 if (dynreloc_st_type == STT_GNU_IFUNC)
10664 /* We have an STT_GNU_IFUNC symbol that doesn't resolve
10665 to the .iplt entry. Instead, every non-call reference
10666 must use an R_ARM_IRELATIVE relocation to obtain the
10667 correct run-time address. */
10668 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10669 else if (globals->fdpic_p && !bfd_link_pic(info))
10670 isrofixup = 1;
10671 else
10672 outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10673 if (globals->use_rel)
10674 relocate = TRUE;
10675 else
10676 outrel.r_addend += dynreloc_value;
10677 }
10678
10679 if (isrofixup)
10680 arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
10681 else
10682 elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10683
10684 /* If this reloc is against an external symbol, we do not want to
10685 fiddle with the addend. Otherwise, we need to include the symbol
10686 value so that it becomes an addend for the dynamic reloc. */
10687 if (! relocate)
10688 return bfd_reloc_ok;
10689
10690 return _bfd_final_link_relocate (howto, input_bfd, input_section,
10691 contents, rel->r_offset,
10692 dynreloc_value, (bfd_vma) 0);
10693 }
10694 else switch (r_type)
10695 {
10696 case R_ARM_ABS12:
10697 return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10698
10699 case R_ARM_XPC25: /* Arm BLX instruction. */
10700 case R_ARM_CALL:
10701 case R_ARM_JUMP24:
10702 case R_ARM_PC24: /* Arm B/BL instruction. */
10703 case R_ARM_PLT32:
10704 {
10705 struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10706
10707 if (r_type == R_ARM_XPC25)
10708 {
10709 /* Check for Arm calling Arm function. */
10710 /* FIXME: Should we translate the instruction into a BL
10711 instruction instead ? */
10712 if (branch_type != ST_BRANCH_TO_THUMB)
10713 _bfd_error_handler
10714 (_("\%pB: warning: %s BLX instruction targets"
10715 " %s function '%s'"),
10716 input_bfd, "ARM",
10717 "ARM", h ? h->root.root.string : "(local)");
10718 }
10719 else if (r_type == R_ARM_PC24)
10720 {
10721 /* Check for Arm calling Thumb function. */
10722 if (branch_type == ST_BRANCH_TO_THUMB)
10723 {
10724 if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10725 output_bfd, input_section,
10726 hit_data, sym_sec, rel->r_offset,
10727 signed_addend, value,
10728 error_message))
10729 return bfd_reloc_ok;
10730 else
10731 return bfd_reloc_dangerous;
10732 }
10733 }
10734
10735 /* Check if a stub has to be inserted because the
10736 destination is too far or we are changing mode. */
10737 if ( r_type == R_ARM_CALL
10738 || r_type == R_ARM_JUMP24
10739 || r_type == R_ARM_PLT32)
10740 {
10741 enum elf32_arm_stub_type stub_type = arm_stub_none;
10742 struct elf32_arm_link_hash_entry *hash;
10743
10744 hash = (struct elf32_arm_link_hash_entry *) h;
10745 stub_type = arm_type_of_stub (info, input_section, rel,
10746 st_type, &branch_type,
10747 hash, value, sym_sec,
10748 input_bfd, sym_name);
10749
10750 if (stub_type != arm_stub_none)
10751 {
10752 /* The target is out of reach, so redirect the
10753 branch to the local stub for this function. */
10754 stub_entry = elf32_arm_get_stub_entry (input_section,
10755 sym_sec, h,
10756 rel, globals,
10757 stub_type);
10758 {
10759 if (stub_entry != NULL)
10760 value = (stub_entry->stub_offset
10761 + stub_entry->stub_sec->output_offset
10762 + stub_entry->stub_sec->output_section->vma);
10763
10764 if (plt_offset != (bfd_vma) -1)
10765 *unresolved_reloc_p = FALSE;
10766 }
10767 }
10768 else
10769 {
10770 /* If the call goes through a PLT entry, make sure to
10771 check distance to the right destination address. */
10772 if (plt_offset != (bfd_vma) -1)
10773 {
10774 value = (splt->output_section->vma
10775 + splt->output_offset
10776 + plt_offset);
10777 *unresolved_reloc_p = FALSE;
10778 /* The PLT entry is in ARM mode, regardless of the
10779 target function. */
10780 branch_type = ST_BRANCH_TO_ARM;
10781 }
10782 }
10783 }
10784
10785 /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10786 where:
10787 S is the address of the symbol in the relocation.
10788 P is address of the instruction being relocated.
10789 A is the addend (extracted from the instruction) in bytes.
10790
10791 S is held in 'value'.
10792 P is the base address of the section containing the
10793 instruction plus the offset of the reloc into that
10794 section, ie:
10795 (input_section->output_section->vma +
10796 input_section->output_offset +
10797 rel->r_offset).
10798 A is the addend, converted into bytes, ie:
10799 (signed_addend * 4)
10800
10801 Note: None of these operations have knowledge of the pipeline
10802 size of the processor, thus it is up to the assembler to
10803 encode this information into the addend. */
10804 value -= (input_section->output_section->vma
10805 + input_section->output_offset);
10806 value -= rel->r_offset;
10807 if (globals->use_rel)
10808 value += (signed_addend << howto->size);
10809 else
10810 /* RELA addends do not have to be adjusted by howto->size. */
10811 value += signed_addend;
10812
10813 signed_addend = value;
10814 signed_addend >>= howto->rightshift;
10815
10816 /* A branch to an undefined weak symbol is turned into a jump to
10817 the next instruction unless a PLT entry will be created.
10818 Do the same for local undefined symbols (but not for STN_UNDEF).
10819 The jump to the next instruction is optimized as a NOP depending
10820 on the architecture. */
10821 if (h ? (h->root.type == bfd_link_hash_undefweak
10822 && plt_offset == (bfd_vma) -1)
10823 : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10824 {
10825 value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10826
10827 if (arch_has_arm_nop (globals))
10828 value |= 0x0320f000;
10829 else
10830 value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0. */
10831 }
10832 else
10833 {
10834 /* Perform a signed range check. */
10835 if ( signed_addend > ((bfd_signed_vma) (howto->dst_mask >> 1))
10836 || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10837 return bfd_reloc_overflow;
10838
10839 addend = (value & 2);
10840
10841 value = (signed_addend & howto->dst_mask)
10842 | (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10843
10844 if (r_type == R_ARM_CALL)
10845 {
10846 /* Set the H bit in the BLX instruction. */
10847 if (branch_type == ST_BRANCH_TO_THUMB)
10848 {
10849 if (addend)
10850 value |= (1 << 24);
10851 else
10852 value &= ~(bfd_vma)(1 << 24);
10853 }
10854
10855 /* Select the correct instruction (BL or BLX). */
10856 /* Only if we are not handling a BL to a stub. In this
10857 case, mode switching is performed by the stub. */
10858 if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10859 value |= (1 << 28);
10860 else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10861 {
10862 value &= ~(bfd_vma)(1 << 28);
10863 value |= (1 << 24);
10864 }
10865 }
10866 }
10867 }
10868 break;
10869
10870 case R_ARM_ABS32:
10871 value += addend;
10872 if (branch_type == ST_BRANCH_TO_THUMB)
10873 value |= 1;
10874 break;
10875
10876 case R_ARM_ABS32_NOI:
10877 value += addend;
10878 break;
10879
10880 case R_ARM_REL32:
10881 value += addend;
10882 if (branch_type == ST_BRANCH_TO_THUMB)
10883 value |= 1;
10884 value -= (input_section->output_section->vma
10885 + input_section->output_offset + rel->r_offset);
10886 break;
10887
10888 case R_ARM_REL32_NOI:
10889 value += addend;
10890 value -= (input_section->output_section->vma
10891 + input_section->output_offset + rel->r_offset);
10892 break;
10893
10894 case R_ARM_PREL31:
10895 value -= (input_section->output_section->vma
10896 + input_section->output_offset + rel->r_offset);
10897 value += signed_addend;
10898 if (! h || h->root.type != bfd_link_hash_undefweak)
10899 {
10900 /* Check for overflow. */
10901 if ((value ^ (value >> 1)) & (1 << 30))
10902 return bfd_reloc_overflow;
10903 }
10904 value &= 0x7fffffff;
10905 value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10906 if (branch_type == ST_BRANCH_TO_THUMB)
10907 value |= 1;
10908 break;
10909 }
10910
10911 bfd_put_32 (input_bfd, value, hit_data);
10912 return bfd_reloc_ok;
10913
10914 case R_ARM_ABS8:
10915 /* PR 16202: Refectch the addend using the correct size. */
10916 if (globals->use_rel)
10917 addend = bfd_get_8 (input_bfd, hit_data);
10918 value += addend;
10919
10920 /* There is no way to tell whether the user intended to use a signed or
10921 unsigned addend. When checking for overflow we accept either,
10922 as specified by the AAELF. */
10923 if ((long) value > 0xff || (long) value < -0x80)
10924 return bfd_reloc_overflow;
10925
10926 bfd_put_8 (input_bfd, value, hit_data);
10927 return bfd_reloc_ok;
10928
10929 case R_ARM_ABS16:
10930 /* PR 16202: Refectch the addend using the correct size. */
10931 if (globals->use_rel)
10932 addend = bfd_get_16 (input_bfd, hit_data);
10933 value += addend;
10934
10935 /* See comment for R_ARM_ABS8. */
10936 if ((long) value > 0xffff || (long) value < -0x8000)
10937 return bfd_reloc_overflow;
10938
10939 bfd_put_16 (input_bfd, value, hit_data);
10940 return bfd_reloc_ok;
10941
10942 case R_ARM_THM_ABS5:
10943 /* Support ldr and str instructions for the thumb. */
10944 if (globals->use_rel)
10945 {
10946 /* Need to refetch addend. */
10947 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10948 /* ??? Need to determine shift amount from operand size. */
10949 addend >>= howto->rightshift;
10950 }
10951 value += addend;
10952
10953 /* ??? Isn't value unsigned? */
10954 if ((long) value > 0x1f || (long) value < -0x10)
10955 return bfd_reloc_overflow;
10956
10957 /* ??? Value needs to be properly shifted into place first. */
10958 value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10959 bfd_put_16 (input_bfd, value, hit_data);
10960 return bfd_reloc_ok;
10961
10962 case R_ARM_THM_ALU_PREL_11_0:
10963 /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw). */
10964 {
10965 bfd_vma insn;
10966 bfd_signed_vma relocation;
10967
10968 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10969 | bfd_get_16 (input_bfd, hit_data + 2);
10970
10971 if (globals->use_rel)
10972 {
10973 signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10974 | ((insn & (1 << 26)) >> 15);
10975 if (insn & 0xf00000)
10976 signed_addend = -signed_addend;
10977 }
10978
10979 relocation = value + signed_addend;
10980 relocation -= Pa (input_section->output_section->vma
10981 + input_section->output_offset
10982 + rel->r_offset);
10983
10984 /* PR 21523: Use an absolute value. The user of this reloc will
10985 have already selected an ADD or SUB insn appropriately. */
10986 value = llabs (relocation);
10987
10988 if (value >= 0x1000)
10989 return bfd_reloc_overflow;
10990
10991 /* Destination is Thumb. Force bit 0 to 1 to reflect this. */
10992 if (branch_type == ST_BRANCH_TO_THUMB)
10993 value |= 1;
10994
10995 insn = (insn & 0xfb0f8f00) | (value & 0xff)
10996 | ((value & 0x700) << 4)
10997 | ((value & 0x800) << 15);
10998 if (relocation < 0)
10999 insn |= 0xa00000;
11000
11001 bfd_put_16 (input_bfd, insn >> 16, hit_data);
11002 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11003
11004 return bfd_reloc_ok;
11005 }
11006
11007 case R_ARM_THM_PC8:
11008 /* PR 10073: This reloc is not generated by the GNU toolchain,
11009 but it is supported for compatibility with third party libraries
11010 generated by other compilers, specifically the ARM/IAR. */
11011 {
11012 bfd_vma insn;
11013 bfd_signed_vma relocation;
11014
11015 insn = bfd_get_16 (input_bfd, hit_data);
11016
11017 if (globals->use_rel)
11018 addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
11019
11020 relocation = value + addend;
11021 relocation -= Pa (input_section->output_section->vma
11022 + input_section->output_offset
11023 + rel->r_offset);
11024
11025 value = relocation;
11026
11027 /* We do not check for overflow of this reloc. Although strictly
11028 speaking this is incorrect, it appears to be necessary in order
11029 to work with IAR generated relocs. Since GCC and GAS do not
11030 generate R_ARM_THM_PC8 relocs, the lack of a check should not be
11031 a problem for them. */
11032 value &= 0x3fc;
11033
11034 insn = (insn & 0xff00) | (value >> 2);
11035
11036 bfd_put_16 (input_bfd, insn, hit_data);
11037
11038 return bfd_reloc_ok;
11039 }
11040
11041 case R_ARM_THM_PC12:
11042 /* Corresponds to: ldr.w reg, [pc, #offset]. */
11043 {
11044 bfd_vma insn;
11045 bfd_signed_vma relocation;
11046
11047 insn = (bfd_get_16 (input_bfd, hit_data) << 16)
11048 | bfd_get_16 (input_bfd, hit_data + 2);
11049
11050 if (globals->use_rel)
11051 {
11052 signed_addend = insn & 0xfff;
11053 if (!(insn & (1 << 23)))
11054 signed_addend = -signed_addend;
11055 }
11056
11057 relocation = value + signed_addend;
11058 relocation -= Pa (input_section->output_section->vma
11059 + input_section->output_offset
11060 + rel->r_offset);
11061
11062 value = relocation;
11063
11064 if (value >= 0x1000)
11065 return bfd_reloc_overflow;
11066
11067 insn = (insn & 0xff7ff000) | value;
11068 if (relocation >= 0)
11069 insn |= (1 << 23);
11070
11071 bfd_put_16 (input_bfd, insn >> 16, hit_data);
11072 bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11073
11074 return bfd_reloc_ok;
11075 }
11076
11077 case R_ARM_THM_XPC22:
11078 case R_ARM_THM_CALL:
11079 case R_ARM_THM_JUMP24:
11080 /* Thumb BL (branch long instruction). */
11081 {
11082 bfd_vma relocation;
11083 bfd_vma reloc_sign;
11084 bfd_boolean overflow = FALSE;
11085 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11086 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11087 bfd_signed_vma reloc_signed_max;
11088 bfd_signed_vma reloc_signed_min;
11089 bfd_vma check;
11090 bfd_signed_vma signed_check;
11091 int bitsize;
11092 const int thumb2 = using_thumb2 (globals);
11093 const int thumb2_bl = using_thumb2_bl (globals);
11094
11095 /* A branch to an undefined weak symbol is turned into a jump to
11096 the next instruction unless a PLT entry will be created.
11097 The jump to the next instruction is optimized as a NOP.W for
11098 Thumb-2 enabled architectures. */
11099 if (h && h->root.type == bfd_link_hash_undefweak
11100 && plt_offset == (bfd_vma) -1)
11101 {
11102 if (thumb2)
11103 {
11104 bfd_put_16 (input_bfd, 0xf3af, hit_data);
11105 bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11106 }
11107 else
11108 {
11109 bfd_put_16 (input_bfd, 0xe000, hit_data);
11110 bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11111 }
11112 return bfd_reloc_ok;
11113 }
11114
11115 /* Fetch the addend. We use the Thumb-2 encoding (backwards compatible
11116 with Thumb-1) involving the J1 and J2 bits. */
11117 if (globals->use_rel)
11118 {
11119 bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11120 bfd_vma upper = upper_insn & 0x3ff;
11121 bfd_vma lower = lower_insn & 0x7ff;
11122 bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11123 bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11124 bfd_vma i1 = j1 ^ s ? 0 : 1;
11125 bfd_vma i2 = j2 ^ s ? 0 : 1;
11126
11127 addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11128 /* Sign extend. */
11129 addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11130
11131 signed_addend = addend;
11132 }
11133
11134 if (r_type == R_ARM_THM_XPC22)
11135 {
11136 /* Check for Thumb to Thumb call. */
11137 /* FIXME: Should we translate the instruction into a BL
11138 instruction instead ? */
11139 if (branch_type == ST_BRANCH_TO_THUMB)
11140 _bfd_error_handler
11141 (_("%pB: warning: %s BLX instruction targets"
11142 " %s function '%s'"),
11143 input_bfd, "Thumb",
11144 "Thumb", h ? h->root.root.string : "(local)");
11145 }
11146 else
11147 {
11148 /* If it is not a call to Thumb, assume call to Arm.
11149 If it is a call relative to a section name, then it is not a
11150 function call at all, but rather a long jump. Calls through
11151 the PLT do not require stubs. */
11152 if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11153 {
11154 if (globals->use_blx && r_type == R_ARM_THM_CALL)
11155 {
11156 /* Convert BL to BLX. */
11157 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11158 }
11159 else if (( r_type != R_ARM_THM_CALL)
11160 && (r_type != R_ARM_THM_JUMP24))
11161 {
11162 if (elf32_thumb_to_arm_stub
11163 (info, sym_name, input_bfd, output_bfd, input_section,
11164 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11165 error_message))
11166 return bfd_reloc_ok;
11167 else
11168 return bfd_reloc_dangerous;
11169 }
11170 }
11171 else if (branch_type == ST_BRANCH_TO_THUMB
11172 && globals->use_blx
11173 && r_type == R_ARM_THM_CALL)
11174 {
11175 /* Make sure this is a BL. */
11176 lower_insn |= 0x1800;
11177 }
11178 }
11179
11180 enum elf32_arm_stub_type stub_type = arm_stub_none;
11181 if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11182 {
11183 /* Check if a stub has to be inserted because the destination
11184 is too far. */
11185 struct elf32_arm_stub_hash_entry *stub_entry;
11186 struct elf32_arm_link_hash_entry *hash;
11187
11188 hash = (struct elf32_arm_link_hash_entry *) h;
11189
11190 stub_type = arm_type_of_stub (info, input_section, rel,
11191 st_type, &branch_type,
11192 hash, value, sym_sec,
11193 input_bfd, sym_name);
11194
11195 if (stub_type != arm_stub_none)
11196 {
11197 /* The target is out of reach or we are changing modes, so
11198 redirect the branch to the local stub for this
11199 function. */
11200 stub_entry = elf32_arm_get_stub_entry (input_section,
11201 sym_sec, h,
11202 rel, globals,
11203 stub_type);
11204 if (stub_entry != NULL)
11205 {
11206 value = (stub_entry->stub_offset
11207 + stub_entry->stub_sec->output_offset
11208 + stub_entry->stub_sec->output_section->vma);
11209
11210 if (plt_offset != (bfd_vma) -1)
11211 *unresolved_reloc_p = FALSE;
11212 }
11213
11214 /* If this call becomes a call to Arm, force BLX. */
11215 if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11216 {
11217 if ((stub_entry
11218 && !arm_stub_is_thumb (stub_entry->stub_type))
11219 || branch_type != ST_BRANCH_TO_THUMB)
11220 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11221 }
11222 }
11223 }
11224
11225 /* Handle calls via the PLT. */
11226 if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11227 {
11228 value = (splt->output_section->vma
11229 + splt->output_offset
11230 + plt_offset);
11231
11232 if (globals->use_blx
11233 && r_type == R_ARM_THM_CALL
11234 && ! using_thumb_only (globals))
11235 {
11236 /* If the Thumb BLX instruction is available, convert
11237 the BL to a BLX instruction to call the ARM-mode
11238 PLT entry. */
11239 lower_insn = (lower_insn & ~0x1000) | 0x0800;
11240 branch_type = ST_BRANCH_TO_ARM;
11241 }
11242 else
11243 {
11244 if (! using_thumb_only (globals))
11245 /* Target the Thumb stub before the ARM PLT entry. */
11246 value -= PLT_THUMB_STUB_SIZE;
11247 branch_type = ST_BRANCH_TO_THUMB;
11248 }
11249 *unresolved_reloc_p = FALSE;
11250 }
11251
11252 relocation = value + signed_addend;
11253
11254 relocation -= (input_section->output_section->vma
11255 + input_section->output_offset
11256 + rel->r_offset);
11257
11258 check = relocation >> howto->rightshift;
11259
11260 /* If this is a signed value, the rightshift just dropped
11261 leading 1 bits (assuming twos complement). */
11262 if ((bfd_signed_vma) relocation >= 0)
11263 signed_check = check;
11264 else
11265 signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11266
11267 /* Calculate the permissible maximum and minimum values for
11268 this relocation according to whether we're relocating for
11269 Thumb-2 or not. */
11270 bitsize = howto->bitsize;
11271 if (!thumb2_bl)
11272 bitsize -= 2;
11273 reloc_signed_max = (1 << (bitsize - 1)) - 1;
11274 reloc_signed_min = ~reloc_signed_max;
11275
11276 /* Assumes two's complement. */
11277 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11278 overflow = TRUE;
11279
11280 if ((lower_insn & 0x5000) == 0x4000)
11281 /* For a BLX instruction, make sure that the relocation is rounded up
11282 to a word boundary. This follows the semantics of the instruction
11283 which specifies that bit 1 of the target address will come from bit
11284 1 of the base address. */
11285 relocation = (relocation + 2) & ~ 3;
11286
11287 /* Put RELOCATION back into the insn. Assumes two's complement.
11288 We use the Thumb-2 encoding, which is safe even if dealing with
11289 a Thumb-1 instruction by virtue of our overflow check above. */
11290 reloc_sign = (signed_check < 0) ? 1 : 0;
11291 upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11292 | ((relocation >> 12) & 0x3ff)
11293 | (reloc_sign << 10);
11294 lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11295 | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11296 | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11297 | ((relocation >> 1) & 0x7ff);
11298
11299 /* Put the relocated value back in the object file: */
11300 bfd_put_16 (input_bfd, upper_insn, hit_data);
11301 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11302
11303 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11304 }
11305 break;
11306
11307 case R_ARM_THM_JUMP19:
11308 /* Thumb32 conditional branch instruction. */
11309 {
11310 bfd_vma relocation;
11311 bfd_boolean overflow = FALSE;
11312 bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11313 bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11314 bfd_signed_vma reloc_signed_max = 0xffffe;
11315 bfd_signed_vma reloc_signed_min = -0x100000;
11316 bfd_signed_vma signed_check;
11317 enum elf32_arm_stub_type stub_type = arm_stub_none;
11318 struct elf32_arm_stub_hash_entry *stub_entry;
11319 struct elf32_arm_link_hash_entry *hash;
11320
11321 /* Need to refetch the addend, reconstruct the top three bits,
11322 and squish the two 11 bit pieces together. */
11323 if (globals->use_rel)
11324 {
11325 bfd_vma S = (upper_insn & 0x0400) >> 10;
11326 bfd_vma upper = (upper_insn & 0x003f);
11327 bfd_vma J1 = (lower_insn & 0x2000) >> 13;
11328 bfd_vma J2 = (lower_insn & 0x0800) >> 11;
11329 bfd_vma lower = (lower_insn & 0x07ff);
11330
11331 upper |= J1 << 6;
11332 upper |= J2 << 7;
11333 upper |= (!S) << 8;
11334 upper -= 0x0100; /* Sign extend. */
11335
11336 addend = (upper << 12) | (lower << 1);
11337 signed_addend = addend;
11338 }
11339
11340 /* Handle calls via the PLT. */
11341 if (plt_offset != (bfd_vma) -1)
11342 {
11343 value = (splt->output_section->vma
11344 + splt->output_offset
11345 + plt_offset);
11346 /* Target the Thumb stub before the ARM PLT entry. */
11347 value -= PLT_THUMB_STUB_SIZE;
11348 *unresolved_reloc_p = FALSE;
11349 }
11350
11351 hash = (struct elf32_arm_link_hash_entry *)h;
11352
11353 stub_type = arm_type_of_stub (info, input_section, rel,
11354 st_type, &branch_type,
11355 hash, value, sym_sec,
11356 input_bfd, sym_name);
11357 if (stub_type != arm_stub_none)
11358 {
11359 stub_entry = elf32_arm_get_stub_entry (input_section,
11360 sym_sec, h,
11361 rel, globals,
11362 stub_type);
11363 if (stub_entry != NULL)
11364 {
11365 value = (stub_entry->stub_offset
11366 + stub_entry->stub_sec->output_offset
11367 + stub_entry->stub_sec->output_section->vma);
11368 }
11369 }
11370
11371 relocation = value + signed_addend;
11372 relocation -= (input_section->output_section->vma
11373 + input_section->output_offset
11374 + rel->r_offset);
11375 signed_check = (bfd_signed_vma) relocation;
11376
11377 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11378 overflow = TRUE;
11379
11380 /* Put RELOCATION back into the insn. */
11381 {
11382 bfd_vma S = (relocation & 0x00100000) >> 20;
11383 bfd_vma J2 = (relocation & 0x00080000) >> 19;
11384 bfd_vma J1 = (relocation & 0x00040000) >> 18;
11385 bfd_vma hi = (relocation & 0x0003f000) >> 12;
11386 bfd_vma lo = (relocation & 0x00000ffe) >> 1;
11387
11388 upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11389 lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11390 }
11391
11392 /* Put the relocated value back in the object file: */
11393 bfd_put_16 (input_bfd, upper_insn, hit_data);
11394 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11395
11396 return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11397 }
11398
11399 case R_ARM_THM_JUMP11:
11400 case R_ARM_THM_JUMP8:
11401 case R_ARM_THM_JUMP6:
11402 /* Thumb B (branch) instruction. */
11403 {
11404 bfd_signed_vma relocation;
11405 bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11406 bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11407 bfd_signed_vma signed_check;
11408
11409 /* CBZ cannot jump backward. */
11410 if (r_type == R_ARM_THM_JUMP6)
11411 reloc_signed_min = 0;
11412
11413 if (globals->use_rel)
11414 {
11415 /* Need to refetch addend. */
11416 addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
11417 if (addend & ((howto->src_mask + 1) >> 1))
11418 {
11419 signed_addend = -1;
11420 signed_addend &= ~ howto->src_mask;
11421 signed_addend |= addend;
11422 }
11423 else
11424 signed_addend = addend;
11425 /* The value in the insn has been right shifted. We need to
11426 undo this, so that we can perform the address calculation
11427 in terms of bytes. */
11428 signed_addend <<= howto->rightshift;
11429 }
11430 relocation = value + signed_addend;
11431
11432 relocation -= (input_section->output_section->vma
11433 + input_section->output_offset
11434 + rel->r_offset);
11435
11436 relocation >>= howto->rightshift;
11437 signed_check = relocation;
11438
11439 if (r_type == R_ARM_THM_JUMP6)
11440 relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11441 else
11442 relocation &= howto->dst_mask;
11443 relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11444
11445 bfd_put_16 (input_bfd, relocation, hit_data);
11446
11447 /* Assumes two's complement. */
11448 if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11449 return bfd_reloc_overflow;
11450
11451 return bfd_reloc_ok;
11452 }
11453
11454 case R_ARM_ALU_PCREL7_0:
11455 case R_ARM_ALU_PCREL15_8:
11456 case R_ARM_ALU_PCREL23_15:
11457 {
11458 bfd_vma insn;
11459 bfd_vma relocation;
11460
11461 insn = bfd_get_32 (input_bfd, hit_data);
11462 if (globals->use_rel)
11463 {
11464 /* Extract the addend. */
11465 addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11466 signed_addend = addend;
11467 }
11468 relocation = value + signed_addend;
11469
11470 relocation -= (input_section->output_section->vma
11471 + input_section->output_offset
11472 + rel->r_offset);
11473 insn = (insn & ~0xfff)
11474 | ((howto->bitpos << 7) & 0xf00)
11475 | ((relocation >> howto->bitpos) & 0xff);
11476 bfd_put_32 (input_bfd, value, hit_data);
11477 }
11478 return bfd_reloc_ok;
11479
11480 case R_ARM_GNU_VTINHERIT:
11481 case R_ARM_GNU_VTENTRY:
11482 return bfd_reloc_ok;
11483
11484 case R_ARM_GOTOFF32:
11485 /* Relocation is relative to the start of the
11486 global offset table. */
11487
11488 BFD_ASSERT (sgot != NULL);
11489 if (sgot == NULL)
11490 return bfd_reloc_notsupported;
11491
11492 /* If we are addressing a Thumb function, we need to adjust the
11493 address by one, so that attempts to call the function pointer will
11494 correctly interpret it as Thumb code. */
11495 if (branch_type == ST_BRANCH_TO_THUMB)
11496 value += 1;
11497
11498 /* Note that sgot->output_offset is not involved in this
11499 calculation. We always want the start of .got. If we
11500 define _GLOBAL_OFFSET_TABLE in a different way, as is
11501 permitted by the ABI, we might have to change this
11502 calculation. */
11503 value -= sgot->output_section->vma;
11504 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11505 contents, rel->r_offset, value,
11506 rel->r_addend);
11507
11508 case R_ARM_GOTPC:
11509 /* Use global offset table as symbol value. */
11510 BFD_ASSERT (sgot != NULL);
11511
11512 if (sgot == NULL)
11513 return bfd_reloc_notsupported;
11514
11515 *unresolved_reloc_p = FALSE;
11516 value = sgot->output_section->vma;
11517 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11518 contents, rel->r_offset, value,
11519 rel->r_addend);
11520
11521 case R_ARM_GOT32:
11522 case R_ARM_GOT_PREL:
11523 /* Relocation is to the entry for this symbol in the
11524 global offset table. */
11525 if (sgot == NULL)
11526 return bfd_reloc_notsupported;
11527
11528 if (dynreloc_st_type == STT_GNU_IFUNC
11529 && plt_offset != (bfd_vma) -1
11530 && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11531 {
11532 /* We have a relocation against a locally-binding STT_GNU_IFUNC
11533 symbol, and the relocation resolves directly to the runtime
11534 target rather than to the .iplt entry. This means that any
11535 .got entry would be the same value as the .igot.plt entry,
11536 so there's no point creating both. */
11537 sgot = globals->root.igotplt;
11538 value = sgot->output_offset + gotplt_offset;
11539 }
11540 else if (h != NULL)
11541 {
11542 bfd_vma off;
11543
11544 off = h->got.offset;
11545 BFD_ASSERT (off != (bfd_vma) -1);
11546 if ((off & 1) != 0)
11547 {
11548 /* We have already processed one GOT relocation against
11549 this symbol. */
11550 off &= ~1;
11551 if (globals->root.dynamic_sections_created
11552 && !SYMBOL_REFERENCES_LOCAL (info, h))
11553 *unresolved_reloc_p = FALSE;
11554 }
11555 else
11556 {
11557 Elf_Internal_Rela outrel;
11558 int isrofixup = 0;
11559
11560 if (((h->dynindx != -1) || globals->fdpic_p)
11561 && !SYMBOL_REFERENCES_LOCAL (info, h))
11562 {
11563 /* If the symbol doesn't resolve locally in a static
11564 object, we have an undefined reference. If the
11565 symbol doesn't resolve locally in a dynamic object,
11566 it should be resolved by the dynamic linker. */
11567 if (globals->root.dynamic_sections_created)
11568 {
11569 outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11570 *unresolved_reloc_p = FALSE;
11571 }
11572 else
11573 outrel.r_info = 0;
11574 outrel.r_addend = 0;
11575 }
11576 else
11577 {
11578 if (dynreloc_st_type == STT_GNU_IFUNC)
11579 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11580 else if (bfd_link_pic (info)
11581 && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11582 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11583 else
11584 {
11585 outrel.r_info = 0;
11586 if (globals->fdpic_p)
11587 isrofixup = 1;
11588 }
11589 outrel.r_addend = dynreloc_value;
11590 }
11591
11592 /* The GOT entry is initialized to zero by default.
11593 See if we should install a different value. */
11594 if (outrel.r_addend != 0
11595 && (globals->use_rel || outrel.r_info == 0))
11596 {
11597 bfd_put_32 (output_bfd, outrel.r_addend,
11598 sgot->contents + off);
11599 outrel.r_addend = 0;
11600 }
11601
11602 if (isrofixup)
11603 arm_elf_add_rofixup (output_bfd,
11604 elf32_arm_hash_table(info)->srofixup,
11605 sgot->output_section->vma
11606 + sgot->output_offset + off);
11607
11608 else if (outrel.r_info != 0)
11609 {
11610 outrel.r_offset = (sgot->output_section->vma
11611 + sgot->output_offset
11612 + off);
11613 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11614 }
11615
11616 h->got.offset |= 1;
11617 }
11618 value = sgot->output_offset + off;
11619 }
11620 else
11621 {
11622 bfd_vma off;
11623
11624 BFD_ASSERT (local_got_offsets != NULL
11625 && local_got_offsets[r_symndx] != (bfd_vma) -1);
11626
11627 off = local_got_offsets[r_symndx];
11628
11629 /* The offset must always be a multiple of 4. We use the
11630 least significant bit to record whether we have already
11631 generated the necessary reloc. */
11632 if ((off & 1) != 0)
11633 off &= ~1;
11634 else
11635 {
11636 Elf_Internal_Rela outrel;
11637 int isrofixup = 0;
11638
11639 if (dynreloc_st_type == STT_GNU_IFUNC)
11640 outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11641 else if (bfd_link_pic (info))
11642 outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11643 else
11644 {
11645 outrel.r_info = 0;
11646 if (globals->fdpic_p)
11647 isrofixup = 1;
11648 }
11649
11650 /* The GOT entry is initialized to zero by default.
11651 See if we should install a different value. */
11652 if (globals->use_rel || outrel.r_info == 0)
11653 bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11654
11655 if (isrofixup)
11656 arm_elf_add_rofixup (output_bfd,
11657 globals->srofixup,
11658 sgot->output_section->vma
11659 + sgot->output_offset + off);
11660
11661 else if (outrel.r_info != 0)
11662 {
11663 outrel.r_addend = addend + dynreloc_value;
11664 outrel.r_offset = (sgot->output_section->vma
11665 + sgot->output_offset
11666 + off);
11667 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11668 }
11669
11670 local_got_offsets[r_symndx] |= 1;
11671 }
11672
11673 value = sgot->output_offset + off;
11674 }
11675 if (r_type != R_ARM_GOT32)
11676 value += sgot->output_section->vma;
11677
11678 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11679 contents, rel->r_offset, value,
11680 rel->r_addend);
11681
11682 case R_ARM_TLS_LDO32:
11683 value = value - dtpoff_base (info);
11684
11685 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11686 contents, rel->r_offset, value,
11687 rel->r_addend);
11688
11689 case R_ARM_TLS_LDM32:
11690 case R_ARM_TLS_LDM32_FDPIC:
11691 {
11692 bfd_vma off;
11693
11694 if (sgot == NULL)
11695 abort ();
11696
11697 off = globals->tls_ldm_got.offset;
11698
11699 if ((off & 1) != 0)
11700 off &= ~1;
11701 else
11702 {
11703 /* If we don't know the module number, create a relocation
11704 for it. */
11705 if (bfd_link_dll (info))
11706 {
11707 Elf_Internal_Rela outrel;
11708
11709 if (srelgot == NULL)
11710 abort ();
11711
11712 outrel.r_addend = 0;
11713 outrel.r_offset = (sgot->output_section->vma
11714 + sgot->output_offset + off);
11715 outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11716
11717 if (globals->use_rel)
11718 bfd_put_32 (output_bfd, outrel.r_addend,
11719 sgot->contents + off);
11720
11721 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11722 }
11723 else
11724 bfd_put_32 (output_bfd, 1, sgot->contents + off);
11725
11726 globals->tls_ldm_got.offset |= 1;
11727 }
11728
11729 if (r_type == R_ARM_TLS_LDM32_FDPIC)
11730 {
11731 bfd_put_32(output_bfd,
11732 globals->root.sgot->output_offset + off,
11733 contents + rel->r_offset);
11734
11735 return bfd_reloc_ok;
11736 }
11737 else
11738 {
11739 value = sgot->output_section->vma + sgot->output_offset + off
11740 - (input_section->output_section->vma
11741 + input_section->output_offset + rel->r_offset);
11742
11743 return _bfd_final_link_relocate (howto, input_bfd, input_section,
11744 contents, rel->r_offset, value,
11745 rel->r_addend);
11746 }
11747 }
11748
11749 case R_ARM_TLS_CALL:
11750 case R_ARM_THM_TLS_CALL:
11751 case R_ARM_TLS_GD32:
11752 case R_ARM_TLS_GD32_FDPIC:
11753 case R_ARM_TLS_IE32:
11754 case R_ARM_TLS_IE32_FDPIC:
11755 case R_ARM_TLS_GOTDESC:
11756 case R_ARM_TLS_DESCSEQ:
11757 case R_ARM_THM_TLS_DESCSEQ:
11758 {
11759 bfd_vma off, offplt;
11760 int indx = 0;
11761 char tls_type;
11762
11763 BFD_ASSERT (sgot != NULL);
11764
11765 if (h != NULL)
11766 {
11767 bfd_boolean dyn;
11768 dyn = globals->root.dynamic_sections_created;
11769 if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11770 bfd_link_pic (info),
11771 h)
11772 && (!bfd_link_pic (info)
11773 || !SYMBOL_REFERENCES_LOCAL (info, h)))
11774 {
11775 *unresolved_reloc_p = FALSE;
11776 indx = h->dynindx;
11777 }
11778 off = h->got.offset;
11779 offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11780 tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11781 }
11782 else
11783 {
11784 BFD_ASSERT (local_got_offsets != NULL);
11785 off = local_got_offsets[r_symndx];
11786 offplt = local_tlsdesc_gotents[r_symndx];
11787 tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11788 }
11789
11790 /* Linker relaxation happens from one of the
11791 R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE. */
11792 if (ELF32_R_TYPE(rel->r_info) != r_type)
11793 tls_type = GOT_TLS_IE;
11794
11795 BFD_ASSERT (tls_type != GOT_UNKNOWN);
11796
11797 if ((off & 1) != 0)
11798 off &= ~1;
11799 else
11800 {
11801 bfd_boolean need_relocs = FALSE;
11802 Elf_Internal_Rela outrel;
11803 int cur_off = off;
11804
11805 /* The GOT entries have not been initialized yet. Do it
11806 now, and emit any relocations. If both an IE GOT and a
11807 GD GOT are necessary, we emit the GD first. */
11808
11809 if ((bfd_link_dll (info) || indx != 0)
11810 && (h == NULL
11811 || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11812 && !resolved_to_zero)
11813 || h->root.type != bfd_link_hash_undefweak))
11814 {
11815 need_relocs = TRUE;
11816 BFD_ASSERT (srelgot != NULL);
11817 }
11818
11819 if (tls_type & GOT_TLS_GDESC)
11820 {
11821 bfd_byte *loc;
11822
11823 /* We should have relaxed, unless this is an undefined
11824 weak symbol. */
11825 BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11826 || bfd_link_dll (info));
11827 BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11828 <= globals->root.sgotplt->size);
11829
11830 outrel.r_addend = 0;
11831 outrel.r_offset = (globals->root.sgotplt->output_section->vma
11832 + globals->root.sgotplt->output_offset
11833 + offplt
11834 + globals->sgotplt_jump_table_size);
11835
11836 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11837 sreloc = globals->root.srelplt;
11838 loc = sreloc->contents;
11839 loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11840 BFD_ASSERT (loc + RELOC_SIZE (globals)
11841 <= sreloc->contents + sreloc->size);
11842
11843 SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11844
11845 /* For globals, the first word in the relocation gets
11846 the relocation index and the top bit set, or zero,
11847 if we're binding now. For locals, it gets the
11848 symbol's offset in the tls section. */
11849 bfd_put_32 (output_bfd,
11850 !h ? value - elf_hash_table (info)->tls_sec->vma
11851 : info->flags & DF_BIND_NOW ? 0
11852 : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11853 globals->root.sgotplt->contents + offplt
11854 + globals->sgotplt_jump_table_size);
11855
11856 /* Second word in the relocation is always zero. */
11857 bfd_put_32 (output_bfd, 0,
11858 globals->root.sgotplt->contents + offplt
11859 + globals->sgotplt_jump_table_size + 4);
11860 }
11861 if (tls_type & GOT_TLS_GD)
11862 {
11863 if (need_relocs)
11864 {
11865 outrel.r_addend = 0;
11866 outrel.r_offset = (sgot->output_section->vma
11867 + sgot->output_offset
11868 + cur_off);
11869 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11870
11871 if (globals->use_rel)
11872 bfd_put_32 (output_bfd, outrel.r_addend,
11873 sgot->contents + cur_off);
11874
11875 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11876
11877 if (indx == 0)
11878 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11879 sgot->contents + cur_off + 4);
11880 else
11881 {
11882 outrel.r_addend = 0;
11883 outrel.r_info = ELF32_R_INFO (indx,
11884 R_ARM_TLS_DTPOFF32);
11885 outrel.r_offset += 4;
11886
11887 if (globals->use_rel)
11888 bfd_put_32 (output_bfd, outrel.r_addend,
11889 sgot->contents + cur_off + 4);
11890
11891 elf32_arm_add_dynreloc (output_bfd, info,
11892 srelgot, &outrel);
11893 }
11894 }
11895 else
11896 {
11897 /* If we are not emitting relocations for a
11898 general dynamic reference, then we must be in a
11899 static link or an executable link with the
11900 symbol binding locally. Mark it as belonging
11901 to module 1, the executable. */
11902 bfd_put_32 (output_bfd, 1,
11903 sgot->contents + cur_off);
11904 bfd_put_32 (output_bfd, value - dtpoff_base (info),
11905 sgot->contents + cur_off + 4);
11906 }
11907
11908 cur_off += 8;
11909 }
11910
11911 if (tls_type & GOT_TLS_IE)
11912 {
11913 if (need_relocs)
11914 {
11915 if (indx == 0)
11916 outrel.r_addend = value - dtpoff_base (info);
11917 else
11918 outrel.r_addend = 0;
11919 outrel.r_offset = (sgot->output_section->vma
11920 + sgot->output_offset
11921 + cur_off);
11922 outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11923
11924 if (globals->use_rel)
11925 bfd_put_32 (output_bfd, outrel.r_addend,
11926 sgot->contents + cur_off);
11927
11928 elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11929 }
11930 else
11931 bfd_put_32 (output_bfd, tpoff (info, value),
11932 sgot->contents + cur_off);
11933 cur_off += 4;
11934 }
11935
11936 if (h != NULL)
11937 h->got.offset |= 1;
11938 else
11939 local_got_offsets[r_symndx] |= 1;
11940 }
11941
11942 if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11943 off += 8;
11944 else if (tls_type & GOT_TLS_GDESC)
11945 off = offplt;
11946
11947 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11948 || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11949 {
11950 bfd_signed_vma offset;
11951 /* TLS stubs are arm mode. The original symbol is a
11952 data object, so branch_type is bogus. */
11953 branch_type = ST_BRANCH_TO_ARM;
11954 enum elf32_arm_stub_type stub_type
11955 = arm_type_of_stub (info, input_section, rel,
11956 st_type, &branch_type,
11957 (struct elf32_arm_link_hash_entry *)h,
11958 globals->tls_trampoline, globals->root.splt,
11959 input_bfd, sym_name);
11960
11961 if (stub_type != arm_stub_none)
11962 {
11963 struct elf32_arm_stub_hash_entry *stub_entry
11964 = elf32_arm_get_stub_entry
11965 (input_section, globals->root.splt, 0, rel,
11966 globals, stub_type);
11967 offset = (stub_entry->stub_offset
11968 + stub_entry->stub_sec->output_offset
11969 + stub_entry->stub_sec->output_section->vma);
11970 }
11971 else
11972 offset = (globals->root.splt->output_section->vma
11973 + globals->root.splt->output_offset
11974 + globals->tls_trampoline);
11975
11976 if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11977 {
11978 unsigned long inst;
11979
11980 offset -= (input_section->output_section->vma
11981 + input_section->output_offset
11982 + rel->r_offset + 8);
11983
11984 inst = offset >> 2;
11985 inst &= 0x00ffffff;
11986 value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11987 }
11988 else
11989 {
11990 /* Thumb blx encodes the offset in a complicated
11991 fashion. */
11992 unsigned upper_insn, lower_insn;
11993 unsigned neg;
11994
11995 offset -= (input_section->output_section->vma
11996 + input_section->output_offset
11997 + rel->r_offset + 4);
11998
11999 if (stub_type != arm_stub_none
12000 && arm_stub_is_thumb (stub_type))
12001 {
12002 lower_insn = 0xd000;
12003 }
12004 else
12005 {
12006 lower_insn = 0xc000;
12007 /* Round up the offset to a word boundary. */
12008 offset = (offset + 2) & ~2;
12009 }
12010
12011 neg = offset < 0;
12012 upper_insn = (0xf000
12013 | ((offset >> 12) & 0x3ff)
12014 | (neg << 10));
12015 lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
12016 | (((!((offset >> 22) & 1)) ^ neg) << 11)
12017 | ((offset >> 1) & 0x7ff);
12018 bfd_put_16 (input_bfd, upper_insn, hit_data);
12019 bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12020 return bfd_reloc_ok;
12021 }
12022 }
12023 /* These relocations needs special care, as besides the fact
12024 they point somewhere in .gotplt, the addend must be
12025 adjusted accordingly depending on the type of instruction
12026 we refer to. */
12027 else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
12028 {
12029 unsigned long data, insn;
12030 unsigned thumb;
12031
12032 data = bfd_get_signed_32 (input_bfd, hit_data);
12033 thumb = data & 1;
12034 data &= ~1ul;
12035
12036 if (thumb)
12037 {
12038 insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12039 if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12040 insn = (insn << 16)
12041 | bfd_get_16 (input_bfd,
12042 contents + rel->r_offset - data + 2);
12043 if ((insn & 0xf800c000) == 0xf000c000)
12044 /* bl/blx */
12045 value = -6;
12046 else if ((insn & 0xffffff00) == 0x4400)
12047 /* add */
12048 value = -5;
12049 else
12050 {
12051 _bfd_error_handler
12052 /* xgettext:c-format */
12053 (_("%pB(%pA+%#" PRIx64 "): "
12054 "unexpected %s instruction '%#lx' "
12055 "referenced by TLS_GOTDESC"),
12056 input_bfd, input_section, (uint64_t) rel->r_offset,
12057 "Thumb", insn);
12058 return bfd_reloc_notsupported;
12059 }
12060 }
12061 else
12062 {
12063 insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12064
12065 switch (insn >> 24)
12066 {
12067 case 0xeb: /* bl */
12068 case 0xfa: /* blx */
12069 value = -4;
12070 break;
12071
12072 case 0xe0: /* add */
12073 value = -8;
12074 break;
12075
12076 default:
12077 _bfd_error_handler
12078 /* xgettext:c-format */
12079 (_("%pB(%pA+%#" PRIx64 "): "
12080 "unexpected %s instruction '%#lx' "
12081 "referenced by TLS_GOTDESC"),
12082 input_bfd, input_section, (uint64_t) rel->r_offset,
12083 "ARM", insn);
12084 return bfd_reloc_notsupported;
12085 }
12086 }
12087
12088 value += ((globals->root.sgotplt->output_section->vma
12089 + globals->root.sgotplt->output_offset + off)
12090 - (input_section->output_section->vma
12091 + input_section->output_offset
12092 + rel->r_offset)
12093 + globals->sgotplt_jump_table_size);
12094 }
12095 else
12096 value = ((globals->root.sgot->output_section->vma
12097 + globals->root.sgot->output_offset + off)
12098 - (input_section->output_section->vma
12099 + input_section->output_offset + rel->r_offset));
12100
12101 if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12102 r_type == R_ARM_TLS_IE32_FDPIC))
12103 {
12104 /* For FDPIC relocations, resolve to the offset of the GOT
12105 entry from the start of GOT. */
12106 bfd_put_32(output_bfd,
12107 globals->root.sgot->output_offset + off,
12108 contents + rel->r_offset);
12109
12110 return bfd_reloc_ok;
12111 }
12112 else
12113 {
12114 return _bfd_final_link_relocate (howto, input_bfd, input_section,
12115 contents, rel->r_offset, value,
12116 rel->r_addend);
12117 }
12118 }
12119
    case R_ARM_TLS_LE32:
      /* Local-exec TLS model: the value is resolved at static link
	 time via tpoff (), so it is not permitted when building a
	 shared object.  */
      if (bfd_link_dll (info))
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
	       "in shared object"),
	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
	  return bfd_reloc_notsupported;
	}
      else
	value = tpoff (info, value);

      return _bfd_final_link_relocate (howto, input_bfd, input_section,
				       contents, rel->r_offset, value,
				       rel->r_addend);
12136
    case R_ARM_V4BX:
      /* Fix up a "BX Rm" instruction for pre-v4T cores: either branch
	 to interworking glue (fix_v4bx == 2, unless Rm is PC) or
	 rewrite the instruction in place as "MOV PC, Rm".  */
      if (globals->fix_v4bx)
	{
	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	  /* Ensure that we have a BX instruction.  */
	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);

	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
	    {
	      /* Branch to veneer.  */
	      bfd_vma glue_addr;
	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
	      /* Make the branch PC-relative; the +8 accounts for the
		 ARM pipeline offset.  */
	      glue_addr -= input_section->output_section->vma
			   + input_section->output_offset
			   + rel->r_offset + 8;
	      /* Keep the condition code, substitute a B (0x0a000000)
		 with the word-aligned offset in the low 24 bits.  */
	      insn = (insn & 0xf0000000) | 0x0a000000
		     | ((glue_addr >> 2) & 0x00ffffff);
	    }
	  else
	    {
	      /* Preserve Rm (lowest four bits) and the condition code
		 (highest four bits). Other bits encode MOV PC,Rm.  */
	      insn = (insn & 0xf000000f) | 0x01a0f000;
	    }

	  bfd_put_32 (input_bfd, insn, hit_data);
	}
      return bfd_reloc_ok;
12166
    case R_ARM_MOVW_ABS_NC:
    case R_ARM_MOVT_ABS:
    case R_ARM_MOVW_PREL_NC:
    case R_ARM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the group relocations.
       Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
       and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
    case R_ARM_MOVW_BREL_NC:
    case R_ARM_MOVW_BREL:
    case R_ARM_MOVT_BREL:
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);

	if (globals->use_rel)
	  {
	    /* REL: recover the 16-bit addend from the insn's imm4:imm12
	       fields and sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-NC MOVW variant checks that the value fits in
	   16 bits; the _NC ("no check") relocations deliberately
	   don't.  */
	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants place the high half-word.  */
	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
	    || r_type == R_ARM_MOVT_BREL)
	  value >>= 16;

	/* Re-insert the 16-bit immediate as imm4:imm12.  */
	insn &= 0xfff0f000;
	insn |= value & 0xfff;
	insn |= (value & 0xf000) << 4;
	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12209
    case R_ARM_THM_MOVW_ABS_NC:
    case R_ARM_THM_MOVT_ABS:
    case R_ARM_THM_MOVW_PREL_NC:
    case R_ARM_THM_MOVT_PREL:
    /* Until we properly support segment-base-relative addressing then
       we assume the segment base to be zero, as for the above relocations.
       Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
       R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
       as R_ARM_THM_MOVT_ABS.  */
    case R_ARM_THM_MOVW_BREL_NC:
    case R_ARM_THM_MOVW_BREL:
    case R_ARM_THM_MOVT_BREL:
      {
	bfd_vma insn;

	/* Thumb-2 MOVW/MOVT is a 32-bit insn stored as two
	   half-words.  */
	insn = bfd_get_16 (input_bfd, hit_data) << 16;
	insn |= bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    /* REL: recover the 16-bit addend from the scattered
	       imm4:i:imm3:imm8 fields and sign-extend it.  */
	    addend = ((insn >> 4) & 0xf000)
		     | ((insn >> 15) & 0x0800)
		     | ((insn >> 4) & 0x0700)
		     | (insn & 0x00ff);
	    signed_addend = (addend ^ 0x8000) - 0x8000;
	  }

	value += signed_addend;

	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
	  value -= (input_section->output_section->vma
		    + input_section->output_offset + rel->r_offset);

	/* Only the non-NC MOVW variant gets an overflow check.  */
	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
	  return bfd_reloc_overflow;

	if (branch_type == ST_BRANCH_TO_THUMB)
	  value |= 1;

	/* MOVT variants place the high half-word.  */
	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
	    || r_type == R_ARM_THM_MOVT_BREL)
	  value >>= 16;

	/* Scatter the immediate back into imm4:i:imm3:imm8.  */
	insn &= 0xfbf08f00;
	insn |= (value & 0xf000) << 4;
	insn |= (value & 0x0800) << 15;
	insn |= (value & 0x0700) << 4;
	insn |= (value & 0x00ff);

	bfd_put_16 (input_bfd, insn >> 16, hit_data);
	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
      }
      return bfd_reloc_ok;
12263
    case R_ARM_ALU_PC_G0_NC:
    case R_ARM_ALU_PC_G1_NC:
    case R_ARM_ALU_PC_G0:
    case R_ARM_ALU_PC_G1:
    case R_ARM_ALU_PC_G2:
    case R_ARM_ALU_SB_G0_NC:
    case R_ARM_ALU_SB_G1_NC:
    case R_ARM_ALU_SB_G0:
    case R_ARM_ALU_SB_G1:
    case R_ARM_ALU_SB_G2:
      /* Group relocation on an ARM data-processing (ADD/SUB) insn:
	 split the PC- or SB-relative value into chunks expressible as
	 the insn's rotated 8-bit immediate; GROUP selects which chunk
	 this relocation encodes (see calculate_group_reloc_mask).  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_vma g_n;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which group of bits to select.  */
	switch (r_type)
	  {
	  case R_ARM_ALU_PC_G0_NC:
	  case R_ARM_ALU_PC_G0:
	  case R_ARM_ALU_SB_G0_NC:
	  case R_ARM_ALU_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_ALU_PC_G1_NC:
	  case R_ARM_ALU_PC_G1:
	  case R_ARM_ALU_SB_G1_NC:
	  case R_ARM_ALU_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_ALU_PC_G2:
	  case R_ARM_ALU_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    int negative;
	    bfd_vma constant = insn & 0xff;
	    bfd_vma rotation = (insn & 0xf00) >> 8;

	    if (rotation == 0)
	      signed_addend = constant;
	    else
	      {
		/* Compensate for the fact that in the instruction, the
		   rotation is stored in multiples of 2 bits.  */
		rotation *= 2;

		/* Rotate "constant" right by "rotation" bits.  */
		signed_addend = (constant >> rotation) |
				(constant << (8 * sizeof (bfd_vma) - rotation));
	      }

	    /* Determine if the instruction is an ADD or a SUB.
	       (For REL, this determines the sign of the addend.)  */
	    negative = identify_add_or_sub (insn);
	    if (negative == 0)
	      {
		_bfd_error_handler
		  /* xgettext:c-format */
		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
		     "are allowed for ALU group relocations"),
		  input_bfd, input_section, (uint64_t) rel->r_offset);
		return bfd_reloc_overflow;
	      }

	    signed_addend *= negative;
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_ALU_PC_G0_NC
	    || r_type == R_ARM_ALU_PC_G1_NC
	    || r_type == R_ARM_ALU_PC_G0
	    || r_type == R_ARM_ALU_PC_G1
	    || r_type == R_ARM_ALU_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* If the target symbol is a Thumb function, then set the
	   Thumb bit in the address.  */
	if (branch_type == ST_BRANCH_TO_THUMB)
	  signed_value |= 1;

	/* Calculate the value of the relevant G_n, in encoded
	   constant-with-rotation format.  */
	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
					  group, &residual);

	/* Check for overflow if required.  The _NC variants skip this
	   check by design.  */
	if ((r_type == R_ARM_ALU_PC_G0
	     || r_type == R_ARM_ALU_PC_G1
	     || r_type == R_ARM_ALU_PC_G2
	     || r_type == R_ARM_ALU_SB_G0
	     || r_type == R_ARM_ALU_SB_G1
	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and the ADD/SUB part of the opcode; take care
	   not to destroy the S bit.  */
	insn &= 0xff1ff000;

	/* Set the opcode according to whether the value to go in the
	   place is negative.  */
	if (signed_value < 0)
	  insn |= 1 << 22;
	else
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= g_n;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12405
    case R_ARM_LDR_PC_G0:
    case R_ARM_LDR_PC_G1:
    case R_ARM_LDR_PC_G2:
    case R_ARM_LDR_SB_G0:
    case R_ARM_LDR_SB_G1:
    case R_ARM_LDR_SB_G2:
      /* Group relocation on an LDR/STR-style insn with a 12-bit
	 unsigned offset field and a U (add/subtract) bit: the residual
	 left after removing groups 0..n-1 must fit in 12 bits.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDR_PC_G0:
	  case R_ARM_LDR_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDR_PC_G1:
	  case R_ARM_LDR_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDR_PC_G2:
	  case R_ARM_LDR_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (1 << 23) gives the sign of the 12-bit offset.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (insn & 0xfff);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDR_PC_G0
	    || r_type == R_ARM_LDR_PC_G1
	    || r_type == R_ARM_LDR_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x1000)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff000;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12493
    case R_ARM_LDRS_PC_G0:
    case R_ARM_LDRS_PC_G1:
    case R_ARM_LDRS_PC_G2:
    case R_ARM_LDRS_SB_G0:
    case R_ARM_LDRS_SB_G1:
    case R_ARM_LDRS_SB_G2:
      /* Group relocation on an LDRD/STRD/LDRH-style insn whose 8-bit
	 offset is split into two nibbles (bits 11:8 and 3:0) plus a
	 U bit: the residual must fit in 8 bits.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDRS_PC_G0:
	  case R_ARM_LDRS_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDRS_PC_G1:
	  case R_ARM_LDRS_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDRS_PC_G2:
	  case R_ARM_LDRS_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (1 << 23) gives the sign; the offset is the
	       high nibble (bits 11:8) and low nibble (bits 3:0).  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDRS_PC_G0
	    || r_type == R_ARM_LDRS_PC_G1
	    || r_type == R_ARM_LDRS_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  */
	if (residual >= 0x100)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7ff0f0;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12581
    case R_ARM_LDC_PC_G0:
    case R_ARM_LDC_PC_G1:
    case R_ARM_LDC_PC_G2:
    case R_ARM_LDC_SB_G0:
    case R_ARM_LDC_SB_G1:
    case R_ARM_LDC_SB_G2:
      /* Group relocation on an LDC/STC-style insn whose 8-bit offset
	 field is scaled by 4 (stored as offset >> 2) with a U bit.  */
      {
	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
	bfd_vma pc = input_section->output_section->vma
		     + input_section->output_offset + rel->r_offset;
	/* sb is the origin of the *segment* containing the symbol.  */
	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
	bfd_vma residual;
	bfd_signed_vma signed_value;
	int group = 0;

	/* Determine which groups of bits to calculate.  */
	switch (r_type)
	  {
	  case R_ARM_LDC_PC_G0:
	  case R_ARM_LDC_SB_G0:
	    group = 0;
	    break;

	  case R_ARM_LDC_PC_G1:
	  case R_ARM_LDC_SB_G1:
	    group = 1;
	    break;

	  case R_ARM_LDC_PC_G2:
	  case R_ARM_LDC_SB_G2:
	    group = 2;
	    break;

	  default:
	    abort ();
	  }

	/* If REL, extract the addend from the insn.  If RELA, it will
	   have already been fetched for us.  */
	if (globals->use_rel)
	  {
	    /* The U bit (1 << 23) gives the sign; the stored 8-bit
	       offset is in words, hence the << 2.  */
	    int negative = (insn & (1 << 23)) ? 1 : -1;
	    signed_addend = negative * ((insn & 0xff) << 2);
	  }

	/* Compute the value (X) to go in the place.  */
	if (r_type == R_ARM_LDC_PC_G0
	    || r_type == R_ARM_LDC_PC_G1
	    || r_type == R_ARM_LDC_PC_G2)
	  /* PC relative.  */
	  signed_value = value - pc + signed_addend;
	else
	  /* Section base relative.  */
	  signed_value = value - sb + signed_addend;

	/* Calculate the value of the relevant G_{n-1} to obtain
	   the residual at that stage.  */
	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
				    group - 1, &residual);

	/* Check for overflow.  (The absolute value to go in the place must be
	   divisible by four and, after having been divided by four, must
	   fit in eight bits.)  */
	if ((residual & 0x3) != 0 || residual >= 0x400)
	  {
	    _bfd_error_handler
	      /* xgettext:c-format */
	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
		 "splitting %#" PRIx64 " for group relocation %s"),
	       input_bfd, input_section, (uint64_t) rel->r_offset,
	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
	       howto->name);
	    return bfd_reloc_overflow;
	  }

	/* Mask out the value and U bit.  */
	insn &= 0xff7fff00;

	/* Set the U bit if the value to go in the place is non-negative.  */
	if (signed_value >= 0)
	  insn |= 1 << 23;

	/* Encode the offset.  */
	insn |= residual >> 2;

	bfd_put_32 (input_bfd, insn, hit_data);
      }
      return bfd_reloc_ok;
12671
    case R_ARM_THM_ALU_ABS_G0_NC:
    case R_ARM_THM_ALU_ABS_G1_NC:
    case R_ARM_THM_ALU_ABS_G2_NC:
    case R_ARM_THM_ALU_ABS_G3_NC:
      /* 16-bit Thumb insn with an 8-bit immediate: install byte N of
	 the absolute address, where N (0..3) is selected by the
	 relocation type via SHIFT_ARRAY.  No overflow check (_NC).  */
      {
	const int shift_array[4] = {0, 8, 16, 24};
	bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma addr = value;
	int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];

	/* Compute address.  */
	if (globals->use_rel)
	  signed_addend = insn & 0xff;
	addr += signed_addend;
	if (branch_type == ST_BRANCH_TO_THUMB)
	  addr |= 1;
	/* Clean imm8 insn.  */
	insn &= 0xff00;
	/* And update with correct part of address.  */
	insn |= (addr >> shift) & 0xff;
	/* Update insn.  */
	bfd_put_16 (input_bfd, insn, hit_data);
      }

      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
12698
    case R_ARM_GOTOFFFUNCDESC:
      /* FDPIC: resolve to the GOT offset of the symbol's function
	 descriptor, and make sure the descriptor itself gets filled
	 in (with R_ARM_FUNCDESC_VALUE or rofixups) exactly once.  */
      {
	if (h == NULL)
	  {
	    /* Local symbol: the descriptor is tracked per-bfd in
	       local_fdpic_cnts; bit 0 of funcdesc_offset flags
	       "already emitted".  */
	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, (offset + sgot->output_offset)
		       , contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
	       not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    int dynindx;
	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
	    bfd_vma addr;
	    bfd_vma seg = -1;

	    /* For static binaries, sym_sec can be null.  */
	    if (sym_sec)
	      {
		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		addr = dynreloc_value - sym_sec->output_section->vma;
	      }
	    else
	      {
		dynindx = 0;
		addr = 0;
	      }

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* This case cannot occur since funcdesc is allocated by
	       the dynamic loader so we cannot resolve the relocation.  */
	    if (h->dynindx != -1)
	      abort();

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, (offset + sgot->output_offset),
		       contents + rel->r_offset);
	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &eh->fdpic_cnts.funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
12759
    case R_ARM_GOTFUNCDESC:
      /* FDPIC: resolve to the GOT slot that holds the address of the
	 symbol's function descriptor, and attach either a dynamic
	 relocation or a rofixup to that slot (once, tracked by bit 0
	 of gotfuncdesc_offset).  */
      {
	if (h != NULL)
	  {
	    Elf_Internal_Rela outrel;

	    /* Resolve relocation.  */
	    bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
				    + sgot->output_offset),
		       contents + rel->r_offset);
	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
	    if(h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc(output_bfd, info,
				      &eh->fdpic_cnts.funcdesc_offset,
				      dynindx, offset, addr, dynreloc_value, seg);
	      }

	    /* Add a dynamic relocation on GOT entry if not already done.  */
	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
	      {
		if (h->dynindx == -1)
		  {
		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		    /* Undefined weak symbols resolve to 0; otherwise
		       store the descriptor's address in the slot.  */
		    if (h->root.type == bfd_link_hash_undefweak)
		      bfd_put_32(output_bfd, 0, sgot->contents
				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		    else
		      bfd_put_32(output_bfd, sgot->output_section->vma
				 + sgot->output_offset
				 + (eh->fdpic_cnts.funcdesc_offset & ~1),
				 sgot->contents
				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
		  }
		else
		  {
		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
		  }
		outrel.r_offset = sgot->output_section->vma
				  + sgot->output_offset
				  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
		outrel.r_addend = 0;
		/* NOTE: braceless nesting below — the final "else"
		   binds to the OUTER "if", since the inner if/else
		   pair is complete; i.e. a dynamic reloc is emitted
		   whenever dynindx != -1 or the link is PIC.  */
		if (h->dynindx == -1 && !bfd_link_pic(info))
		  if (h->root.type == bfd_link_hash_undefweak)
		    arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
		  else
		    arm_elf_add_rofixup(output_bfd, globals->srofixup,
					outrel.r_offset);
		else
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
	      }
	  }
	else
	  {
	    /* Such relocation on static function should not have been
	       emitted by the compiler.  */
	    abort();
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
12840
    case R_ARM_FUNCDESC:
      /* FDPIC: a static pointer to a function descriptor.  Resolve the
	 place to the descriptor's address, and turn the static
	 relocation into an R_ARM_RELATIVE dynamic reloc (PIC), a
	 rofixup (executable), or an R_ARM_FUNCDESC dynamic reloc
	 (symbol visible to the dynamic linker).  */
      {
	if (h == NULL)
	  {
	    /* Local symbol: descriptor tracked per-bfd; bit 0 of
	       funcdesc_offset flags "already emitted".  */
	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
	    Elf_Internal_Rela outrel;
	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
	    bfd_vma seg = -1;

	    if (bfd_link_pic(info) && dynindx == 0)
	      abort();

	    /* Replace static FUNCDESC relocation with a
	       R_ARM_RELATIVE dynamic relocation or with a rofixup for
	       executable.  */
	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
	    outrel.r_offset = input_section->output_section->vma
			      + input_section->output_offset + rel->r_offset;
	    outrel.r_addend = 0;
	    if (bfd_link_pic(info))
	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	    else
	      arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);

	    bfd_put_32 (input_bfd, sgot->output_section->vma
			+ sgot->output_offset + offset, hit_data);

	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
	    arm_elf_fill_funcdesc(output_bfd, info,
				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
				  dynindx, offset, addr, dynreloc_value, seg);
	  }
	else
	  {
	    if (h->dynindx == -1)
	      {
		int dynindx;
		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
		bfd_vma addr;
		bfd_vma seg = -1;
		Elf_Internal_Rela outrel;

		/* For static binaries sym_sec can be null.  */
		if (sym_sec)
		  {
		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
		    addr = dynreloc_value - sym_sec->output_section->vma;
		  }
		else
		  {
		    dynindx = 0;
		    addr = 0;
		  }

		if (bfd_link_pic(info) && dynindx == 0)
		  abort();

		/* Replace static FUNCDESC relocation with a
		   R_ARM_RELATIVE dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
		outrel.r_offset = input_section->output_section->vma
				  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		if (bfd_link_pic(info))
		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
		else
		  arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);

		bfd_put_32 (input_bfd, sgot->output_section->vma
			    + sgot->output_offset + offset, hit_data);

		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
		arm_elf_fill_funcdesc(output_bfd, info,
				      &eh->fdpic_cnts.funcdesc_offset,
				      dynindx, offset, addr, dynreloc_value, seg);
	      }
	    else
	      {
		Elf_Internal_Rela outrel;

		/* Add a dynamic relocation.  */
		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
		outrel.r_offset = input_section->output_section->vma
				  + input_section->output_offset + rel->r_offset;
		outrel.r_addend = 0;
		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
	      }
	  }
      }
      *unresolved_reloc_p = FALSE;
      return bfd_reloc_ok;
12934
    case R_ARM_THM_BF16:
      /* 17-bit branch-offset relocation on a 32-bit Thumb insn; the
	 immediate is scattered as immA (upper insn bits 4:0), immB
	 (lower insn bits 10:1) and immC (lower insn bit 11).
	 NOTE(review): appears to be for the Armv8.1-M BF/BFL
	 (branch-future) instructions — confirm against the ABI.  */
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x001f);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    addend |= 1;
	    /* Sign extend.  */
	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xffe0) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }
12975
    case R_ARM_THM_BF12:
      /* 13-bit variant of the branch-offset relocation above; only a
	 single immA bit (upper insn bit 0) plus immB/immC.  */
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x0001);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    addend |= 1;
	    /* Sign extend.  */
	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
	    signed_addend = addend;
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x00001000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xfffe) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }
13017
    case R_ARM_THM_BF18:
      /* 19-bit variant of the branch-offset relocation above; immA is
	 seven bits wide (upper insn bits 6:0) plus immB/immC.  */
      {
	bfd_vma relocation;
	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);

	if (globals->use_rel)
	  {
	    bfd_vma immA = (upper_insn & 0x007f);
	    bfd_vma immB = (lower_insn & 0x07fe) >> 1;
	    bfd_vma immC = (lower_insn & 0x0800) >> 11;
	    addend = (immA << 12);
	    addend |= (immB << 2);
	    addend |= (immC << 1);
	    addend |= 1;
	    /* Sign extend.  */
	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
	    signed_addend = addend;
	  }

	relocation = value + signed_addend;
	relocation -= (input_section->output_section->vma
		       + input_section->output_offset
		       + rel->r_offset);

	/* Put RELOCATION back into the insn.  */
	{
	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
	  bfd_vma immC = (relocation & 0x00000002) >> 1;

	  upper_insn = (upper_insn & 0xff80) | immA;
	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
	}

	/* Put the relocated value back in the object file:  */
	bfd_put_16 (input_bfd, upper_insn, hit_data);
	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);

	return bfd_reloc_ok;
      }

    default:
      return bfd_reloc_notsupported;
13062 }
13063 }
13064
/* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  Used when
   performing a relocatable REL-format link, where the addend lives in
   the section contents: the addend is read out of the instruction,
   adjusted, and written back in place.  */
static void
arm_add_to_rel (bfd * abfd,
		bfd_byte * address,
		reloc_howto_type * howto,
		bfd_signed_vma increment)
{
  bfd_signed_vma addend;

  if (howto->type == R_ARM_THM_CALL
      || howto->type == R_ARM_THM_JUMP24)
    {
      int upper_insn, lower_insn;
      int upper, lower;

      /* Thumb BL/B.W: the branch offset is split across the two 16-bit
	 instruction halves, 11 bits in each.  */
      upper_insn = bfd_get_16 (abfd, address);
      lower_insn = bfd_get_16 (abfd, address + 2);
      upper = upper_insn & 0x7ff;
      lower = lower_insn & 0x7ff;

      /* Reassemble the byte offset, apply the adjustment, then drop the
	 halfword-alignment bit before splitting it up again.  */
      addend = (upper << 12) | (lower << 1);
      addend += increment;
      addend >>= 1;

      upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
      lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);

      bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
      bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
    }
  else
    {
      bfd_vma contents;

      contents = bfd_get_32 (abfd, address);

      /* Get the (signed) value from the instruction.  */
      addend = contents & howto->src_mask;
      if (addend & ((howto->src_mask + 1) >> 1))
	{
	  bfd_signed_vma mask;

	  /* Top bit of the field is set: sign-extend into the high bits.  */
	  mask = -1;
	  mask &= ~ howto->src_mask;
	  addend |= mask;
	}

      /* Add in the increment, (which is a byte value).  */
      switch (howto->type)
	{
	default:
	  addend += increment;
	  break;

	case R_ARM_PC24:
	case R_ARM_PLT32:
	case R_ARM_CALL:
	case R_ARM_JUMP24:
	  /* Branch offsets are stored shifted; scale the stored value up
	     to bytes before adding the (byte) increment, then shift the
	     result back down for storage.  */
	  addend <<= howto->size;
	  addend += increment;

	  /* Should we check for overflow here ?  */

	  /* Drop any undesired bits.  */
	  addend >>= howto->rightshift;
	  break;
	}

      contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);

      bfd_put_32 (abfd, contents, address);
    }
}
13138
/* Nonzero when R_TYPE is any ARM TLS relocation, including the GNU
   descriptor-based dialect matched by IS_ARM_TLS_GNU_RELOC below.  */
#define IS_ARM_TLS_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GD32		\
   || (R_TYPE) == R_ARM_TLS_GD32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_LDO32	\
   || (R_TYPE) == R_ARM_TLS_LDM32	\
   || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
   || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
   || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
   || (R_TYPE) == R_ARM_TLS_TPOFF32	\
   || (R_TYPE) == R_ARM_TLS_LE32	\
   || (R_TYPE) == R_ARM_TLS_IE32	\
   || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
   || IS_ARM_TLS_GNU_RELOC (R_TYPE))

/* Specific set of relocations for the gnu tls dialect.  */
#define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
  ((R_TYPE) == R_ARM_TLS_GOTDESC	\
   || (R_TYPE) == R_ARM_TLS_CALL	\
   || (R_TYPE) == R_ARM_THM_TLS_CALL	\
   || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
   || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13160
/* Relocate an ARM ELF section.

   Apply the relocations in RELOCS to the CONTENTS of INPUT_SECTION
   (from INPUT_BFD), destined for OUTPUT_BFD.  LOCAL_SYMS and
   LOCAL_SECTIONS describe the input file's local symbols.  Returns
   FALSE on a hard error.  */

static bfd_boolean
elf32_arm_relocate_section (bfd * output_bfd,
			    struct bfd_link_info * info,
			    bfd * input_bfd,
			    asection * input_section,
			    bfd_byte * contents,
			    Elf_Internal_Rela * relocs,
			    Elf_Internal_Sym * local_syms,
			    asection ** local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend;
  const char *name;
  struct elf32_arm_link_hash_table * globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  symtab_hdr = & elf_symtab_hdr (input_bfd);
  sym_hashes = elf_sym_hashes (input_bfd);

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type * howto;
      unsigned long r_symndx;
      Elf_Internal_Sym * sym;
      asection * sec;
      struct elf_link_hash_entry * h;
      bfd_vma relocation;
      bfd_reloc_status_type r;
      arelent bfd_reloc;
      char sym_type;
      bfd_boolean unresolved_reloc = FALSE;
      char *error_message = NULL;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      /* Map vendor-specific/old reloc numbers onto the ones handled
	 internally.  */
      r_type = arm_real_reloc_type (globals, r_type);

      /* Garbage-collection bookkeeping relocs need no processing here.  */
      if (   r_type == R_ARM_GNU_VTENTRY
	  || r_type == R_ARM_GNU_VTINHERIT)
	continue;

      howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);

      if (howto == NULL)
	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);

      h = NULL;
      sym = NULL;
      sec = NULL;

      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol.  */
	  sym = local_syms + r_symndx;
	  sym_type = ELF32_ST_TYPE (sym->st_info);
	  sec = local_sections[r_symndx];

	  /* An object file might have a reference to a local
	     undefined symbol.  This is a daft object file, but we
	     should at least do something about it.  V4BX & NONE
	     relocations do not use the symbol and are explicitly
	     allowed to use the undefined symbol, so allow those.
	     Likewise for relocations against STN_UNDEF.  */
	  if (r_type != R_ARM_V4BX
	      && r_type != R_ARM_NONE
	      && r_symndx != STN_UNDEF
	      && bfd_is_und_section (sec)
	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
	    (*info->callbacks->undefined_symbol)
	      (info, bfd_elf_string_from_elf_section
	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
	       input_bfd, input_section,
	       rel->r_offset, TRUE);

	  if (globals->use_rel)
	    {
	      /* REL format: compute the base relocation value here; the
		 addend stays in the section contents.  */
	      relocation = (sec->output_section->vma
			    + sec->output_offset
			    + sym->st_value);
	      if (!bfd_link_relocatable (info)
		  && (sec->flags & SEC_MERGE)
		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
		{
		  /* SEC_MERGE section symbol: the in-place addend must be
		     extracted, mapped through the merge machinery, and
		     written back, since merged contents may move.  */
		  asection *msec;
		  bfd_vma addend, value;

		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      /* MOVW/MOVT: 16-bit immediate split into two fields
			 of the 32-bit instruction word.  */
		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      /* Thumb MOVW/MOVT: immediate scattered over both
			 16-bit instruction halves.  */
		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
			      << 16;
		      value |= bfd_get_16 (input_bfd,
					   contents + rel->r_offset + 2);
		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
			       | ((value & 0x04000000) >> 15);
		      addend = (addend ^ 0x8000) - 0x8000;
		      break;

		    default:
		      /* Only simple contiguous, unshifted addend fields can
			 be handled generically.  */
		      if (howto->rightshift
			  || (howto->src_mask & (howto->src_mask + 1)))
			{
			  _bfd_error_handler
			    /* xgettext:c-format */
			    (_("%pB(%pA+%#" PRIx64 "): "
			       "%s relocation against SEC_MERGE section"),
			     input_bfd, input_section,
			     (uint64_t) rel->r_offset, howto->name);
			  return FALSE;
			}

		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);

		      /* Get the (signed) value from the instruction.  */
		      addend = value & howto->src_mask;
		      if (addend & ((howto->src_mask + 1) >> 1))
			{
			  bfd_signed_vma mask;

			  mask = -1;
			  mask &= ~ howto->src_mask;
			  addend |= mask;
			}
		      break;
		    }

		  /* Map the addend through the merged-section layout.  */
		  msec = sec;
		  addend =
		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
		    - relocation;
		  addend += msec->output_section->vma + msec->output_offset;

		  /* Cases here must match those in the preceding
		     switch statement.  */
		  switch (r_type)
		    {
		    case R_ARM_MOVW_ABS_NC:
		    case R_ARM_MOVT_ABS:
		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
			      | (addend & 0xfff);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;

		    case R_ARM_THM_MOVW_ABS_NC:
		    case R_ARM_THM_MOVT_ABS:
		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
			      | (addend & 0xff) | ((addend & 0x0800) << 15);
		      bfd_put_16 (input_bfd, value >> 16,
				  contents + rel->r_offset);
		      bfd_put_16 (input_bfd, value,
				  contents + rel->r_offset + 2);
		      break;

		    default:
		      value = (value & ~ howto->dst_mask)
			      | (addend & howto->dst_mask);
		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
		      break;
		    }
		}
	    }
	  else
	    /* RELA format: the generic helper applies the addend.  */
	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  /* Global symbol: resolve it through the linker hash table.  */
	  bfd_boolean warned, ignored;

	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
				   r_symndx, symtab_hdr, sym_hashes,
				   h, sec, relocation,
				   unresolved_reloc, warned, ignored);

	  sym_type = h->type;
	}

      if (sec != NULL && discarded_section (sec))
	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
					 rel, 1, relend, howto, 0, contents);

      if (bfd_link_relocatable (info))
	{
	  /* This is a relocatable link.  We don't have to change
	     anything, unless the reloc is against a section symbol,
	     in which case we have to adjust according to where the
	     section symbol winds up in the output section.  */
	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
	    {
	      if (globals->use_rel)
		arm_add_to_rel (input_bfd, contents + rel->r_offset,
				howto, (bfd_signed_vma) sec->output_offset);
	      else
		rel->r_addend += sec->output_offset;
	    }
	  continue;
	}

      /* Symbol name, for diagnostics.  */
      if (h != NULL)
	name = h->root.root.string;
      else
	{
	  name = (bfd_elf_string_from_elf_section
		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
	  if (name == NULL || *name == '\0')
	    name = bfd_section_name (sec);
	}

      /* Diagnose TLS relocations used with non-TLS symbols and
	 vice versa.  */
      if (r_symndx != STN_UNDEF
	  && r_type != R_ARM_NONE
	  && (h == NULL
	      || h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
	{
	  _bfd_error_handler
	    ((sym_type == STT_TLS
	      /* xgettext:c-format */
	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
	      /* xgettext:c-format */
	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     name);
	}

      /* We call elf32_arm_final_link_relocate unless we're completely
	 done, i.e., the relaxation produced the final output we want,
	 and we won't let anybody mess with it.  Also, we have to do
	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
	 both in relaxed and non-relaxed cases.  */
      if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
	  || (IS_ARM_TLS_GNU_RELOC (r_type)
	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
		   & GOT_TLS_GDESC)))
	{
	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
				   contents, rel, h == NULL);
	  /* This may have been marked unresolved because it came from
	     a shared library.  But we've just dealt with that.  */
	  unresolved_reloc = 0;
	}
      else
	r = bfd_reloc_continue;

      if (r == bfd_reloc_continue)
	{
	  unsigned char branch_type =
	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);

	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
					     input_section, contents, rel,
					     relocation, info, sec, name,
					     sym_type, branch_type, h,
					     &unresolved_reloc,
					     &error_message);
	}

      /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
	 because such sections are not SEC_ALLOC and thus ld.so will
	 not process them.  */
      if (unresolved_reloc
	  && !((input_section->flags & SEC_DEBUGGING) != 0
	       && h->def_dynamic)
	  && _bfd_elf_section_offset (output_bfd, info, input_section,
				      rel->r_offset) != (bfd_vma) -1)
	{
	  _bfd_error_handler
	    /* xgettext:c-format */
	    (_("%pB(%pA+%#" PRIx64 "): "
	       "unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     input_section,
	     (uint64_t) rel->r_offset,
	     howto->name,
	     h->root.root.string);
	  return FALSE;
	}

      if (r != bfd_reloc_ok)
	{
	  /* Report the failure through the appropriate linker callback.  */
	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      /* If the overflowing reloc was to an undefined symbol,
		 we have already printed one error message and there
		 is no point complaining again.  */
	      if (!h || h->root.type != bfd_link_hash_undefined)
		(*info->callbacks->reloc_overflow)
		  (info, (h ? &h->root : NULL), name, howto->name,
		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
	      break;

	    case bfd_reloc_undefined:
	      (*info->callbacks->undefined_symbol)
		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
	      break;

	    case bfd_reloc_outofrange:
	      error_message = _("out of range");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      error_message = _("unsupported relocation");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      /* error_message should already be set.  */
	      goto common_error;

	    default:
	      error_message = _("unknown error");
	      /* Fall through.  */

	    common_error:
	      BFD_ASSERT (error_message != NULL);
	      (*info->callbacks->reloc_dangerous)
		(info, error_message, input_bfd, input_section, rel->r_offset);
	      break;
	    }
	}
    }

  return TRUE;
}
13506
13507 /* Add a new unwind edit to the list described by HEAD, TAIL. If TINDEX is zero,
13508 adds the edit to the start of the list. (The list must be built in order of
13509 ascending TINDEX: the function's callers are primarily responsible for
13510 maintaining that condition). */
13511
13512 static void
13513 add_unwind_table_edit (arm_unwind_table_edit **head,
13514 arm_unwind_table_edit **tail,
13515 arm_unwind_edit_type type,
13516 asection *linked_section,
13517 unsigned int tindex)
13518 {
13519 arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13520 xmalloc (sizeof (arm_unwind_table_edit));
13521
13522 new_edit->type = type;
13523 new_edit->linked_section = linked_section;
13524 new_edit->index = tindex;
13525
13526 if (tindex > 0)
13527 {
13528 new_edit->next = NULL;
13529
13530 if (*tail)
13531 (*tail)->next = new_edit;
13532
13533 (*tail) = new_edit;
13534
13535 if (!*head)
13536 (*head) = new_edit;
13537 }
13538 else
13539 {
13540 new_edit->next = *head;
13541
13542 if (!*tail)
13543 *tail = new_edit;
13544
13545 *head = new_edit;
13546 }
13547 }
13548
13549 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13550
13551 /* Increase the size of EXIDX_SEC by ADJUST bytes. ADJUST mau be negative. */
13552 static void
13553 adjust_exidx_size(asection *exidx_sec, int adjust)
13554 {
13555 asection *out_sec;
13556
13557 if (!exidx_sec->rawsize)
13558 exidx_sec->rawsize = exidx_sec->size;
13559
13560 bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13561 out_sec = exidx_sec->output_section;
13562 /* Adjust size of output section. */
13563 bfd_set_section_size (out_sec, out_sec->size +adjust);
13564 }
13565
13566 /* Insert an EXIDX_CANTUNWIND marker at the end of a section. */
13567 static void
13568 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13569 {
13570 struct _arm_elf_section_data *exidx_arm_data;
13571
13572 exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13573 add_unwind_table_edit (
13574 &exidx_arm_data->u.exidx.unwind_edit_list,
13575 &exidx_arm_data->u.exidx.unwind_edit_tail,
13576 INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13577
13578 exidx_arm_data->additional_reloc_count++;
13579
13580 adjust_exidx_size(exidx_sec, 8);
13581 }
13582
/* Scan .ARM.exidx tables, and create a list describing edits which should be
   made to those tables, such that:

     1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
     2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
	codes which have been inlined into the index).

   If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.

   The edits are applied when the tables are written
   (in elf32_arm_write_section).  */

bfd_boolean
elf32_arm_fix_exidx_coverage (asection **text_section_order,
			      unsigned int num_text_sections,
			      struct bfd_link_info *info,
			      bfd_boolean merge_exidx_entries)
{
  bfd *inp;
  unsigned int last_second_word = 0, i;
  asection *last_exidx_sec = NULL;
  asection *last_text_sec = NULL;
  /* -1: no entry seen yet; 0: CANTUNWIND; 1: inlined opcodes; 2: table
     entry.  */
  int last_unwind_type = -1;

  /* Walk over all EXIDX sections, and create backlinks from the corrsponding
     text sections.  */
  for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
    {
      asection *sec;

      for (sec = inp->sections; sec != NULL; sec = sec->next)
	{
	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;

	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
	    continue;

	  if (elf_sec->linked_to)
	    {
	      Elf_Internal_Shdr *linked_hdr
		= &elf_section_data (elf_sec->linked_to)->this_hdr;
	      struct _arm_elf_section_data *linked_sec_arm_data
		= get_arm_elf_section_data (linked_hdr->bfd_section);

	      if (linked_sec_arm_data == NULL)
		continue;

	      /* Link this .ARM.exidx section back from the text section it
		 describes.  */
	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
	    }
	}
    }

  /* Walk all text sections in order of increasing VMA.  Eilminate duplicate
     index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
     and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */

  for (i = 0; i < num_text_sections; i++)
    {
      asection *sec = text_section_order[i];
      asection *exidx_sec;
      struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
      struct _arm_elf_section_data *exidx_arm_data;
      bfd_byte *contents = NULL;
      int deleted_exidx_bytes = 0;
      bfd_vma j;
      arm_unwind_table_edit *unwind_edit_head = NULL;
      arm_unwind_table_edit *unwind_edit_tail = NULL;
      Elf_Internal_Shdr *hdr;
      bfd *ibfd;

      if (arm_data == NULL)
	continue;

      exidx_sec = arm_data->u.text.arm_exidx_sec;
      if (exidx_sec == NULL)
	{
	  /* Section has no unwind data.  */
	  if (last_unwind_type == 0 || !last_exidx_sec)
	    continue;

	  /* Ignore zero sized sections.  */
	  if (sec->size == 0)
	    continue;

	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
	  last_unwind_type = 0;
	  continue;
	}

      /* Skip /DISCARD/ sections.  */
      if (bfd_is_abs_section (exidx_sec->output_section))
	continue;

      hdr = &elf_section_data (exidx_sec)->this_hdr;
      if (hdr->sh_type != SHT_ARM_EXIDX)
	continue;

      exidx_arm_data = get_arm_elf_section_data (exidx_sec);
      if (exidx_arm_data == NULL)
	continue;

      ibfd = exidx_sec->owner;

      if (hdr->contents != NULL)
	contents = hdr->contents;
      else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
	/* An error?  */
	continue;

      if (last_unwind_type > 0)
	{
	  unsigned int first_word = bfd_get_32 (ibfd, contents);
	  /* Add cantunwind if first unwind item does not match section
	     start.  */
	  if (first_word != sec->vma)
	    {
	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
	      last_unwind_type = 0;
	    }
	}

      /* Scan the 8-byte index table entries; the second word of each
	 entry classifies it.  */
      for (j = 0; j < hdr->sh_size; j += 8)
	{
	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
	  int unwind_type;
	  int elide = 0;

	  /* An EXIDX_CANTUNWIND entry.  */
	  if (second_word == 1)
	    {
	      if (last_unwind_type == 0)
		elide = 1;
	      unwind_type = 0;
	    }
	  /* Inlined unwinding data.  Merge if equal to previous.  */
	  else if ((second_word & 0x80000000) != 0)
	    {
	      if (merge_exidx_entries
		  && last_second_word == second_word && last_unwind_type == 1)
		elide = 1;
	      unwind_type = 1;
	      last_second_word = second_word;
	    }
	  /* Normal table entry.  In theory we could merge these too,
	     but duplicate entries are likely to be much less common.  */
	  else
	    unwind_type = 2;

	  if (elide && !bfd_link_relocatable (info))
	    {
	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
				     DELETE_EXIDX_ENTRY, NULL, j / 8);

	      deleted_exidx_bytes += 8;
	    }

	  last_unwind_type = unwind_type;
	}

      /* Free contents if we allocated it ourselves.  */
      if (contents != hdr->contents)
	free (contents);

      /* Record edits to be applied later (in elf32_arm_write_section).  */
      exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
      exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;

      if (deleted_exidx_bytes > 0)
	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);

      last_exidx_sec = exidx_sec;
      last_text_sec = sec;
    }

  /* Add terminating CANTUNWIND entry.  */
  if (!bfd_link_relocatable (info) && last_exidx_sec
      && last_unwind_type != 0)
    insert_cantunwind_after (last_text_sec, last_exidx_sec);

  return TRUE;
}
13767
13768 static bfd_boolean
13769 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13770 bfd *ibfd, const char *name)
13771 {
13772 asection *sec, *osec;
13773
13774 sec = bfd_get_linker_section (ibfd, name);
13775 if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13776 return TRUE;
13777
13778 osec = sec->output_section;
13779 if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13780 return TRUE;
13781
13782 if (! bfd_set_section_contents (obfd, osec, sec->contents,
13783 sec->output_offset, sec->size))
13784 return FALSE;
13785
13786 return TRUE;
13787 }
13788
13789 static bfd_boolean
13790 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13791 {
13792 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13793 asection *sec, *osec;
13794
13795 if (globals == NULL)
13796 return FALSE;
13797
13798 /* Invoke the regular ELF backend linker to do all the work. */
13799 if (!bfd_elf_final_link (abfd, info))
13800 return FALSE;
13801
13802 /* Process stub sections (eg BE8 encoding, ...). */
13803 struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13804 unsigned int i;
13805 for (i=0; i<htab->top_id; i++)
13806 {
13807 sec = htab->stub_group[i].stub_sec;
13808 /* Only process it once, in its link_sec slot. */
13809 if (sec && i == htab->stub_group[i].link_sec->id)
13810 {
13811 osec = sec->output_section;
13812 elf32_arm_write_section (abfd, info, sec, sec->contents);
13813 if (! bfd_set_section_contents (abfd, osec, sec->contents,
13814 sec->output_offset, sec->size))
13815 return FALSE;
13816 }
13817 }
13818
13819 /* Write out any glue sections now that we have created all the
13820 stubs. */
13821 if (globals->bfd_of_glue_owner != NULL)
13822 {
13823 if (! elf32_arm_output_glue_section (info, abfd,
13824 globals->bfd_of_glue_owner,
13825 ARM2THUMB_GLUE_SECTION_NAME))
13826 return FALSE;
13827
13828 if (! elf32_arm_output_glue_section (info, abfd,
13829 globals->bfd_of_glue_owner,
13830 THUMB2ARM_GLUE_SECTION_NAME))
13831 return FALSE;
13832
13833 if (! elf32_arm_output_glue_section (info, abfd,
13834 globals->bfd_of_glue_owner,
13835 VFP11_ERRATUM_VENEER_SECTION_NAME))
13836 return FALSE;
13837
13838 if (! elf32_arm_output_glue_section (info, abfd,
13839 globals->bfd_of_glue_owner,
13840 STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13841 return FALSE;
13842
13843 if (! elf32_arm_output_glue_section (info, abfd,
13844 globals->bfd_of_glue_owner,
13845 ARM_BX_GLUE_SECTION_NAME))
13846 return FALSE;
13847 }
13848
13849 return TRUE;
13850 }
13851
/* Return a best guess for the machine number based on the attributes.
   Maps the Tag_CPU_arch build attribute (plus, for v5TE, the CPU name
   and WMMX attributes) onto a bfd_mach_arm_* value.  */

static unsigned int
bfd_arm_get_mach_from_attributes (bfd * abfd)
{
  int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);

  switch (arch)
    {
    case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
    case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
    case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
    case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;

    case TAG_CPU_ARCH_V5TE:
      {
	char * name;

	/* v5TE is also used by iWMMXt and XScale parts; disambiguate
	   via the Tag_CPU_name attribute when present.  */
	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;

	if (name)
	  {
	    if (strcmp (name, "IWMMXT2") == 0)
	      return bfd_mach_arm_iWMMXt2;

	    if (strcmp (name, "IWMMXT") == 0)
	      return bfd_mach_arm_iWMMXt;

	    if (strcmp (name, "XSCALE") == 0)
	      {
		int wmmx;

		/* XScale parts may additionally carry a WMMX unit.  */
		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
		switch (wmmx)
		  {
		  case 1: return bfd_mach_arm_iWMMXt;
		  case 2: return bfd_mach_arm_iWMMXt2;
		  default: return bfd_mach_arm_XScale;
		  }
	      }
	  }

	return bfd_mach_arm_5TE;
      }

    case TAG_CPU_ARCH_V5TEJ:
	return bfd_mach_arm_5TEJ;
    case TAG_CPU_ARCH_V6:
	return bfd_mach_arm_6;
    case TAG_CPU_ARCH_V6KZ:
	return bfd_mach_arm_6KZ;
    case TAG_CPU_ARCH_V6T2:
	return bfd_mach_arm_6T2;
    case TAG_CPU_ARCH_V6K:
	return bfd_mach_arm_6K;
    case TAG_CPU_ARCH_V7:
	return bfd_mach_arm_7;
    case TAG_CPU_ARCH_V6_M:
	return bfd_mach_arm_6M;
    case TAG_CPU_ARCH_V6S_M:
	return bfd_mach_arm_6SM;
    case TAG_CPU_ARCH_V7E_M:
	return bfd_mach_arm_7EM;
    case TAG_CPU_ARCH_V8:
	return bfd_mach_arm_8;
    case TAG_CPU_ARCH_V8R:
	return bfd_mach_arm_8R;
    case TAG_CPU_ARCH_V8M_BASE:
	return bfd_mach_arm_8M_BASE;
    case TAG_CPU_ARCH_V8M_MAIN:
	return bfd_mach_arm_8M_MAIN;
    case TAG_CPU_ARCH_V8_1M_MAIN:
	return bfd_mach_arm_8_1M_MAIN;

    default:
      /* Force entry to be added for any new known Tag_CPU_arch value.  */
      BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);

      /* Unknown Tag_CPU_arch value.  */
      return bfd_mach_arm_unknown;
    }
}
13936
13937 /* Set the right machine number. */
13938
13939 static bfd_boolean
13940 elf32_arm_object_p (bfd *abfd)
13941 {
13942 unsigned int mach;
13943
13944 mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13945
13946 if (mach == bfd_mach_arm_unknown)
13947 {
13948 if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13949 mach = bfd_mach_arm_ep9312;
13950 else
13951 mach = bfd_arm_get_mach_from_attributes (abfd);
13952 }
13953
13954 bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13955 return TRUE;
13956 }
13957
13958 /* Function to keep ARM specific flags in the ELF header. */
13959
13960 static bfd_boolean
13961 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13962 {
13963 if (elf_flags_init (abfd)
13964 && elf_elfheader (abfd)->e_flags != flags)
13965 {
13966 if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13967 {
13968 if (flags & EF_ARM_INTERWORK)
13969 _bfd_error_handler
13970 (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13971 abfd);
13972 else
13973 _bfd_error_handler
13974 (_("warning: clearing the interworking flag of %pB due to outside request"),
13975 abfd);
13976 }
13977 }
13978 else
13979 {
13980 elf_elfheader (abfd)->e_flags = flags;
13981 elf_flags_init (abfd) = TRUE;
13982 }
13983
13984 return TRUE;
13985 }
13986
13987 /* Copy backend specific data from one object module to another. */
13988
13989 static bfd_boolean
13990 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13991 {
13992 flagword in_flags;
13993 flagword out_flags;
13994
13995 if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13996 return TRUE;
13997
13998 in_flags = elf_elfheader (ibfd)->e_flags;
13999 out_flags = elf_elfheader (obfd)->e_flags;
14000
14001 if (elf_flags_init (obfd)
14002 && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
14003 && in_flags != out_flags)
14004 {
14005 /* Cannot mix APCS26 and APCS32 code. */
14006 if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14007 return FALSE;
14008
14009 /* Cannot mix float APCS and non-float APCS code. */
14010 if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14011 return FALSE;
14012
14013 /* If the src and dest have different interworking flags
14014 then turn off the interworking bit. */
14015 if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14016 {
14017 if (out_flags & EF_ARM_INTERWORK)
14018 _bfd_error_handler
14019 (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
14020 obfd, ibfd);
14021
14022 in_flags &= ~EF_ARM_INTERWORK;
14023 }
14024
14025 /* Likewise for PIC, though don't warn for this case. */
14026 if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
14027 in_flags &= ~EF_ARM_PIC;
14028 }
14029
14030 elf_elfheader (obfd)->e_flags = in_flags;
14031 elf_flags_init (obfd) = TRUE;
14032
14033 return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
14034 }
14035
/* Values for Tag_ABI_PCS_R9_use (see the AEABI build attributes
   specification for their precise meanings).  */
enum
{
  AEABI_R9_V6,		/* R9 used as an ordinary variable register (v6).  */
  AEABI_R9_SB,		/* R9 used as the static base (SB).  */
  AEABI_R9_TLS,		/* R9 used to hold the thread-local pointer.  */
  AEABI_R9_unused	/* R9 not used by the code.  */
};

/* Values for Tag_ABI_PCS_RW_data.  */
enum
{
  AEABI_PCS_RW_data_absolute,	/* RW data addressed absolutely.  */
  AEABI_PCS_RW_data_PCrel,	/* RW data addressed PC-relative.  */
  AEABI_PCS_RW_data_SBrel,	/* RW data addressed SB-relative.  */
  AEABI_PCS_RW_data_unused	/* No RW data used.  */
};

/* Values for Tag_ABI_enum_size.  */
enum
{
  AEABI_enum_unused,		/* Enums not used.  */
  AEABI_enum_short,		/* Enums are as small as possible.  */
  AEABI_enum_wide,		/* Enums are at least 32 bits.  */
  AEABI_enum_forced_wide	/* Enums are 32 bits regardless.  */
};
14062
14063 /* Determine whether an object attribute tag takes an integer, a
14064 string or both. */
14065
14066 static int
14067 elf32_arm_obj_attrs_arg_type (int tag)
14068 {
14069 if (tag == Tag_compatibility)
14070 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14071 else if (tag == Tag_nodefaults)
14072 return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14073 else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14074 return ATTR_TYPE_FLAG_STR_VAL;
14075 else if (tag < 32)
14076 return ATTR_TYPE_FLAG_INT_VAL;
14077 else
14078 return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14079 }
14080
14081 /* The ABI defines that Tag_conformance should be emitted first, and that
14082 Tag_nodefaults should be second (if either is defined). This sets those
14083 two positions, and bumps up the position of all the remaining tags to
14084 compensate. */
14085 static int
14086 elf32_arm_obj_attrs_order (int num)
14087 {
14088 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14089 return Tag_conformance;
14090 if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14091 return Tag_nodefaults;
14092 if ((num - 2) < Tag_nodefaults)
14093 return num - 2;
14094 if ((num - 1) < Tag_conformance)
14095 return num - 1;
14096 return num;
14097 }
14098
14099 /* Attribute numbers >=64 (mod 128) can be safely ignored. */
14100 static bfd_boolean
14101 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14102 {
14103 if ((tag & 127) < 64)
14104 {
14105 _bfd_error_handler
14106 (_("%pB: unknown mandatory EABI object attribute %d"),
14107 abfd, tag);
14108 bfd_set_error (bfd_error_bad_value);
14109 return FALSE;
14110 }
14111 else
14112 {
14113 _bfd_error_handler
14114 (_("warning: %pB: unknown EABI object attribute %d"),
14115 abfd, tag);
14116 return TRUE;
14117 }
14118 }
14119
14120 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14121 Returns -1 if no architecture could be read. */
14122
14123 static int
14124 get_secondary_compatible_arch (bfd *abfd)
14125 {
14126 obj_attribute *attr =
14127 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14128
14129 /* Note: the tag and its argument below are uleb128 values, though
14130 currently-defined values fit in one byte for each. */
14131 if (attr->s
14132 && attr->s[0] == Tag_CPU_arch
14133 && (attr->s[1] & 128) != 128
14134 && attr->s[2] == 0)
14135 return attr->s[1];
14136
14137 /* This tag is "safely ignorable", so don't complain if it looks funny. */
14138 return -1;
14139 }
14140
14141 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14142 The tag is removed if ARCH is -1. */
14143
14144 static void
14145 set_secondary_compatible_arch (bfd *abfd, int arch)
14146 {
14147 obj_attribute *attr =
14148 &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14149
14150 if (arch == -1)
14151 {
14152 attr->s = NULL;
14153 return;
14154 }
14155
14156 /* Note: the tag and its argument below are uleb128 values, though
14157 currently-defined values fit in one byte for each. */
14158 if (!attr->s)
14159 attr->s = (char *) bfd_alloc (abfd, 3);
14160 attr->s[0] = Tag_CPU_arch;
14161 attr->s[1] = arch;
14162 attr->s[2] = '\0';
14163 }
14164
14165 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14166 into account. */
14167
14168 static int
14169 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14170 int newtag, int secondary_compat)
14171 {
14172 #define T(X) TAG_CPU_ARCH_##X
14173 int tagl, tagh, result;
14174 const int v6t2[] =
14175 {
14176 T(V6T2), /* PRE_V4. */
14177 T(V6T2), /* V4. */
14178 T(V6T2), /* V4T. */
14179 T(V6T2), /* V5T. */
14180 T(V6T2), /* V5TE. */
14181 T(V6T2), /* V5TEJ. */
14182 T(V6T2), /* V6. */
14183 T(V7), /* V6KZ. */
14184 T(V6T2) /* V6T2. */
14185 };
14186 const int v6k[] =
14187 {
14188 T(V6K), /* PRE_V4. */
14189 T(V6K), /* V4. */
14190 T(V6K), /* V4T. */
14191 T(V6K), /* V5T. */
14192 T(V6K), /* V5TE. */
14193 T(V6K), /* V5TEJ. */
14194 T(V6K), /* V6. */
14195 T(V6KZ), /* V6KZ. */
14196 T(V7), /* V6T2. */
14197 T(V6K) /* V6K. */
14198 };
14199 const int v7[] =
14200 {
14201 T(V7), /* PRE_V4. */
14202 T(V7), /* V4. */
14203 T(V7), /* V4T. */
14204 T(V7), /* V5T. */
14205 T(V7), /* V5TE. */
14206 T(V7), /* V5TEJ. */
14207 T(V7), /* V6. */
14208 T(V7), /* V6KZ. */
14209 T(V7), /* V6T2. */
14210 T(V7), /* V6K. */
14211 T(V7) /* V7. */
14212 };
14213 const int v6_m[] =
14214 {
14215 -1, /* PRE_V4. */
14216 -1, /* V4. */
14217 T(V6K), /* V4T. */
14218 T(V6K), /* V5T. */
14219 T(V6K), /* V5TE. */
14220 T(V6K), /* V5TEJ. */
14221 T(V6K), /* V6. */
14222 T(V6KZ), /* V6KZ. */
14223 T(V7), /* V6T2. */
14224 T(V6K), /* V6K. */
14225 T(V7), /* V7. */
14226 T(V6_M) /* V6_M. */
14227 };
14228 const int v6s_m[] =
14229 {
14230 -1, /* PRE_V4. */
14231 -1, /* V4. */
14232 T(V6K), /* V4T. */
14233 T(V6K), /* V5T. */
14234 T(V6K), /* V5TE. */
14235 T(V6K), /* V5TEJ. */
14236 T(V6K), /* V6. */
14237 T(V6KZ), /* V6KZ. */
14238 T(V7), /* V6T2. */
14239 T(V6K), /* V6K. */
14240 T(V7), /* V7. */
14241 T(V6S_M), /* V6_M. */
14242 T(V6S_M) /* V6S_M. */
14243 };
14244 const int v7e_m[] =
14245 {
14246 -1, /* PRE_V4. */
14247 -1, /* V4. */
14248 T(V7E_M), /* V4T. */
14249 T(V7E_M), /* V5T. */
14250 T(V7E_M), /* V5TE. */
14251 T(V7E_M), /* V5TEJ. */
14252 T(V7E_M), /* V6. */
14253 T(V7E_M), /* V6KZ. */
14254 T(V7E_M), /* V6T2. */
14255 T(V7E_M), /* V6K. */
14256 T(V7E_M), /* V7. */
14257 T(V7E_M), /* V6_M. */
14258 T(V7E_M), /* V6S_M. */
14259 T(V7E_M) /* V7E_M. */
14260 };
14261 const int v8[] =
14262 {
14263 T(V8), /* PRE_V4. */
14264 T(V8), /* V4. */
14265 T(V8), /* V4T. */
14266 T(V8), /* V5T. */
14267 T(V8), /* V5TE. */
14268 T(V8), /* V5TEJ. */
14269 T(V8), /* V6. */
14270 T(V8), /* V6KZ. */
14271 T(V8), /* V6T2. */
14272 T(V8), /* V6K. */
14273 T(V8), /* V7. */
14274 T(V8), /* V6_M. */
14275 T(V8), /* V6S_M. */
14276 T(V8), /* V7E_M. */
14277 T(V8) /* V8. */
14278 };
14279 const int v8r[] =
14280 {
14281 T(V8R), /* PRE_V4. */
14282 T(V8R), /* V4. */
14283 T(V8R), /* V4T. */
14284 T(V8R), /* V5T. */
14285 T(V8R), /* V5TE. */
14286 T(V8R), /* V5TEJ. */
14287 T(V8R), /* V6. */
14288 T(V8R), /* V6KZ. */
14289 T(V8R), /* V6T2. */
14290 T(V8R), /* V6K. */
14291 T(V8R), /* V7. */
14292 T(V8R), /* V6_M. */
14293 T(V8R), /* V6S_M. */
14294 T(V8R), /* V7E_M. */
14295 T(V8), /* V8. */
14296 T(V8R), /* V8R. */
14297 };
14298 const int v8m_baseline[] =
14299 {
14300 -1, /* PRE_V4. */
14301 -1, /* V4. */
14302 -1, /* V4T. */
14303 -1, /* V5T. */
14304 -1, /* V5TE. */
14305 -1, /* V5TEJ. */
14306 -1, /* V6. */
14307 -1, /* V6KZ. */
14308 -1, /* V6T2. */
14309 -1, /* V6K. */
14310 -1, /* V7. */
14311 T(V8M_BASE), /* V6_M. */
14312 T(V8M_BASE), /* V6S_M. */
14313 -1, /* V7E_M. */
14314 -1, /* V8. */
14315 -1, /* V8R. */
14316 T(V8M_BASE) /* V8-M BASELINE. */
14317 };
14318 const int v8m_mainline[] =
14319 {
14320 -1, /* PRE_V4. */
14321 -1, /* V4. */
14322 -1, /* V4T. */
14323 -1, /* V5T. */
14324 -1, /* V5TE. */
14325 -1, /* V5TEJ. */
14326 -1, /* V6. */
14327 -1, /* V6KZ. */
14328 -1, /* V6T2. */
14329 -1, /* V6K. */
14330 T(V8M_MAIN), /* V7. */
14331 T(V8M_MAIN), /* V6_M. */
14332 T(V8M_MAIN), /* V6S_M. */
14333 T(V8M_MAIN), /* V7E_M. */
14334 -1, /* V8. */
14335 -1, /* V8R. */
14336 T(V8M_MAIN), /* V8-M BASELINE. */
14337 T(V8M_MAIN) /* V8-M MAINLINE. */
14338 };
14339 const int v8_1m_mainline[] =
14340 {
14341 -1, /* PRE_V4. */
14342 -1, /* V4. */
14343 -1, /* V4T. */
14344 -1, /* V5T. */
14345 -1, /* V5TE. */
14346 -1, /* V5TEJ. */
14347 -1, /* V6. */
14348 -1, /* V6KZ. */
14349 -1, /* V6T2. */
14350 -1, /* V6K. */
14351 T(V8_1M_MAIN), /* V7. */
14352 T(V8_1M_MAIN), /* V6_M. */
14353 T(V8_1M_MAIN), /* V6S_M. */
14354 T(V8_1M_MAIN), /* V7E_M. */
14355 -1, /* V8. */
14356 -1, /* V8R. */
14357 T(V8_1M_MAIN), /* V8-M BASELINE. */
14358 T(V8_1M_MAIN), /* V8-M MAINLINE. */
14359 -1, /* Unused (18). */
14360 -1, /* Unused (19). */
14361 -1, /* Unused (20). */
14362 T(V8_1M_MAIN) /* V8.1-M MAINLINE. */
14363 };
14364 const int v4t_plus_v6_m[] =
14365 {
14366 -1, /* PRE_V4. */
14367 -1, /* V4. */
14368 T(V4T), /* V4T. */
14369 T(V5T), /* V5T. */
14370 T(V5TE), /* V5TE. */
14371 T(V5TEJ), /* V5TEJ. */
14372 T(V6), /* V6. */
14373 T(V6KZ), /* V6KZ. */
14374 T(V6T2), /* V6T2. */
14375 T(V6K), /* V6K. */
14376 T(V7), /* V7. */
14377 T(V6_M), /* V6_M. */
14378 T(V6S_M), /* V6S_M. */
14379 T(V7E_M), /* V7E_M. */
14380 T(V8), /* V8. */
14381 -1, /* V8R. */
14382 T(V8M_BASE), /* V8-M BASELINE. */
14383 T(V8M_MAIN), /* V8-M MAINLINE. */
14384 -1, /* Unused (18). */
14385 -1, /* Unused (19). */
14386 -1, /* Unused (20). */
14387 T(V8_1M_MAIN), /* V8.1-M MAINLINE. */
14388 T(V4T_PLUS_V6_M) /* V4T plus V6_M. */
14389 };
14390 const int *comb[] =
14391 {
14392 v6t2,
14393 v6k,
14394 v7,
14395 v6_m,
14396 v6s_m,
14397 v7e_m,
14398 v8,
14399 v8r,
14400 v8m_baseline,
14401 v8m_mainline,
14402 NULL,
14403 NULL,
14404 NULL,
14405 v8_1m_mainline,
14406 /* Pseudo-architecture. */
14407 v4t_plus_v6_m
14408 };
14409
14410 /* Check we've not got a higher architecture than we know about. */
14411
14412 if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14413 {
14414 _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14415 return -1;
14416 }
14417
14418 /* Override old tag if we have a Tag_also_compatible_with on the output. */
14419
14420 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14421 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14422 oldtag = T(V4T_PLUS_V6_M);
14423
14424 /* And override the new tag if we have a Tag_also_compatible_with on the
14425 input. */
14426
14427 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14428 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14429 newtag = T(V4T_PLUS_V6_M);
14430
14431 tagl = (oldtag < newtag) ? oldtag : newtag;
14432 result = tagh = (oldtag > newtag) ? oldtag : newtag;
14433
14434 /* Architectures before V6KZ add features monotonically. */
14435 if (tagh <= TAG_CPU_ARCH_V6KZ)
14436 return result;
14437
14438 result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14439
14440 /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14441 as the canonical version. */
14442 if (result == T(V4T_PLUS_V6_M))
14443 {
14444 result = T(V4T);
14445 *secondary_compat_out = T(V6_M);
14446 }
14447 else
14448 *secondary_compat_out = -1;
14449
14450 if (result == -1)
14451 {
14452 _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14453 ibfd, oldtag, newtag);
14454 return -1;
14455 }
14456
14457 return result;
14458 #undef T
14459 }
14460
14461 /* Query attributes object to see if integer divide instructions may be
14462 present in an object. */
14463 static bfd_boolean
14464 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14465 {
14466 int arch = attr[Tag_CPU_arch].i;
14467 int profile = attr[Tag_CPU_arch_profile].i;
14468
14469 switch (attr[Tag_DIV_use].i)
14470 {
14471 case 0:
14472 /* Integer divide allowed if instruction contained in archetecture. */
14473 if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14474 return TRUE;
14475 else if (arch >= TAG_CPU_ARCH_V7E_M)
14476 return TRUE;
14477 else
14478 return FALSE;
14479
14480 case 1:
14481 /* Integer divide explicitly prohibited. */
14482 return FALSE;
14483
14484 default:
14485 /* Unrecognised case - treat as allowing divide everywhere. */
14486 case 2:
14487 /* Integer divide allowed in ARM state. */
14488 return TRUE;
14489 }
14490 }
14491
14492 /* Query attributes object to see if integer divide instructions are
14493 forbidden to be in the object. This is not the inverse of
14494 elf32_arm_attributes_accept_div. */
14495 static bfd_boolean
14496 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14497 {
14498 return attr[Tag_DIV_use].i == 1;
14499 }
14500
14501 /* Merge EABI object attributes from IBFD into OBFD. Raise an error if there
14502 are conflicting attributes. */
14503
14504 static bfd_boolean
14505 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14506 {
14507 bfd *obfd = info->output_bfd;
14508 obj_attribute *in_attr;
14509 obj_attribute *out_attr;
14510 /* Some tags have 0 = don't care, 1 = strong requirement,
14511 2 = weak requirement. */
14512 static const int order_021[3] = {0, 2, 1};
14513 int i;
14514 bfd_boolean result = TRUE;
14515 const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14516
14517 /* Skip the linker stubs file. This preserves previous behavior
14518 of accepting unknown attributes in the first input file - but
14519 is that a bug? */
14520 if (ibfd->flags & BFD_LINKER_CREATED)
14521 return TRUE;
14522
14523 /* Skip any input that hasn't attribute section.
14524 This enables to link object files without attribute section with
14525 any others. */
14526 if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14527 return TRUE;
14528
14529 if (!elf_known_obj_attributes_proc (obfd)[0].i)
14530 {
14531 /* This is the first object. Copy the attributes. */
14532 _bfd_elf_copy_obj_attributes (ibfd, obfd);
14533
14534 out_attr = elf_known_obj_attributes_proc (obfd);
14535
14536 /* Use the Tag_null value to indicate the attributes have been
14537 initialized. */
14538 out_attr[0].i = 1;
14539
14540 /* We do not output objects with Tag_MPextension_use_legacy - we move
14541 the attribute's value to Tag_MPextension_use. */
14542 if (out_attr[Tag_MPextension_use_legacy].i != 0)
14543 {
14544 if (out_attr[Tag_MPextension_use].i != 0
14545 && out_attr[Tag_MPextension_use_legacy].i
14546 != out_attr[Tag_MPextension_use].i)
14547 {
14548 _bfd_error_handler
14549 (_("Error: %pB has both the current and legacy "
14550 "Tag_MPextension_use attributes"), ibfd);
14551 result = FALSE;
14552 }
14553
14554 out_attr[Tag_MPextension_use] =
14555 out_attr[Tag_MPextension_use_legacy];
14556 out_attr[Tag_MPextension_use_legacy].type = 0;
14557 out_attr[Tag_MPextension_use_legacy].i = 0;
14558 }
14559
14560 return result;
14561 }
14562
14563 in_attr = elf_known_obj_attributes_proc (ibfd);
14564 out_attr = elf_known_obj_attributes_proc (obfd);
14565 /* This needs to happen before Tag_ABI_FP_number_model is merged. */
14566 if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14567 {
14568 /* Ignore mismatches if the object doesn't use floating point or is
14569 floating point ABI independent. */
14570 if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14571 || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14572 && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14573 out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14574 else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14575 && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14576 {
14577 _bfd_error_handler
14578 (_("error: %pB uses VFP register arguments, %pB does not"),
14579 in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14580 in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14581 result = FALSE;
14582 }
14583 }
14584
14585 for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14586 {
14587 /* Merge this attribute with existing attributes. */
14588 switch (i)
14589 {
14590 case Tag_CPU_raw_name:
14591 case Tag_CPU_name:
14592 /* These are merged after Tag_CPU_arch. */
14593 break;
14594
14595 case Tag_ABI_optimization_goals:
14596 case Tag_ABI_FP_optimization_goals:
14597 /* Use the first value seen. */
14598 break;
14599
14600 case Tag_CPU_arch:
14601 {
14602 int secondary_compat = -1, secondary_compat_out = -1;
14603 unsigned int saved_out_attr = out_attr[i].i;
14604 int arch_attr;
14605 static const char *name_table[] =
14606 {
14607 /* These aren't real CPU names, but we can't guess
14608 that from the architecture version alone. */
14609 "Pre v4",
14610 "ARM v4",
14611 "ARM v4T",
14612 "ARM v5T",
14613 "ARM v5TE",
14614 "ARM v5TEJ",
14615 "ARM v6",
14616 "ARM v6KZ",
14617 "ARM v6T2",
14618 "ARM v6K",
14619 "ARM v7",
14620 "ARM v6-M",
14621 "ARM v6S-M",
14622 "ARM v8",
14623 "",
14624 "ARM v8-M.baseline",
14625 "ARM v8-M.mainline",
14626 };
14627
14628 /* Merge Tag_CPU_arch and Tag_also_compatible_with. */
14629 secondary_compat = get_secondary_compatible_arch (ibfd);
14630 secondary_compat_out = get_secondary_compatible_arch (obfd);
14631 arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14632 &secondary_compat_out,
14633 in_attr[i].i,
14634 secondary_compat);
14635
14636 /* Return with error if failed to merge. */
14637 if (arch_attr == -1)
14638 return FALSE;
14639
14640 out_attr[i].i = arch_attr;
14641
14642 set_secondary_compatible_arch (obfd, secondary_compat_out);
14643
14644 /* Merge Tag_CPU_name and Tag_CPU_raw_name. */
14645 if (out_attr[i].i == saved_out_attr)
14646 ; /* Leave the names alone. */
14647 else if (out_attr[i].i == in_attr[i].i)
14648 {
14649 /* The output architecture has been changed to match the
14650 input architecture. Use the input names. */
14651 out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14652 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14653 : NULL;
14654 out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14655 ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14656 : NULL;
14657 }
14658 else
14659 {
14660 out_attr[Tag_CPU_name].s = NULL;
14661 out_attr[Tag_CPU_raw_name].s = NULL;
14662 }
14663
14664 /* If we still don't have a value for Tag_CPU_name,
14665 make one up now. Tag_CPU_raw_name remains blank. */
14666 if (out_attr[Tag_CPU_name].s == NULL
14667 && out_attr[i].i < ARRAY_SIZE (name_table))
14668 out_attr[Tag_CPU_name].s =
14669 _bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14670 }
14671 break;
14672
14673 case Tag_ARM_ISA_use:
14674 case Tag_THUMB_ISA_use:
14675 case Tag_WMMX_arch:
14676 case Tag_Advanced_SIMD_arch:
14677 /* ??? Do Advanced_SIMD (NEON) and WMMX conflict? */
14678 case Tag_ABI_FP_rounding:
14679 case Tag_ABI_FP_exceptions:
14680 case Tag_ABI_FP_user_exceptions:
14681 case Tag_ABI_FP_number_model:
14682 case Tag_FP_HP_extension:
14683 case Tag_CPU_unaligned_access:
14684 case Tag_T2EE_use:
14685 case Tag_MPextension_use:
14686 case Tag_MVE_arch:
14687 /* Use the largest value specified. */
14688 if (in_attr[i].i > out_attr[i].i)
14689 out_attr[i].i = in_attr[i].i;
14690 break;
14691
14692 case Tag_ABI_align_preserved:
14693 case Tag_ABI_PCS_RO_data:
14694 /* Use the smallest value specified. */
14695 if (in_attr[i].i < out_attr[i].i)
14696 out_attr[i].i = in_attr[i].i;
14697 break;
14698
14699 case Tag_ABI_align_needed:
14700 if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14701 && (in_attr[Tag_ABI_align_preserved].i == 0
14702 || out_attr[Tag_ABI_align_preserved].i == 0))
14703 {
14704 /* This error message should be enabled once all non-conformant
14705 binaries in the toolchain have had the attributes set
14706 properly.
14707 _bfd_error_handler
14708 (_("error: %pB: 8-byte data alignment conflicts with %pB"),
14709 obfd, ibfd);
14710 result = FALSE; */
14711 }
14712 /* Fall through. */
14713 case Tag_ABI_FP_denormal:
14714 case Tag_ABI_PCS_GOT_use:
14715 /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14716 value if greater than 2 (for future-proofing). */
14717 if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14718 || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14719 && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14720 out_attr[i].i = in_attr[i].i;
14721 break;
14722
14723 case Tag_Virtualization_use:
14724 /* The virtualization tag effectively stores two bits of
14725 information: the intended use of TrustZone (in bit 0), and the
14726 intended use of Virtualization (in bit 1). */
14727 if (out_attr[i].i == 0)
14728 out_attr[i].i = in_attr[i].i;
14729 else if (in_attr[i].i != 0
14730 && in_attr[i].i != out_attr[i].i)
14731 {
14732 if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14733 out_attr[i].i = 3;
14734 else
14735 {
14736 _bfd_error_handler
14737 (_("error: %pB: unable to merge virtualization attributes "
14738 "with %pB"),
14739 obfd, ibfd);
14740 result = FALSE;
14741 }
14742 }
14743 break;
14744
14745 case Tag_CPU_arch_profile:
14746 if (out_attr[i].i != in_attr[i].i)
14747 {
14748 /* 0 will merge with anything.
14749 'A' and 'S' merge to 'A'.
14750 'R' and 'S' merge to 'R'.
14751 'M' and 'A|R|S' is an error. */
14752 if (out_attr[i].i == 0
14753 || (out_attr[i].i == 'S'
14754 && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14755 out_attr[i].i = in_attr[i].i;
14756 else if (in_attr[i].i == 0
14757 || (in_attr[i].i == 'S'
14758 && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14759 ; /* Do nothing. */
14760 else
14761 {
14762 _bfd_error_handler
14763 (_("error: %pB: conflicting architecture profiles %c/%c"),
14764 ibfd,
14765 in_attr[i].i ? in_attr[i].i : '0',
14766 out_attr[i].i ? out_attr[i].i : '0');
14767 result = FALSE;
14768 }
14769 }
14770 break;
14771
14772 case Tag_DSP_extension:
14773 /* No need to change output value if any of:
14774 - pre (<=) ARMv5T input architecture (do not have DSP)
14775 - M input profile not ARMv7E-M and do not have DSP. */
14776 if (in_attr[Tag_CPU_arch].i <= 3
14777 || (in_attr[Tag_CPU_arch_profile].i == 'M'
14778 && in_attr[Tag_CPU_arch].i != 13
14779 && in_attr[i].i == 0))
14780 ; /* Do nothing. */
14781 /* Output value should be 0 if DSP part of architecture, ie.
14782 - post (>=) ARMv5te architecture output
14783 - A, R or S profile output or ARMv7E-M output architecture. */
14784 else if (out_attr[Tag_CPU_arch].i >= 4
14785 && (out_attr[Tag_CPU_arch_profile].i == 'A'
14786 || out_attr[Tag_CPU_arch_profile].i == 'R'
14787 || out_attr[Tag_CPU_arch_profile].i == 'S'
14788 || out_attr[Tag_CPU_arch].i == 13))
14789 out_attr[i].i = 0;
14790 /* Otherwise, DSP instructions are added and not part of output
14791 architecture. */
14792 else
14793 out_attr[i].i = 1;
14794 break;
14795
14796 case Tag_FP_arch:
14797 {
14798 /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14799 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14800 when it's 0. It might mean absence of FP hardware if
14801 Tag_FP_arch is zero. */
14802
14803 #define VFP_VERSION_COUNT 9
14804 static const struct
14805 {
14806 int ver;
14807 int regs;
14808 } vfp_versions[VFP_VERSION_COUNT] =
14809 {
14810 {0, 0},
14811 {1, 16},
14812 {2, 16},
14813 {3, 32},
14814 {3, 16},
14815 {4, 32},
14816 {4, 16},
14817 {8, 32},
14818 {8, 16}
14819 };
14820 int ver;
14821 int regs;
14822 int newval;
14823
14824 /* If the output has no requirement about FP hardware,
14825 follow the requirement of the input. */
14826 if (out_attr[i].i == 0)
14827 {
14828 /* This assert is still reasonable, we shouldn't
14829 produce the suspicious build attribute
14830 combination (See below for in_attr). */
14831 BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14832 out_attr[i].i = in_attr[i].i;
14833 out_attr[Tag_ABI_HardFP_use].i
14834 = in_attr[Tag_ABI_HardFP_use].i;
14835 break;
14836 }
14837 /* If the input has no requirement about FP hardware, do
14838 nothing. */
14839 else if (in_attr[i].i == 0)
14840 {
14841 /* We used to assert that Tag_ABI_HardFP_use was
14842 zero here, but we should never assert when
14843 consuming an object file that has suspicious
14844 build attributes. The single precision variant
14845 of 'no FP architecture' is still 'no FP
14846 architecture', so we just ignore the tag in this
14847 case. */
14848 break;
14849 }
14850
14851 /* Both the input and the output have nonzero Tag_FP_arch.
14852 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero. */
14853
14854 /* If both the input and the output have zero Tag_ABI_HardFP_use,
14855 do nothing. */
14856 if (in_attr[Tag_ABI_HardFP_use].i == 0
14857 && out_attr[Tag_ABI_HardFP_use].i == 0)
14858 ;
14859 /* If the input and the output have different Tag_ABI_HardFP_use,
14860 the combination of them is 0 (implied by Tag_FP_arch). */
14861 else if (in_attr[Tag_ABI_HardFP_use].i
14862 != out_attr[Tag_ABI_HardFP_use].i)
14863 out_attr[Tag_ABI_HardFP_use].i = 0;
14864
14865 /* Now we can handle Tag_FP_arch. */
14866
14867 /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14868 pick the biggest. */
14869 if (in_attr[i].i >= VFP_VERSION_COUNT
14870 && in_attr[i].i > out_attr[i].i)
14871 {
14872 out_attr[i] = in_attr[i];
14873 break;
14874 }
14875 /* The output uses the superset of input features
14876 (ISA version) and registers. */
14877 ver = vfp_versions[in_attr[i].i].ver;
14878 if (ver < vfp_versions[out_attr[i].i].ver)
14879 ver = vfp_versions[out_attr[i].i].ver;
14880 regs = vfp_versions[in_attr[i].i].regs;
14881 if (regs < vfp_versions[out_attr[i].i].regs)
14882 regs = vfp_versions[out_attr[i].i].regs;
14883 /* This assumes all possible supersets are also a valid
14884 options. */
14885 for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14886 {
14887 if (regs == vfp_versions[newval].regs
14888 && ver == vfp_versions[newval].ver)
14889 break;
14890 }
14891 out_attr[i].i = newval;
14892 }
14893 break;
14894 case Tag_PCS_config:
14895 if (out_attr[i].i == 0)
14896 out_attr[i].i = in_attr[i].i;
14897 else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14898 {
14899 /* It's sometimes ok to mix different configs, so this is only
14900 a warning. */
14901 _bfd_error_handler
14902 (_("warning: %pB: conflicting platform configuration"), ibfd);
14903 }
14904 break;
14905 case Tag_ABI_PCS_R9_use:
14906 if (in_attr[i].i != out_attr[i].i
14907 && out_attr[i].i != AEABI_R9_unused
14908 && in_attr[i].i != AEABI_R9_unused)
14909 {
14910 _bfd_error_handler
14911 (_("error: %pB: conflicting use of R9"), ibfd);
14912 result = FALSE;
14913 }
14914 if (out_attr[i].i == AEABI_R9_unused)
14915 out_attr[i].i = in_attr[i].i;
14916 break;
14917 case Tag_ABI_PCS_RW_data:
14918 if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14919 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14920 && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14921 {
14922 _bfd_error_handler
14923 (_("error: %pB: SB relative addressing conflicts with use of R9"),
14924 ibfd);
14925 result = FALSE;
14926 }
14927 /* Use the smallest value specified. */
14928 if (in_attr[i].i < out_attr[i].i)
14929 out_attr[i].i = in_attr[i].i;
14930 break;
14931 case Tag_ABI_PCS_wchar_t:
14932 if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14933 && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14934 {
14935 _bfd_error_handler
14936 (_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14937 ibfd, in_attr[i].i, out_attr[i].i);
14938 }
14939 else if (in_attr[i].i && !out_attr[i].i)
14940 out_attr[i].i = in_attr[i].i;
14941 break;
14942 case Tag_ABI_enum_size:
14943 if (in_attr[i].i != AEABI_enum_unused)
14944 {
14945 if (out_attr[i].i == AEABI_enum_unused
14946 || out_attr[i].i == AEABI_enum_forced_wide)
14947 {
14948 /* The existing object is compatible with anything.
14949 Use whatever requirements the new object has. */
14950 out_attr[i].i = in_attr[i].i;
14951 }
14952 else if (in_attr[i].i != AEABI_enum_forced_wide
14953 && out_attr[i].i != in_attr[i].i
14954 && !elf_arm_tdata (obfd)->no_enum_size_warning)
14955 {
14956 static const char *aeabi_enum_names[] =
14957 { "", "variable-size", "32-bit", "" };
14958 const char *in_name =
14959 in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14960 ? aeabi_enum_names[in_attr[i].i]
14961 : "<unknown>";
14962 const char *out_name =
14963 out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14964 ? aeabi_enum_names[out_attr[i].i]
14965 : "<unknown>";
14966 _bfd_error_handler
14967 (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14968 ibfd, in_name, out_name);
14969 }
14970 }
14971 break;
14972 case Tag_ABI_VFP_args:
14973 /* Aready done. */
14974 break;
14975 case Tag_ABI_WMMX_args:
14976 if (in_attr[i].i != out_attr[i].i)
14977 {
14978 _bfd_error_handler
14979 (_("error: %pB uses iWMMXt register arguments, %pB does not"),
14980 ibfd, obfd);
14981 result = FALSE;
14982 }
14983 break;
14984 case Tag_compatibility:
14985 /* Merged in target-independent code. */
14986 break;
14987 case Tag_ABI_HardFP_use:
14988 /* This is handled along with Tag_FP_arch. */
14989 break;
14990 case Tag_ABI_FP_16bit_format:
14991 if (in_attr[i].i != 0 && out_attr[i].i != 0)
14992 {
14993 if (in_attr[i].i != out_attr[i].i)
14994 {
14995 _bfd_error_handler
14996 (_("error: fp16 format mismatch between %pB and %pB"),
14997 ibfd, obfd);
14998 result = FALSE;
14999 }
15000 }
15001 if (in_attr[i].i != 0)
15002 out_attr[i].i = in_attr[i].i;
15003 break;
15004
15005 case Tag_DIV_use:
15006 /* A value of zero on input means that the divide instruction may
15007 be used if available in the base architecture as specified via
15008 Tag_CPU_arch and Tag_CPU_arch_profile. A value of 1 means that
15009 the user did not want divide instructions. A value of 2
15010 explicitly means that divide instructions were allowed in ARM
15011 and Thumb state. */
15012 if (in_attr[i].i == out_attr[i].i)
15013 /* Do nothing. */ ;
15014 else if (elf32_arm_attributes_forbid_div (in_attr)
15015 && !elf32_arm_attributes_accept_div (out_attr))
15016 out_attr[i].i = 1;
15017 else if (elf32_arm_attributes_forbid_div (out_attr)
15018 && elf32_arm_attributes_accept_div (in_attr))
15019 out_attr[i].i = in_attr[i].i;
15020 else if (in_attr[i].i == 2)
15021 out_attr[i].i = in_attr[i].i;
15022 break;
15023
15024 case Tag_MPextension_use_legacy:
15025 /* We don't output objects with Tag_MPextension_use_legacy - we
15026 move the value to Tag_MPextension_use. */
15027 if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15028 {
15029 if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15030 {
15031 _bfd_error_handler
15032 (_("%pB has both the current and legacy "
15033 "Tag_MPextension_use attributes"),
15034 ibfd);
15035 result = FALSE;
15036 }
15037 }
15038
15039 if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15040 out_attr[Tag_MPextension_use] = in_attr[i];
15041
15042 break;
15043
15044 case Tag_nodefaults:
15045 /* This tag is set if it exists, but the value is unused (and is
15046 typically zero). We don't actually need to do anything here -
15047 the merge happens automatically when the type flags are merged
15048 below. */
15049 break;
15050 case Tag_also_compatible_with:
15051 /* Already done in Tag_CPU_arch. */
15052 break;
15053 case Tag_conformance:
15054 /* Keep the attribute if it matches. Throw it away otherwise.
15055 No attribute means no claim to conform. */
15056 if (!in_attr[i].s || !out_attr[i].s
15057 || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15058 out_attr[i].s = NULL;
15059 break;
15060
15061 default:
15062 result
15063 = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15064 }
15065
15066 /* If out_attr was copied from in_attr then it won't have a type yet. */
15067 if (in_attr[i].type && !out_attr[i].type)
15068 out_attr[i].type = in_attr[i].type;
15069 }
15070
15071 /* Merge Tag_compatibility attributes and any common GNU ones. */
15072 if (!_bfd_elf_merge_object_attributes (ibfd, info))
15073 return FALSE;
15074
15075 /* Check for any attributes not known on ARM. */
15076 result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15077
15078 return result;
15079 }
15080
15081
15082 /* Return TRUE if the two EABI versions are incompatible. */
15083
15084 static bfd_boolean
15085 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15086 {
15087 /* v4 and v5 are the same spec before and after it was released,
15088 so allow mixing them. */
15089 if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15090 || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15091 return TRUE;
15092
15093 return (iver == over);
15094 }
15095
15096 /* Merge backend specific data from an object file to the output
15097 object file when linking. */
15098
15099 static bfd_boolean
15100 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15101
/* Display the flags field.

   Implements bfd_print_private_bfd_data for ARM ELF: print ABFD's
   e_flags to the FILE passed in PTR, decoding each known flag bit.
   Decoded bits are cleared from the working copy of FLAGS so that any
   leftover, unrecognised bits can be reported at the end.  Always
   returns TRUE.  */

static bfd_boolean
elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
{
  FILE * file = (FILE *) ptr;
  unsigned long flags;

  BFD_ASSERT (abfd != NULL && ptr != NULL);

  /* Print normal ELF private data.  */
  _bfd_elf_print_private_bfd_data (abfd, ptr);

  flags = elf_elfheader (abfd)->e_flags;
  /* Ignore init flag - it may not be set, despite the flags field
     containing valid data.  */

  fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);

  /* Which bits are meaningful depends on the EABI version encoded in
     the top byte of e_flags.  */
  switch (EF_ARM_EABI_VERSION (flags))
    {
    case EF_ARM_EABI_UNKNOWN:
      /* The following flag bits are GNU extensions and not part of the
	 official ARM ELF extended ABI.  Hence they are only decoded if
	 the EABI version is not set.  */
      if (flags & EF_ARM_INTERWORK)
	fprintf (file, _(" [interworking enabled]"));

      if (flags & EF_ARM_APCS_26)
	fprintf (file, " [APCS-26]");
      else
	fprintf (file, " [APCS-32]");

      if (flags & EF_ARM_VFP_FLOAT)
	fprintf (file, _(" [VFP float format]"));
      else if (flags & EF_ARM_MAVERICK_FLOAT)
	fprintf (file, _(" [Maverick float format]"));
      else
	fprintf (file, _(" [FPA float format]"));

      if (flags & EF_ARM_APCS_FLOAT)
	fprintf (file, _(" [floats passed in float registers]"));

      if (flags & EF_ARM_PIC)
	fprintf (file, _(" [position independent]"));

      if (flags & EF_ARM_NEW_ABI)
	fprintf (file, _(" [new ABI]"));

      if (flags & EF_ARM_OLD_ABI)
	fprintf (file, _(" [old ABI]"));

      if (flags & EF_ARM_SOFT_FLOAT)
	fprintf (file, _(" [software FP]"));

      /* Clear everything we decoded above.  */
      flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
		 | EF_ARM_MAVERICK_FLOAT);
      break;

    case EF_ARM_EABI_VER1:
      fprintf (file, _(" [Version1 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      flags &= ~ EF_ARM_SYMSARESORTED;
      break;

    case EF_ARM_EABI_VER2:
      fprintf (file, _(" [Version2 EABI]"));

      if (flags & EF_ARM_SYMSARESORTED)
	fprintf (file, _(" [sorted symbol table]"));
      else
	fprintf (file, _(" [unsorted symbol table]"));

      if (flags & EF_ARM_DYNSYMSUSESEGIDX)
	fprintf (file, _(" [dynamic symbols use segment index]"));

      if (flags & EF_ARM_MAPSYMSFIRST)
	fprintf (file, _(" [mapping symbols precede others]"));

      flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
		 | EF_ARM_MAPSYMSFIRST);
      break;

    case EF_ARM_EABI_VER3:
      fprintf (file, _(" [Version3 EABI]"));
      break;

    case EF_ARM_EABI_VER4:
      fprintf (file, _(" [Version4 EABI]"));
      /* BE8/LE8 are shared with v5; decoded at the eabi label below.  */
      goto eabi;

    case EF_ARM_EABI_VER5:
      fprintf (file, _(" [Version5 EABI]"));

      if (flags & EF_ARM_ABI_FLOAT_SOFT)
	fprintf (file, _(" [soft-float ABI]"));

      if (flags & EF_ARM_ABI_FLOAT_HARD)
	fprintf (file, _(" [hard-float ABI]"));

      flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);

    eabi:
      if (flags & EF_ARM_BE8)
	fprintf (file, _(" [BE8]"));

      if (flags & EF_ARM_LE8)
	fprintf (file, _(" [LE8]"));

      flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
      break;

    default:
      fprintf (file, _(" <EABI version unrecognised>"));
      break;
    }

  flags &= ~ EF_ARM_EABIMASK;

  /* Flags common to all EABI versions.  */
  if (flags & EF_ARM_RELEXEC)
    fprintf (file, _(" [relocatable executable]"));

  if (flags & EF_ARM_PIC)
    fprintf (file, _(" [position independent]"));

  if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
    fprintf (file, _(" [FDPIC ABI supplement]"));

  flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);

  /* Anything still set was not decoded above.  */
  if (flags)
    fprintf (file, _("<Unrecognised flag bits set>"));

  fputc ('\n', file);

  return TRUE;
}
15246
15247 static int
15248 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15249 {
15250 switch (ELF_ST_TYPE (elf_sym->st_info))
15251 {
15252 case STT_ARM_TFUNC:
15253 return ELF_ST_TYPE (elf_sym->st_info);
15254
15255 case STT_ARM_16BIT:
15256 /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15257 This allows us to distinguish between data used by Thumb instructions
15258 and non-data (which is probably code) inside Thumb regions of an
15259 executable. */
15260 if (type != STT_OBJECT && type != STT_TLS)
15261 return ELF_ST_TYPE (elf_sym->st_info);
15262 break;
15263
15264 default:
15265 break;
15266 }
15267
15268 return type;
15269 }
15270
15271 static asection *
15272 elf32_arm_gc_mark_hook (asection *sec,
15273 struct bfd_link_info *info,
15274 Elf_Internal_Rela *rel,
15275 struct elf_link_hash_entry *h,
15276 Elf_Internal_Sym *sym)
15277 {
15278 if (h != NULL)
15279 switch (ELF32_R_TYPE (rel->r_info))
15280 {
15281 case R_ARM_GNU_VTINHERIT:
15282 case R_ARM_GNU_VTENTRY:
15283 return NULL;
15284 }
15285
15286 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15287 }
15288
15289 /* Look through the relocs for a section during the first phase. */
15290
15291 static bfd_boolean
15292 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15293 asection *sec, const Elf_Internal_Rela *relocs)
15294 {
15295 Elf_Internal_Shdr *symtab_hdr;
15296 struct elf_link_hash_entry **sym_hashes;
15297 const Elf_Internal_Rela *rel;
15298 const Elf_Internal_Rela *rel_end;
15299 bfd *dynobj;
15300 asection *sreloc;
15301 struct elf32_arm_link_hash_table *htab;
15302 bfd_boolean call_reloc_p;
15303 bfd_boolean may_become_dynamic_p;
15304 bfd_boolean may_need_local_target_p;
15305 unsigned long nsyms;
15306
15307 if (bfd_link_relocatable (info))
15308 return TRUE;
15309
15310 BFD_ASSERT (is_arm_elf (abfd));
15311
15312 htab = elf32_arm_hash_table (info);
15313 if (htab == NULL)
15314 return FALSE;
15315
15316 sreloc = NULL;
15317
15318 /* Create dynamic sections for relocatable executables so that we can
15319 copy relocations. */
15320 if (htab->root.is_relocatable_executable
15321 && ! htab->root.dynamic_sections_created)
15322 {
15323 if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15324 return FALSE;
15325 }
15326
15327 if (htab->root.dynobj == NULL)
15328 htab->root.dynobj = abfd;
15329 if (!create_ifunc_sections (info))
15330 return FALSE;
15331
15332 dynobj = htab->root.dynobj;
15333
15334 symtab_hdr = & elf_symtab_hdr (abfd);
15335 sym_hashes = elf_sym_hashes (abfd);
15336 nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15337
15338 rel_end = relocs + sec->reloc_count;
15339 for (rel = relocs; rel < rel_end; rel++)
15340 {
15341 Elf_Internal_Sym *isym;
15342 struct elf_link_hash_entry *h;
15343 struct elf32_arm_link_hash_entry *eh;
15344 unsigned int r_symndx;
15345 int r_type;
15346
15347 r_symndx = ELF32_R_SYM (rel->r_info);
15348 r_type = ELF32_R_TYPE (rel->r_info);
15349 r_type = arm_real_reloc_type (htab, r_type);
15350
15351 if (r_symndx >= nsyms
15352 /* PR 9934: It is possible to have relocations that do not
15353 refer to symbols, thus it is also possible to have an
15354 object file containing relocations but no symbol table. */
15355 && (r_symndx > STN_UNDEF || nsyms > 0))
15356 {
15357 _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15358 r_symndx);
15359 return FALSE;
15360 }
15361
15362 h = NULL;
15363 isym = NULL;
15364 if (nsyms > 0)
15365 {
15366 if (r_symndx < symtab_hdr->sh_info)
15367 {
15368 /* A local symbol. */
15369 isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15370 abfd, r_symndx);
15371 if (isym == NULL)
15372 return FALSE;
15373 }
15374 else
15375 {
15376 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15377 while (h->root.type == bfd_link_hash_indirect
15378 || h->root.type == bfd_link_hash_warning)
15379 h = (struct elf_link_hash_entry *) h->root.u.i.link;
15380 }
15381 }
15382
15383 eh = (struct elf32_arm_link_hash_entry *) h;
15384
15385 call_reloc_p = FALSE;
15386 may_become_dynamic_p = FALSE;
15387 may_need_local_target_p = FALSE;
15388
15389 /* Could be done earlier, if h were already available. */
15390 r_type = elf32_arm_tls_transition (info, r_type, h);
15391 switch (r_type)
15392 {
15393 case R_ARM_GOTOFFFUNCDESC:
15394 {
15395 if (h == NULL)
15396 {
15397 if (!elf32_arm_allocate_local_sym_info (abfd))
15398 return FALSE;
15399 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15400 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15401 }
15402 else
15403 {
15404 eh->fdpic_cnts.gotofffuncdesc_cnt++;
15405 }
15406 }
15407 break;
15408
15409 case R_ARM_GOTFUNCDESC:
15410 {
15411 if (h == NULL)
15412 {
15413 /* Such a relocation is not supposed to be generated
15414 by gcc on a static function. */
15415 /* Anyway if needed it could be handled. */
15416 abort();
15417 }
15418 else
15419 {
15420 eh->fdpic_cnts.gotfuncdesc_cnt++;
15421 }
15422 }
15423 break;
15424
15425 case R_ARM_FUNCDESC:
15426 {
15427 if (h == NULL)
15428 {
15429 if (!elf32_arm_allocate_local_sym_info (abfd))
15430 return FALSE;
15431 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15432 elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15433 }
15434 else
15435 {
15436 eh->fdpic_cnts.funcdesc_cnt++;
15437 }
15438 }
15439 break;
15440
15441 case R_ARM_GOT32:
15442 case R_ARM_GOT_PREL:
15443 case R_ARM_TLS_GD32:
15444 case R_ARM_TLS_GD32_FDPIC:
15445 case R_ARM_TLS_IE32:
15446 case R_ARM_TLS_IE32_FDPIC:
15447 case R_ARM_TLS_GOTDESC:
15448 case R_ARM_TLS_DESCSEQ:
15449 case R_ARM_THM_TLS_DESCSEQ:
15450 case R_ARM_TLS_CALL:
15451 case R_ARM_THM_TLS_CALL:
15452 /* This symbol requires a global offset table entry. */
15453 {
15454 int tls_type, old_tls_type;
15455
15456 switch (r_type)
15457 {
15458 case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15459 case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15460
15461 case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15462 case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15463
15464 case R_ARM_TLS_GOTDESC:
15465 case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15466 case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15467 tls_type = GOT_TLS_GDESC; break;
15468
15469 default: tls_type = GOT_NORMAL; break;
15470 }
15471
15472 if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15473 info->flags |= DF_STATIC_TLS;
15474
15475 if (h != NULL)
15476 {
15477 h->got.refcount++;
15478 old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15479 }
15480 else
15481 {
15482 /* This is a global offset table entry for a local symbol. */
15483 if (!elf32_arm_allocate_local_sym_info (abfd))
15484 return FALSE;
15485 elf_local_got_refcounts (abfd)[r_symndx] += 1;
15486 old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15487 }
15488
15489 /* If a variable is accessed with both tls methods, two
15490 slots may be created. */
15491 if (GOT_TLS_GD_ANY_P (old_tls_type)
15492 && GOT_TLS_GD_ANY_P (tls_type))
15493 tls_type |= old_tls_type;
15494
15495 /* We will already have issued an error message if there
15496 is a TLS/non-TLS mismatch, based on the symbol
15497 type. So just combine any TLS types needed. */
15498 if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15499 && tls_type != GOT_NORMAL)
15500 tls_type |= old_tls_type;
15501
15502 /* If the symbol is accessed in both IE and GDESC
15503 method, we're able to relax. Turn off the GDESC flag,
15504 without messing up with any other kind of tls types
15505 that may be involved. */
15506 if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15507 tls_type &= ~GOT_TLS_GDESC;
15508
15509 if (old_tls_type != tls_type)
15510 {
15511 if (h != NULL)
15512 elf32_arm_hash_entry (h)->tls_type = tls_type;
15513 else
15514 elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15515 }
15516 }
15517 /* Fall through. */
15518
15519 case R_ARM_TLS_LDM32:
15520 case R_ARM_TLS_LDM32_FDPIC:
15521 if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15522 htab->tls_ldm_got.refcount++;
15523 /* Fall through. */
15524
15525 case R_ARM_GOTOFF32:
15526 case R_ARM_GOTPC:
15527 if (htab->root.sgot == NULL
15528 && !create_got_section (htab->root.dynobj, info))
15529 return FALSE;
15530 break;
15531
15532 case R_ARM_PC24:
15533 case R_ARM_PLT32:
15534 case R_ARM_CALL:
15535 case R_ARM_JUMP24:
15536 case R_ARM_PREL31:
15537 case R_ARM_THM_CALL:
15538 case R_ARM_THM_JUMP24:
15539 case R_ARM_THM_JUMP19:
15540 call_reloc_p = TRUE;
15541 may_need_local_target_p = TRUE;
15542 break;
15543
15544 case R_ARM_ABS12:
15545 /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15546 ldr __GOTT_INDEX__ offsets. */
15547 if (!htab->vxworks_p)
15548 {
15549 may_need_local_target_p = TRUE;
15550 break;
15551 }
15552 else goto jump_over;
15553
15554 /* Fall through. */
15555
15556 case R_ARM_MOVW_ABS_NC:
15557 case R_ARM_MOVT_ABS:
15558 case R_ARM_THM_MOVW_ABS_NC:
15559 case R_ARM_THM_MOVT_ABS:
15560 if (bfd_link_pic (info))
15561 {
15562 _bfd_error_handler
15563 (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15564 abfd, elf32_arm_howto_table_1[r_type].name,
15565 (h) ? h->root.root.string : "a local symbol");
15566 bfd_set_error (bfd_error_bad_value);
15567 return FALSE;
15568 }
15569
15570 /* Fall through. */
15571 case R_ARM_ABS32:
15572 case R_ARM_ABS32_NOI:
15573 jump_over:
15574 if (h != NULL && bfd_link_executable (info))
15575 {
15576 h->pointer_equality_needed = 1;
15577 }
15578 /* Fall through. */
15579 case R_ARM_REL32:
15580 case R_ARM_REL32_NOI:
15581 case R_ARM_MOVW_PREL_NC:
15582 case R_ARM_MOVT_PREL:
15583 case R_ARM_THM_MOVW_PREL_NC:
15584 case R_ARM_THM_MOVT_PREL:
15585
15586 /* Should the interworking branches be listed here? */
15587 if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15588 || htab->fdpic_p)
15589 && (sec->flags & SEC_ALLOC) != 0)
15590 {
15591 if (h == NULL
15592 && elf32_arm_howto_from_type (r_type)->pc_relative)
15593 {
15594 /* In shared libraries and relocatable executables,
15595 we treat local relative references as calls;
15596 see the related SYMBOL_CALLS_LOCAL code in
15597 allocate_dynrelocs. */
15598 call_reloc_p = TRUE;
15599 may_need_local_target_p = TRUE;
15600 }
15601 else
15602 /* We are creating a shared library or relocatable
15603 executable, and this is a reloc against a global symbol,
15604 or a non-PC-relative reloc against a local symbol.
15605 We may need to copy the reloc into the output. */
15606 may_become_dynamic_p = TRUE;
15607 }
15608 else
15609 may_need_local_target_p = TRUE;
15610 break;
15611
15612 /* This relocation describes the C++ object vtable hierarchy.
15613 Reconstruct it for later use during GC. */
15614 case R_ARM_GNU_VTINHERIT:
15615 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15616 return FALSE;
15617 break;
15618
15619 /* This relocation describes which C++ vtable entries are actually
15620 used. Record for later use during GC. */
15621 case R_ARM_GNU_VTENTRY:
15622 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15623 return FALSE;
15624 break;
15625 }
15626
15627 if (h != NULL)
15628 {
15629 if (call_reloc_p)
15630 /* We may need a .plt entry if the function this reloc
15631 refers to is in a different object, regardless of the
15632 symbol's type. We can't tell for sure yet, because
15633 something later might force the symbol local. */
15634 h->needs_plt = 1;
15635 else if (may_need_local_target_p)
15636 /* If this reloc is in a read-only section, we might
15637 need a copy reloc. We can't check reliably at this
15638 stage whether the section is read-only, as input
15639 sections have not yet been mapped to output sections.
15640 Tentatively set the flag for now, and correct in
15641 adjust_dynamic_symbol. */
15642 h->non_got_ref = 1;
15643 }
15644
15645 if (may_need_local_target_p
15646 && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15647 {
15648 union gotplt_union *root_plt;
15649 struct arm_plt_info *arm_plt;
15650 struct arm_local_iplt_info *local_iplt;
15651
15652 if (h != NULL)
15653 {
15654 root_plt = &h->plt;
15655 arm_plt = &eh->plt;
15656 }
15657 else
15658 {
15659 local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15660 if (local_iplt == NULL)
15661 return FALSE;
15662 root_plt = &local_iplt->root;
15663 arm_plt = &local_iplt->arm;
15664 }
15665
15666 /* If the symbol is a function that doesn't bind locally,
15667 this relocation will need a PLT entry. */
15668 if (root_plt->refcount != -1)
15669 root_plt->refcount += 1;
15670
15671 if (!call_reloc_p)
15672 arm_plt->noncall_refcount++;
15673
15674 /* It's too early to use htab->use_blx here, so we have to
15675 record possible blx references separately from
15676 relocs that definitely need a thumb stub. */
15677
15678 if (r_type == R_ARM_THM_CALL)
15679 arm_plt->maybe_thumb_refcount += 1;
15680
15681 if (r_type == R_ARM_THM_JUMP24
15682 || r_type == R_ARM_THM_JUMP19)
15683 arm_plt->thumb_refcount += 1;
15684 }
15685
15686 if (may_become_dynamic_p)
15687 {
15688 struct elf_dyn_relocs *p, **head;
15689
15690 /* Create a reloc section in dynobj. */
15691 if (sreloc == NULL)
15692 {
15693 sreloc = _bfd_elf_make_dynamic_reloc_section
15694 (sec, dynobj, 2, abfd, ! htab->use_rel);
15695
15696 if (sreloc == NULL)
15697 return FALSE;
15698
15699 /* BPABI objects never have dynamic relocations mapped. */
15700 if (htab->symbian_p)
15701 {
15702 flagword flags;
15703
15704 flags = bfd_section_flags (sreloc);
15705 flags &= ~(SEC_LOAD | SEC_ALLOC);
15706 bfd_set_section_flags (sreloc, flags);
15707 }
15708 }
15709
15710 /* If this is a global symbol, count the number of
15711 relocations we need for this symbol. */
15712 if (h != NULL)
15713 head = &h->dyn_relocs;
15714 else
15715 {
15716 head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15717 if (head == NULL)
15718 return FALSE;
15719 }
15720
15721 p = *head;
15722 if (p == NULL || p->sec != sec)
15723 {
15724 size_t amt = sizeof *p;
15725
15726 p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15727 if (p == NULL)
15728 return FALSE;
15729 p->next = *head;
15730 *head = p;
15731 p->sec = sec;
15732 p->count = 0;
15733 p->pc_count = 0;
15734 }
15735
15736 if (elf32_arm_howto_from_type (r_type)->pc_relative)
15737 p->pc_count += 1;
15738 p->count += 1;
15739 if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15740 && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15741 /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15742 that will become rofixup. */
15743 /* This is due to the fact that we suppose all will become rofixup. */
15744 fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15745 _bfd_error_handler
15746 (_("FDPIC does not yet support %s relocation"
15747 " to become dynamic for executable"),
15748 elf32_arm_howto_table_1[r_type].name);
15749 abort();
15750 }
15751 }
15752 }
15753
15754 return TRUE;
15755 }
15756
/* Rewrite the relocations of output section O after ARM unwind-table
   editing.  O must be an SHT_ARM_EXIDX section; for anything else this
   is a no-op.  Relocations belonging to deleted EXIDX entries are
   dropped, the offsets of surviving relocations are rebiased to match
   the compacted table, and a new R_ARM_PREL31 relocation is appended
   when a CANTUNWIND terminator entry was inserted.  RELDATA describes
   O's output relocation section; its count, size and (invalidated)
   hashes are updated in place.  */

static void
elf32_arm_update_relocs (asection *o,
			 struct bfd_elf_section_reloc_data *reldata)
{
  void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
  void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
  const struct elf_backend_data *bed;
  _arm_elf_section_data *eado;
  struct bfd_link_order *p;
  bfd_byte *erela_head, *erela;
  Elf_Internal_Rela *irela_head, *irela;
  Elf_Internal_Shdr *rel_hdr;
  bfd *abfd;
  unsigned int count;

  eado = get_arm_elf_section_data (o);

  /* Only ARM exception-index sections are edited.  */
  if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
    return;

  abfd = o->owner;
  bed = get_elf_backend_data (abfd);
  rel_hdr = reldata->hdr;

  /* Select REL or RELA swap routines to match the entry size.  */
  if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
    {
      swap_in = bed->s->swap_reloc_in;
      swap_out = bed->s->swap_reloc_out;
    }
  else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
    {
      swap_in = bed->s->swap_reloca_in;
      swap_out = bed->s->swap_reloca_out;
    }
  else
    abort ();

  /* Read external relocs from rel_hdr->contents in place; build the
     filtered internal list in a scratch buffer.  The +1 leaves room
     for a possible appended CANTUNWIND relocation.
     NOTE(review): the bfd_zmalloc result is not checked for NULL
     before use — an allocation failure would crash here.  */
  erela_head = rel_hdr->contents;
  irela_head = (Elf_Internal_Rela *) bfd_zmalloc
    ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));

  erela = erela_head;
  irela = irela_head;
  count = 0;

  for (p = o->map_head.link_order; p; p = p->next)
    {
      if (p->type == bfd_section_reloc_link_order
	  || p->type == bfd_symbol_reloc_link_order)
	{
	  /* Script-generated reloc: copy through unchanged.  */
	  (*swap_in) (abfd, erela, irela);
	  erela += rel_hdr->sh_entsize;
	  irela++;
	  count++;
	}
      else if (p->type == bfd_indirect_link_order)
	{
	  struct bfd_elf_section_reloc_data *input_reldata;
	  arm_unwind_table_edit *edit_list, *edit_tail;
	  _arm_elf_section_data *eadi;
	  bfd_size_type j;
	  bfd_vma offset;
	  asection *i;

	  i = p->u.indirect.section;

	  eadi = get_arm_elf_section_data (i);
	  edit_list = eadi->u.exidx.unwind_edit_list;
	  edit_tail = eadi->u.exidx.unwind_edit_tail;
	  offset = i->output_offset;

	  if (eadi->elf.rel.hdr &&
	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rel;
	  else if (eadi->elf.rela.hdr &&
		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
	    input_reldata = &eadi->elf.rela;
	  else
	    abort ();

	  if (edit_list)
	    {
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  arm_unwind_table_edit *edit_node, *edit_next;
		  bfd_vma bias;
		  bfd_vma reloc_index;

		  (*swap_in) (abfd, erela, irela);
		  /* Each EXIDX entry is 8 bytes, so offset/8 gives the
		     table index this reloc applies to.  */
		  reloc_index = (irela->r_offset - offset) / 8;

		  /* Count the edit entries at or before this index;
		     each deletion shifts this entry 8 bytes earlier.  */
		  bias = 0;
		  edit_node = edit_list;
		  for (edit_next = edit_list;
		       edit_next && edit_next->index <= reloc_index;
		       edit_next = edit_node->next)
		    {
		      bias++;
		      edit_node = edit_next;
		    }

		  /* Keep the reloc unless its own entry was deleted.  */
		  if (edit_node->type != DELETE_EXIDX_ENTRY
		      || edit_node->index != reloc_index)
		    {
		      irela->r_offset -= bias * 8;
		      irela++;
		      count++;
		    }

		  erela += rel_hdr->sh_entsize;
		}

	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
		{
		  /* New relocation entity.  */
		  asection *text_sec = edit_tail->linked_section;
		  asection *text_out = text_sec->output_section;
		  /* The terminator occupies the last 8 bytes of I.  */
		  bfd_vma exidx_offset = offset + i->size - 8;

		  irela->r_addend = 0;
		  irela->r_offset = exidx_offset;
		  irela->r_info = ELF32_R_INFO
		    (text_out->target_index, R_ARM_PREL31);
		  irela++;
		  count++;
		}
	    }
	  else
	    {
	      /* No edits for this input section: copy all relocs.  */
	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
		{
		  (*swap_in) (abfd, erela, irela);
		  erela += rel_hdr->sh_entsize;
		  irela++;
		}

	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
	    }
	}
    }

  reldata->count = count;
  rel_hdr->sh_size = count * rel_hdr->sh_entsize;

  /* Write the surviving internal relocs back over the external
     contents, now compacted.  */
  erela = erela_head;
  irela = irela_head;
  while (count > 0)
    {
      (*swap_out) (abfd, irela, erela);
      erela += rel_hdr->sh_entsize;
      irela++;
      count--;
    }

  free (irela_head);

  /* Hashes are no longer valid.  */
  free (reldata->hashes);
  reldata->hashes = NULL;
}
15917
/* Unwinding tables are not referenced directly.  This pass marks them as
   required if the corresponding code section is marked.  Similarly, ARMv8-M
   secure entry functions can only be referenced by SG veneers which are
   created after the GC process.  They need to be marked in case they reside in
   their own section (as would be the case if code was compiled with
   -ffunction-sections).  */

static bfd_boolean
elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
				  elf_gc_mark_hook_fn gc_mark_hook)
{
  bfd *sub;
  Elf_Internal_Shdr **elf_shdrp;
  asection *cmse_sec;
  obj_attribute *out_attr;
  Elf_Internal_Shdr *symtab_hdr;
  unsigned i, sym_count, ext_start;
  const struct elf_backend_data *bed;
  struct elf_link_hash_entry **sym_hashes;
  struct elf32_arm_link_hash_entry *cmse_hash;
  bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
  bfd_boolean debug_sec_need_to_be_marked = FALSE;
  asection *isec;

  /* Run the generic extra-section marking first.  */
  _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);

  /* Secure entry functions only exist for ARMv8-M Mainline/Baseline
     ("M" profile, arch >= v8-M Baseline) outputs.  */
  out_attr = elf_known_obj_attributes_proc (info->output_bfd);
  is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
	   && out_attr[Tag_CPU_arch_profile].i == 'M';

  /* Marking EH data may cause additional code sections to be marked,
     requiring multiple passes.  */
  again = TRUE;
  while (again)
    {
      again = FALSE;
      for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
	{
	  asection *o;

	  if (! is_arm_elf (sub))
	    continue;

	  /* Mark each unmarked .ARM.exidx section whose linked text
	     section (via sh_link) is already marked.  */
	  elf_shdrp = elf_elfsections (sub);
	  for (o = sub->sections; o != NULL; o = o->next)
	    {
	      Elf_Internal_Shdr *hdr;

	      hdr = &elf_section_data (o)->this_hdr;
	      if (hdr->sh_type == SHT_ARM_EXIDX
		  && hdr->sh_link
		  && hdr->sh_link < elf_numsections (sub)
		  && !o->gc_mark
		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
		{
		  /* Marking this section may in turn mark more code,
		     so request another pass.  */
		  again = TRUE;
		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
		    return FALSE;
		}
	    }

	  /* Mark section holding ARMv8-M secure entry functions.  We mark all
	     of them so no need for a second browsing.  */
	  if (is_v8m && first_bfd_browse)
	    {
	      sym_hashes = elf_sym_hashes (sub);
	      bed = get_elf_backend_data (sub);
	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
	      /* Global symbols start at index sh_info.  */
	      ext_start = symtab_hdr->sh_info;

	      /* Scan symbols.  */
	      for (i = ext_start; i < sym_count; i++)
		{
		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);

		  /* Assume it is a special symbol.  If not, cmse_scan will
		     warn about it and user can do something about it.  */
		  if (CONST_STRNEQ (cmse_hash->root.root.root.string,
				    CMSE_PREFIX))
		    {
		      cmse_sec = cmse_hash->root.root.u.def.section;
		      if (!cmse_sec->gc_mark
			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
			return FALSE;
		      /* The debug sections related to these secure entry
			 functions are marked on enabling below flag.  */
		      debug_sec_need_to_be_marked = TRUE;
		    }
		}

	      if (debug_sec_need_to_be_marked)
		{
		  /* Looping over all the sections of the object file containing
		     Armv8-M secure entry functions and marking all the debug
		     sections.  */
		  for (isec = sub->sections; isec != NULL; isec = isec->next)
		    {
		      /* Mark any debug section not already marked.  */
		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
			isec->gc_mark = 1 ;
		    }
		  debug_sec_need_to_be_marked = FALSE;
		}
	    }
	}
      /* The v8-M symbol scan marks everything in one pass, so only do
	 it on the first iteration.  */
      first_bfd_browse = FALSE;
    }

  return TRUE;
}
16029
16030 /* Treat mapping symbols as special target symbols. */
16031
16032 static bfd_boolean
16033 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16034 {
16035 return bfd_is_arm_special_symbol_name (sym->name,
16036 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16037 }
16038
16039 /* If the ELF symbol SYM might be a function in SEC, return the
16040 function size and set *CODE_OFF to the function's entry point,
16041 otherwise return zero. */
16042
16043 static bfd_size_type
16044 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16045 bfd_vma *code_off)
16046 {
16047 bfd_size_type size;
16048
16049 if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16050 | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16051 || sym->section != sec)
16052 return 0;
16053
16054 if (!(sym->flags & BSF_SYNTHETIC))
16055 switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
16056 {
16057 case STT_FUNC:
16058 case STT_ARM_TFUNC:
16059 case STT_NOTYPE:
16060 break;
16061 default:
16062 return 0;
16063 }
16064
16065 if ((sym->flags & BSF_LOCAL)
16066 && bfd_is_arm_special_symbol_name (sym->name,
16067 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16068 return 0;
16069
16070 *code_off = sym->value;
16071 size = 0;
16072 if (!(sym->flags & BSF_SYNTHETIC))
16073 size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
16074 if (size == 0)
16075 size = 1;
16076 return size;
16077 }
16078
16079 static bfd_boolean
16080 elf32_arm_find_inliner_info (bfd * abfd,
16081 const char ** filename_ptr,
16082 const char ** functionname_ptr,
16083 unsigned int * line_ptr)
16084 {
16085 bfd_boolean found;
16086 found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16087 functionname_ptr, line_ptr,
16088 & elf_tdata (abfd)->dwarf2_find_line_info);
16089 return found;
16090 }
16091
/* Adjust a symbol defined by a dynamic object and referenced by a
   regular object.  The current definition is in some section of the
   dynamic object, but we're not including those sections.  We have to
   change the definition to something the rest of the link can
   understand.  */

static bfd_boolean
elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
				 struct elf_link_hash_entry * h)
{
  bfd * dynobj;
  asection *s, *srel;
  struct elf32_arm_link_hash_entry * eh;
  struct elf32_arm_link_hash_table *globals;

  globals = elf32_arm_hash_table (info);
  if (globals == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  /* Make sure we know what is going on here.  */
  BFD_ASSERT (dynobj != NULL
	      && (h->needs_plt
		  || h->type == STT_GNU_IFUNC
		  || h->is_weakalias
		  || (h->def_dynamic
		      && h->ref_regular
		      && !h->def_regular)));

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* If this is a function, put it in the procedure linkage table.  We
     will fill in the contents of the procedure linkage table later,
     when we know the address of the .got section.  */
  if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
    {
      /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
	 symbol binds locally.  */
      if (h->plt.refcount <= 0
	  || (h->type != STT_GNU_IFUNC
	      && (SYMBOL_CALLS_LOCAL (info, h)
		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
		      && h->root.type == bfd_link_hash_undefweak))))
	{
	  /* This case can occur if we saw a PLT32 reloc in an input
	     file, but the symbol was never referred to by a dynamic
	     object, or if all references were garbage collected.  In
	     such a case, we don't actually need to build a procedure
	     linkage table, and we can just do a PC24 reloc instead.  */
	  h->plt.offset = (bfd_vma) -1;
	  eh->plt.thumb_refcount = 0;
	  eh->plt.maybe_thumb_refcount = 0;
	  eh->plt.noncall_refcount = 0;
	  h->needs_plt = 0;
	}

      /* Function symbols never need a copy reloc; done.  */
      return TRUE;
    }
  else
    {
      /* It's possible that we incorrectly decided a .plt reloc was
	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
	 in check_relocs.  We can't decide accurately between function
	 and non-function syms in check-relocs;  Objects loaded later in
	 the link may change h->type.  So fix it now.  */
      h->plt.offset = (bfd_vma) -1;
      eh->plt.thumb_refcount = 0;
      eh->plt.maybe_thumb_refcount = 0;
      eh->plt.noncall_refcount = 0;
    }

  /* If this is a weak symbol, and there is a real definition, the
     processor independent code will have arranged for us to see the
     real definition first, and we can just use the same value.  */
  if (h->is_weakalias)
    {
      struct elf_link_hash_entry *def = weakdef (h);
      BFD_ASSERT (def->root.type == bfd_link_hash_defined);
      h->root.u.def.section = def->root.u.def.section;
      h->root.u.def.value = def->root.u.def.value;
      return TRUE;
    }

  /* If there are no non-GOT references, we do not need a copy
     relocation.  */
  if (!h->non_got_ref)
    return TRUE;

  /* This is a reference to a symbol defined by a dynamic object which
     is not a function.  */

  /* If we are creating a shared library, we must presume that the
     only references to the symbol are via the global offset table.
     For such cases we need not do anything here; the relocations will
     be handled correctly by relocate_section.  Relocatable executables
     can reference data in shared objects directly, so we don't need to
     do anything here.  */
  if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
    return TRUE;

  /* We must allocate the symbol in our .dynbss section, which will
     become part of the .bss section of the executable.  There will be
     an entry for this symbol in the .dynsym section.  The dynamic
     object will contain position independent code, so all references
     from the dynamic object to this symbol will go through the global
     offset table.  The dynamic linker will use the .dynsym entry to
     determine the address it must put in the global offset table, so
     both the dynamic object and the regular object will refer to the
     same memory location for the variable.  */
  /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
     linker to copy the initial value out of the dynamic object and into
     the runtime process image.  We need to remember the offset into the
     .rel(a).bss section we are going to use.  */
  if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
    {
      /* Read-only data lands in .data.rel.ro so it can be remapped
	 read-only after relocation.  */
      s = globals->root.sdynrelro;
      srel = globals->root.sreldynrelro;
    }
  else
    {
      s = globals->root.sdynbss;
      srel = globals->root.srelbss;
    }
  if (info->nocopyreloc == 0
      && (h->root.u.def.section->flags & SEC_ALLOC) != 0
      && h->size != 0)
    {
      /* Reserve one dynamic (copy) relocation in the chosen section.  */
      elf32_arm_allocate_dynrelocs (info, srel, 1);
      h->needs_copy = 1;
    }

  return _bfd_elf_adjust_dynamic_copy (info, h, s);
}
16226
/* Allocate space in .plt, .got and associated reloc sections for
   dynamic relocs.  Called via elf_link_hash_traverse from
   elf32_arm_size_dynamic_sections; H is the global symbol being
   considered and INF is really the struct bfd_link_info for the link.
   Returns FALSE only on a fatal error (e.g. failure to record a
   dynamic symbol), which cuts the traversal short.  */

static bfd_boolean
allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
{
  struct bfd_link_info *info;
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;
  struct elf_dyn_relocs *p;

  /* Indirect symbols are resolved through their real definition;
     nothing to allocate here.  */
  if (h->root.type == bfd_link_hash_indirect)
    return TRUE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  info = (struct bfd_link_info *) inf;
  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  /* STT_GNU_IFUNC symbols may need a PLT entry even in a static link,
     so they are considered regardless of dynamic_sections_created.  */
  if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
      && h->plt.refcount > 0)
    {
      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* If the call in the PLT entry binds locally, the associated
	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
	 than the .plt section.  */
      if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
	{
	  eh->is_iplt = 1;
	  if (eh->plt.noncall_refcount == 0
	      && SYMBOL_REFERENCES_LOCAL (info, h))
	    /* All non-call references can be resolved directly.
	       This means that they can (and in some cases, must)
	       resolve directly to the run-time target, rather than
	       to the PLT.  That in turns means that any .got entry
	       would be equal to the .igot.plt entry, so there's
	       no point having both.  */
	    h->got.refcount = 0;
	}

      if (bfd_link_pic (info)
	  || eh->is_iplt
	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
	{
	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);

	  /* If this symbol is not defined in a regular file, and we are
	     not generating a shared library, then set the symbol to this
	     location in the .plt.  This is required to make function
	     pointers compare as equal between the normal executable and
	     the shared library.  */
	  if (! bfd_link_pic (info)
	      && !h->def_regular)
	    {
	      h->root.u.def.section = htab->root.splt;
	      h->root.u.def.value = h->plt.offset;

	      /* Make sure the function is not marked as Thumb, in case
		 it is the target of an ABS32 relocation, which will
		 point to the PLT entry.  */
	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
	    }

	  /* VxWorks executables have a second set of relocations for
	     each PLT entry.  They go in a separate relocation section,
	     which is processed by the kernel loader.  */
	  if (htab->vxworks_p && !bfd_link_pic (info))
	    {
	      /* There is a relocation for the initial PLT entry:
		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
	      if (h->plt.offset == htab->plt_header_size)
		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);

	      /* There are two extra relocations for each subsequent
		 PLT entry: an R_ARM_32 relocation for the GOT entry,
		 and an R_ARM_32 relocation for the PLT entry.  */
	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
	    }
	}
      else
	{
	  h->plt.offset = (bfd_vma) -1;
	  h->needs_plt = 0;
	}
    }
  else
    {
      h->plt.offset = (bfd_vma) -1;
      h->needs_plt = 0;
    }

  eh = (struct elf32_arm_link_hash_entry *) h;
  eh->tlsdesc_got = (bfd_vma) -1;

  if (h->got.refcount > 0)
    {
      asection *s;
      bfd_boolean dyn;
      int tls_type = elf32_arm_hash_entry (h)->tls_type;
      int indx;

      /* Make sure this symbol is output as a dynamic symbol.
	 Undefined weak syms won't yet be marked as dynamic.  */
      if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

      /* Symbian OS handles GOT allocation differently; this path
	 covers every other target variant.  */
      if (!htab->symbian_p)
	{
	  s = htab->root.sgot;
	  h->got.offset = s->size;

	  if (tls_type == GOT_UNKNOWN)
	    abort ();

	  if (tls_type == GOT_NORMAL)
	    /* Non-TLS symbols need one GOT slot.  */
	    s->size += 4;
	  else
	    {
	      if (tls_type & GOT_TLS_GDESC)
		{
		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
		  eh->tlsdesc_got
		    = (htab->root.sgotplt->size
		       - elf32_arm_compute_jump_table_size (htab));
		  htab->root.sgotplt->size += 8;
		  h->got.offset = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}

	      if (tls_type & GOT_TLS_GD)
		{
		  /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
		     consecutive GOT slots.  If the symbol is both GD
		     and GDESC, got.offset may have been
		     overwritten.  */
		  h->got.offset = s->size;
		  s->size += 8;
		}

	      if (tls_type & GOT_TLS_IE)
		/* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
		   slot.  */
		s->size += 4;
	    }

	  dyn = htab->root.dynamic_sections_created;

	  /* INDX is the dynamic symbol index to relocate against, or 0
	     when the symbol resolves locally.  */
	  indx = 0;
	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
					       bfd_link_pic (info),
					       h)
	      && (!bfd_link_pic (info)
		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
	    indx = h->dynindx;

	  if (tls_type != GOT_NORMAL
	      && (bfd_link_dll (info) || indx != 0)
	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
		  || h->root.type != bfd_link_hash_undefweak))
	    {
	      if (tls_type & GOT_TLS_IE)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GD)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);

	      if (tls_type & GOT_TLS_GDESC)
		{
		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
		  /* GDESC needs a trampoline to jump to.  */
		  htab->tls_trampoline = -1;
		}

	      /* Only GD needs it.  GDESC just emits one relocation per
		 2 entries.  */
	      if ((tls_type & GOT_TLS_GD) && indx != 0)
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (((indx != -1) || htab->fdpic_p)
		   && !SYMBOL_REFERENCES_LOCAL (info, h))
	    {
	      if (htab->root.dynamic_sections_created)
		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	    }
	  else if (h->type == STT_GNU_IFUNC
		   && eh->plt.noncall_refcount == 0)
	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
	       they all resolve dynamically instead.  Reserve room for the
	       GOT entry's R_ARM_IRELATIVE relocation.  */
	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
	  else if (bfd_link_pic (info)
		   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
	    /* Reserve room for rofixup for FDPIC executable.  */
	    /* TLS relocs do not need space since they are completely
	       resolved.  */
	    htab->srofixup->size += 4;
	}
    }
  else
    h->got.offset = (bfd_vma) -1;

  /* FDPIC support.  */
  if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
    {
      /* Symbol mustn't be exported.  */
      if (h->dynindx != -1)
	abort();

      /* We only allocate one function descriptor with its associated
	 relocation.  */
      if (eh->fdpic_cnts.funcdesc_offset == -1)
	{
	  asection *s = htab->root.sgot;

	  eh->fdpic_cnts.funcdesc_offset = s->size;
	  s->size += 8;
	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	  if (bfd_link_pic(info))
	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	  else
	    htab->srofixup->size += 8;
	}
    }

  if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
    {
      asection *s = htab->root.sgot;

      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated
	     relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}

      /* Add one entry into the GOT and a R_ARM_FUNCDESC or
	 R_ARM_RELATIVE/rofixup relocation on it.  */
      eh->fdpic_cnts.gotfuncdesc_offset = s->size;
      s->size += 4;
      if (h->dynindx == -1 && !bfd_link_pic(info))
	htab->srofixup->size += 4;
      else
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }

  if (eh->fdpic_cnts.funcdesc_cnt > 0)
    {
      if (htab->root.dynamic_sections_created && h->dynindx == -1
	  && !h->forced_local)
	if (! bfd_elf_link_record_dynamic_symbol (info, h))
	  return FALSE;

      if (h->dynindx == -1)
	{
	  /* We only allocate one function descriptor with its associated
	     relocation.  */
	  if (eh->fdpic_cnts.funcdesc_offset == -1)
	    {
	      asection *s = htab->root.sgot;

	      eh->fdpic_cnts.funcdesc_offset = s->size;
	      s->size += 8;
	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
	      else
		htab->srofixup->size += 8;
	    }
	}
      if (h->dynindx == -1 && !bfd_link_pic(info))
	{
	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
	}
      else
	{
	  /* Will need one dynamic reloc per reference. will be either
	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
					eh->fdpic_cnts.funcdesc_cnt);
	}
    }

  /* Allocate stubs for exported Thumb functions on v4t.  */
  if (!htab->use_blx && h->dynindx != -1
      && h->def_regular
      && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
      && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
    {
      struct elf_link_hash_entry * th;
      struct bfd_link_hash_entry * bh;
      struct elf_link_hash_entry * myh;
      char name[1024];
      asection *s;
      bh = NULL;
      /* Create a new symbol to register the real location of the function.  */
      s = h->root.u.def.section;
      sprintf (name, "__real_%s", h->root.root.string);
      _bfd_generic_link_add_one_symbol (info, s->owner,
					name, BSF_GLOBAL, s,
					h->root.u.def.value,
					NULL, TRUE, FALSE, &bh);

      myh = (struct elf_link_hash_entry *) bh;
      myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
      myh->forced_local = 1;
      ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
      eh->export_glue = myh;
      th = record_arm_to_thumb_glue (info, h);
      /* Point the symbol at the stub.  */
      h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
      h->root.u.def.section = th->root.u.def.section;
      /* Clear the Thumb bit of the stub address.  */
      h->root.u.def.value = th->root.u.def.value & ~1;
    }

  if (h->dyn_relocs == NULL)
    return TRUE;

  /* In the shared -Bsymbolic case, discard space allocated for
     dynamic pc-relative relocs against symbols which turn out to be
     defined in regular objects.  For the normal shared case, discard
     space for pc-relative relocs that have become local due to symbol
     visibility changes.  */

  if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
    {
      /* Relocs that use pc_count are PC-relative forms, which will appear
	 on something like ".long foo - ." or "movw REG, foo - .".  We want
	 calls to protected symbols to resolve directly to the function
	 rather than going via the plt.  If people want function pointer
	 comparisons to work as expected then they should avoid writing
	 assembly like ".long foo - .".  */
      if (SYMBOL_CALLS_LOCAL (info, h))
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      p->count -= p->pc_count;
	      p->pc_count = 0;
	      if (p->count == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      if (htab->vxworks_p)
	{
	  struct elf_dyn_relocs **pp;

	  for (pp = &h->dyn_relocs; (p = *pp) != NULL; )
	    {
	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
		*pp = p->next;
	      else
		pp = &p->next;
	    }
	}

      /* Also discard relocs on undefined weak syms with non-default
	 visibility.  */
      if (h->dyn_relocs != NULL
	  && h->root.type == bfd_link_hash_undefweak)
	{
	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
	    h->dyn_relocs = NULL;

	  /* Make sure undefined weak symbols are output as a dynamic
	     symbol in PIEs.  */
	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
		   && !h->forced_local)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }
	}

      else if (htab->root.is_relocatable_executable && h->dynindx == -1
	       && h->root.type == bfd_link_hash_new)
	{
	  /* Output absolute symbols so that we can create relocations
	     against them.  For normal symbols we output a relocation
	     against the section that contains them.  */
	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
	    return FALSE;
	}

    }
  else
    {
      /* For the non-shared case, discard space for relocs against
	 symbols which turn out to need copy relocs or are not
	 dynamic.  */

      if (!h->non_got_ref
	  && ((h->def_dynamic
	       && !h->def_regular)
	      || (htab->root.dynamic_sections_created
		  && (h->root.type == bfd_link_hash_undefweak
		      || h->root.type == bfd_link_hash_undefined))))
	{
	  /* Make sure this symbol is output as a dynamic symbol.
	     Undefined weak syms won't yet be marked as dynamic.  */
	  if (h->dynindx == -1 && !h->forced_local
	      && h->root.type == bfd_link_hash_undefweak)
	    {
	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
		return FALSE;
	    }

	  /* If that succeeded, we know we'll be keeping all the
	     relocs.  */
	  if (h->dynindx != -1)
	    goto keep;
	}

      h->dyn_relocs = NULL;

    keep: ;
    }

  /* Finally, allocate space.  */
  for (p = h->dyn_relocs; p != NULL; p = p->next)
    {
      asection *sreloc = elf_section_data (p->sec)->sreloc;

      if (h->type == STT_GNU_IFUNC
	  && eh->plt.noncall_refcount == 0
	  && SYMBOL_REFERENCES_LOCAL (info, h))
	elf32_arm_allocate_irelocs (info, sreloc, p->count);
      else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
      else if (htab->fdpic_p && !bfd_link_pic(info))
	htab->srofixup->size += 4 * p->count;
      else
	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
    }

  return TRUE;
}
16704
16705 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16706 read-only sections. */
16707
16708 static bfd_boolean
16709 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16710 {
16711 asection *sec;
16712
16713 if (h->root.type == bfd_link_hash_indirect)
16714 return TRUE;
16715
16716 sec = _bfd_elf_readonly_dynrelocs (h);
16717 if (sec != NULL)
16718 {
16719 struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16720
16721 info->flags |= DF_TEXTREL;
16722 info->callbacks->minfo
16723 (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16724 sec->owner, h->root.root.string, sec);
16725
16726 /* Not an error, just cut short the traversal. */
16727 return FALSE;
16728 }
16729
16730 return TRUE;
16731 }
16732
16733 void
16734 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16735 int byteswap_code)
16736 {
16737 struct elf32_arm_link_hash_table *globals;
16738
16739 globals = elf32_arm_hash_table (info);
16740 if (globals == NULL)
16741 return;
16742
16743 globals->byteswap_code = byteswap_code;
16744 }
16745
/* Set the sizes of the dynamic sections.  This is the backend's
   size_dynamic_sections hook: it sizes the GOT/PLT/reloc sections for
   local symbols, traverses the hash table to do the same for globals,
   collects ARM interworking glue, allocates section contents, and adds
   the required .dynamic entries.  Returns FALSE on a fatal error.  */

static bfd_boolean
elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * s;
  bfd_boolean plt;
  bfd_boolean relocs;
  bfd *ibfd;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;
  BFD_ASSERT (dynobj != NULL);
  check_use_blx (htab);

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Set the contents of the .interp section to the interpreter.  */
      if (bfd_link_executable (info) && !info->nointerp)
	{
	  s = bfd_get_linker_section (dynobj, ".interp");
	  BFD_ASSERT (s != NULL);
	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
	}
    }

  /* Set up .got offsets for local syms, and space for local dynamic
     relocs.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      bfd_signed_vma *local_got;
      bfd_signed_vma *end_local_got;
      struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
      char *local_tls_type;
      bfd_vma *local_tlsdesc_gotent;
      bfd_size_type locsymcount;
      Elf_Internal_Shdr *symtab_hdr;
      asection *srel;
      bfd_boolean is_vxworks = htab->vxworks_p;
      unsigned int symndx;
      struct fdpic_local *local_fdpic_cnts;

      if (! is_arm_elf (ibfd))
	continue;

      for (s = ibfd->sections; s != NULL; s = s->next)
	{
	  struct elf_dyn_relocs *p;

	  for (p = (struct elf_dyn_relocs *)
		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
	    {
	      if (!bfd_is_abs_section (p->sec)
		  && bfd_is_abs_section (p->sec->output_section))
		{
		  /* Input section has been discarded, either because
		     it is a copy of a linkonce section or due to
		     linker script /DISCARD/, so we'll be discarding
		     the relocs too.  */
		}
	      else if (is_vxworks
		       && strcmp (p->sec->output_section->name,
				  ".tls_vars") == 0)
		{
		  /* Relocations in vxworks .tls_vars sections are
		     handled specially by the loader.  */
		}
	      else if (p->count != 0)
		{
		  srel = elf_section_data (p->sec)->sreloc;
		  /* FDPIC executables use rofixups instead of dynamic
		     relocations for local data.  */
		  if (htab->fdpic_p && !bfd_link_pic(info))
		    htab->srofixup->size += 4 * p->count;
		  else
		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
		    info->flags |= DF_TEXTREL;
		}
	    }
	}

      local_got = elf_local_got_refcounts (ibfd);
      if (!local_got)
	continue;

      symtab_hdr = & elf_symtab_hdr (ibfd);
      locsymcount = symtab_hdr->sh_info;
      end_local_got = local_got + locsymcount;
      local_iplt_ptr = elf32_arm_local_iplt (ibfd);
      local_tls_type = elf32_arm_local_got_tls_type (ibfd);
      local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
      local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
      symndx = 0;
      s = htab->root.sgot;
      srel = htab->root.srelgot;
      /* Walk the per-local-symbol arrays in lockstep.  */
      for (; local_got < end_local_got;
	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
	{
	  *local_tlsdesc_gotent = (bfd_vma) -1;
	  local_iplt = *local_iplt_ptr;

	  /* FDPIC support.  */
	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}
	    }

	  if (local_fdpic_cnts->funcdesc_cnt > 0)
	    {
	      if (local_fdpic_cnts->funcdesc_offset == -1)
		{
		  local_fdpic_cnts->funcdesc_offset = s->size;
		  s->size += 8;

		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
		  if (bfd_link_pic(info))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else
		    htab->srofixup->size += 8;
		}

	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
	      if (bfd_link_pic(info))
		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
	      else
		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
	    }

	  if (local_iplt != NULL)
	    {
	      struct elf_dyn_relocs *p;

	      if (local_iplt->root.refcount > 0)
		{
		  elf32_arm_allocate_plt_entry (info, TRUE,
						&local_iplt->root,
						&local_iplt->arm);
		  if (local_iplt->arm.noncall_refcount == 0)
		    /* All references to the PLT are calls, so all
		       non-call references can resolve directly to the
		       run-time target.  This means that the .got entry
		       would be the same as the .igot.plt entry, so there's
		       no point creating both.  */
		    *local_got = 0;
		}
	      else
		{
		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
		  local_iplt->root.offset = (bfd_vma) -1;
		}

	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
		{
		  asection *psrel;

		  psrel = elf_section_data (p->sec)->sreloc;
		  if (local_iplt->arm.noncall_refcount == 0)
		    elf32_arm_allocate_irelocs (info, psrel, p->count);
		  else
		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
		}
	    }
	  if (*local_got > 0)
	    {
	      Elf_Internal_Sym *isym;

	      *local_got = s->size;
	      if (*local_tls_type & GOT_TLS_GD)
		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
		s->size += 8;
	      if (*local_tls_type & GOT_TLS_GDESC)
		{
		  *local_tlsdesc_gotent = htab->root.sgotplt->size
		    - elf32_arm_compute_jump_table_size (htab);
		  htab->root.sgotplt->size += 8;
		  *local_got = (bfd_vma) -2;
		  /* plt.got_offset needs to know there's a TLS_DESC
		     reloc in the middle of .got.plt.  */
		  htab->num_tls_desc++;
		}
	      if (*local_tls_type & GOT_TLS_IE)
		s->size += 4;

	      if (*local_tls_type & GOT_NORMAL)
		{
		  /* If the symbol is both GD and GDESC, *local_got
		     may have been overwritten.  */
		  *local_got = s->size;
		  s->size += 4;
		}

	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
	      if (isym == NULL)
		return FALSE;

	      /* If all references to an STT_GNU_IFUNC PLT are calls,
		 then all non-call references, including this GOT entry,
		 resolve directly to the run-time target.  */
	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
		  && (local_iplt == NULL
		      || local_iplt->arm.noncall_refcount == 0))
		elf32_arm_allocate_irelocs (info, srel, 1);
	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
		{
		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
		    elf32_arm_allocate_dynrelocs (info, srel, 1);
		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
		    htab->srofixup->size += 4;

		  if ((bfd_link_pic (info) || htab->fdpic_p)
		      && *local_tls_type & GOT_TLS_GDESC)
		    {
		      elf32_arm_allocate_dynrelocs (info,
						    htab->root.srelplt, 1);
		      /* Flag that the GDESC trampoline is needed; its
			 offset is assigned below.  */
		      htab->tls_trampoline = -1;
		    }
		}
	    }
	  else
	    *local_got = (bfd_vma) -1;
	}
    }

  if (htab->tls_ldm_got.refcount > 0)
    {
      /* Allocate two GOT entries and one dynamic relocation (if necessary)
	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
      htab->tls_ldm_got.offset = htab->root.sgot->size;
      htab->root.sgot->size += 8;
      if (bfd_link_pic (info))
	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
    }
  else
    htab->tls_ldm_got.offset = -1;

  /* At the very end of the .rofixup section is a pointer to the GOT,
     reserve space for it.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    htab->srofixup->size += 4;

  /* Allocate global sym .plt and .got entries, and space for global
     sym dynamic relocs.  */
  elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);

  /* Here we rummage through the found bfds to collect glue information.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      if (! is_arm_elf (ibfd))
	continue;

      /* Initialise mapping tables for code/data.  */
      bfd_elf32_arm_init_maps (ibfd);

      if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
    }

  /* Allocate space for the glue sections now that we've sized them.  */
  bfd_elf32_arm_allocate_interworking_sections (info);

  /* For every jump slot reserved in the sgotplt, reloc_count is
     incremented.  However, when we reserve space for TLS descriptors,
     it's not incremented, so in order to compute the space reserved
     for them, it suffices to multiply the reloc count by the jump
     slot size.  */
  if (htab->root.srelplt)
    htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);

  if (htab->tls_trampoline)
    {
      if (htab->root.splt->size == 0)
	htab->root.splt->size += htab->plt_header_size;

      htab->tls_trampoline = htab->root.splt->size;
      htab->root.splt->size += htab->plt_entry_size;

      /* If we're not using lazy TLS relocations, don't generate the
	 PLT and GOT entries they require.  */
      if (!(info->flags & DF_BIND_NOW))
	{
	  htab->dt_tlsdesc_got = htab->root.sgot->size;
	  htab->root.sgot->size += 4;

	  htab->dt_tlsdesc_plt = htab->root.splt->size;
	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
	}
    }

  /* The check_relocs and adjust_dynamic_symbol entry points have
     determined the sizes of the various dynamic sections.  Allocate
     memory for them.  */
  plt = FALSE;
  relocs = FALSE;
  for (s = dynobj->sections; s != NULL; s = s->next)
    {
      const char * name;

      if ((s->flags & SEC_LINKER_CREATED) == 0)
	continue;

      /* It's OK to base decisions on the section name, because none
	 of the dynobj section names depend upon the input files.  */
      name = bfd_section_name (s);

      if (s == htab->root.splt)
	{
	  /* Remember whether there is a PLT.  */
	  plt = s->size != 0;
	}
      else if (CONST_STRNEQ (name, ".rel"))
	{
	  if (s->size != 0)
	    {
	      /* Remember whether there are any reloc sections other
		 than .rel(a).plt and .rela.plt.unloaded.  */
	      if (s != htab->root.srelplt && s != htab->srelplt2)
		relocs = TRUE;

	      /* We use the reloc_count field as a counter if we need
		 to copy relocs into the output file.  */
	      s->reloc_count = 0;
	    }
	}
      else if (s != htab->root.sgot
	       && s != htab->root.sgotplt
	       && s != htab->root.iplt
	       && s != htab->root.igotplt
	       && s != htab->root.sdynbss
	       && s != htab->root.sdynrelro
	       && s != htab->srofixup)
	{
	  /* It's not one of our sections, so don't allocate space.  */
	  continue;
	}

      if (s->size == 0)
	{
	  /* If we don't need this section, strip it from the
	     output file.  This is mostly to handle .rel(a).bss and
	     .rel(a).plt.  We must create both sections in
	     create_dynamic_sections, because they must be created
	     before the linker maps input sections to output
	     sections.  The linker does that before
	     adjust_dynamic_symbol is called, and it is that
	     function which decides whether anything needs to go
	     into these sections.  */
	  s->flags |= SEC_EXCLUDE;
	  continue;
	}

      if ((s->flags & SEC_HAS_CONTENTS) == 0)
	continue;

      /* Allocate memory for the section contents.  */
      s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
      if (s->contents == NULL)
	return FALSE;
    }

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      /* Add some entries to the .dynamic section.  We fill in the
	 values later, in elf32_arm_finish_dynamic_sections, but we
	 must add the entries now so that we get the correct size for
	 the .dynamic section.  The DT_DEBUG entry is filled in by the
	 dynamic linker and used by the debugger.  */
#define add_dynamic_entry(TAG, VAL) \
  _bfd_elf_add_dynamic_entry (info, TAG, VAL)

      if (bfd_link_executable (info))
	{
	  if (!add_dynamic_entry (DT_DEBUG, 0))
	    return FALSE;
	}

      if (plt)
	{
	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
	      || !add_dynamic_entry (DT_PLTREL,
				     htab->use_rel ? DT_REL : DT_RELA)
	      || !add_dynamic_entry (DT_JMPREL, 0))
	    return FALSE;

	  if (htab->dt_tlsdesc_plt
	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
	    return FALSE;
	}

      if (relocs)
	{
	  if (htab->use_rel)
	    {
	      if (!add_dynamic_entry (DT_REL, 0)
		  || !add_dynamic_entry (DT_RELSZ, 0)
		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	  else
	    {
	      if (!add_dynamic_entry (DT_RELA, 0)
		  || !add_dynamic_entry (DT_RELASZ, 0)
		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
		return FALSE;
	    }
	}

      /* If any dynamic relocs apply to a read-only section,
	 then we need a DT_TEXTREL entry.  */
      if ((info->flags & DF_TEXTREL) == 0)
	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);

      if ((info->flags & DF_TEXTREL) != 0)
	{
	  if (!add_dynamic_entry (DT_TEXTREL, 0))
	    return FALSE;
	}
      if (htab->vxworks_p
	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
	return FALSE;
    }
#undef add_dynamic_entry

  return TRUE;
}
17191
/* Size sections even though they're not dynamic.  We use it to setup
   _TLS_MODULE_BASE_, if needed.  */

static bfd_boolean
elf32_arm_always_size_sections (bfd *output_bfd,
				struct bfd_link_info *info)
{
  asection *tls_sec;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);

  /* Nothing to do for relocatable links: no dynamic sections, no
     _TLS_MODULE_BASE_, no stack-size handling.  */
  if (bfd_link_relocatable (info))
    return TRUE;

  tls_sec = elf_hash_table (info)->tls_sec;

  if (tls_sec)
    {
      struct elf_link_hash_entry *tlsbase;

      /* Create (or find) the _TLS_MODULE_BASE_ hash entry.  */
      tlsbase = elf_link_hash_lookup
	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);

      if (tlsbase)
	{
	  struct bfd_link_hash_entry *bh = NULL;
	  const struct elf_backend_data *bed
	    = get_elf_backend_data (output_bfd);

	  /* Define _TLS_MODULE_BASE_ at offset 0 of the TLS segment.  */
	  if (!(_bfd_generic_link_add_one_symbol
		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
		 tls_sec, 0, NULL, FALSE,
		 bed->collect, &bh)))
	    return FALSE;

	  /* Mark it as a TLS symbol and hide it so it is not exported
	     from shared objects.  BH refers to the same underlying
	     entry that the lookup above returned.  */
	  tlsbase->type = STT_TLS;
	  tlsbase = (struct elf_link_hash_entry *)bh;
	  tlsbase->def_regular = 1;
	  tlsbase->other = STV_HIDDEN;
	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
	}
    }

  /* For FDPIC, let __stacksize control the stack segment size.  */
  if (htab->fdpic_p && !bfd_link_relocatable (info)
      && !bfd_elf_stack_segment_size (output_bfd, info,
				      "__stacksize", DEFAULT_STACK_SIZE))
    return FALSE;

  return TRUE;
}
17243
/* Finish up dynamic symbol handling.  We set the contents of various
   dynamic sections here.  */

static bfd_boolean
elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
				 struct bfd_link_info * info,
				 struct elf_link_hash_entry * h,
				 Elf_Internal_Sym * sym)
{
  struct elf32_arm_link_hash_table *htab;
  struct elf32_arm_link_hash_entry *eh;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  eh = (struct elf32_arm_link_hash_entry *) h;

  /* Fill in the symbol's PLT entry, if it has one.  */
  if (h->plt.offset != (bfd_vma) -1)
    {
      if (!eh->is_iplt)
	{
	  /* A regular .plt entry requires a dynamic symbol index.  */
	  BFD_ASSERT (h->dynindx != -1);
	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
					      h->dynindx, 0))
	    return FALSE;
	}

      if (!h->def_regular)
	{
	  /* Mark the symbol as undefined, rather than as defined in
	     the .plt section.  */
	  sym->st_shndx = SHN_UNDEF;
	  /* If the symbol is weak we need to clear the value.
	     Otherwise, the PLT entry would provide a definition for
	     the symbol even if the symbol wasn't defined anywhere,
	     and so the symbol would never be NULL.  Leave the value if
	     there were any relocations where pointer equality matters
	     (this is a clue for the dynamic linker, to make function
	     pointer comparisons work between an application and shared
	     library).  */
	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
	    sym->st_value = 0;
	}
      else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
	{
	  /* At least one non-call relocation references this .iplt entry,
	     so the .iplt entry is the function's canonical address.
	     Point the dynamic symbol at the .iplt entry itself.  */
	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
			   (output_bfd, htab->root.iplt->output_section));
	  sym->st_value = (h->plt.offset
			   + htab->root.iplt->output_section->vma
			   + htab->root.iplt->output_offset);
	}
    }

  if (h->needs_copy)
    {
      asection * s;
      Elf_Internal_Rela rel;

      /* This symbol needs a copy reloc.  Set it up.  */
      BFD_ASSERT (h->dynindx != -1
		  && (h->root.type == bfd_link_hash_defined
		      || h->root.type == bfd_link_hash_defweak));

      rel.r_addend = 0;
      rel.r_offset = (h->root.u.def.value
		      + h->root.u.def.section->output_section->vma
		      + h->root.u.def.section->output_offset);
      rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
      /* Copy relocs against read-only-after-relocation data go in
	 .rel(a).dyn.relro; all others go in .rel(a).bss.  */
      if (h->root.u.def.section == htab->root.sdynrelro)
	s = htab->root.sreldynrelro;
      else
	s = htab->root.srelbss;
      elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
    }

  /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
     and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
     it is relative to the ".got" section.  */
  if (h == htab->root.hdynamic
      || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
    sym->st_shndx = SHN_ABS;

  return TRUE;
}
17333
17334 static void
17335 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17336 void *contents,
17337 const unsigned long *template, unsigned count)
17338 {
17339 unsigned ix;
17340
17341 for (ix = 0; ix != count; ix++)
17342 {
17343 unsigned long insn = template[ix];
17344
17345 /* Emit mov pc,rx if bx is not permitted. */
17346 if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17347 insn = (insn & 0xf000000f) | 0x01a0f000;
17348 put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17349 }
17350 }
17351
17352 /* Install the special first PLT entry for elf32-arm-nacl. Unlike
17353 other variants, NaCl needs this entry in a static executable's
17354 .iplt too. When we're handling that case, GOT_DISPLACEMENT is
17355 zero. For .iplt really only the last bundle is useful, and .iplt
17356 could have a shorter first entry, with each individual PLT entry's
17357 relative branch calculated differently so it targets the last
17358 bundle instead of the instruction before it (labelled .Lplt_tail
17359 above). But it's simpler to keep the size and layout of PLT0
17360 consistent with the dynamic case, at the cost of some dead code at
17361 the start of .iplt and the one dead store to the stack at the start
17362 of .Lplt_tail. */
17363 static void
17364 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17365 asection *plt, bfd_vma got_displacement)
17366 {
17367 unsigned int i;
17368
17369 put_arm_insn (htab, output_bfd,
17370 elf32_arm_nacl_plt0_entry[0]
17371 | arm_movw_immediate (got_displacement),
17372 plt->contents + 0);
17373 put_arm_insn (htab, output_bfd,
17374 elf32_arm_nacl_plt0_entry[1]
17375 | arm_movt_immediate (got_displacement),
17376 plt->contents + 4);
17377
17378 for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17379 put_arm_insn (htab, output_bfd,
17380 elf32_arm_nacl_plt0_entry[i],
17381 plt->contents + (i * 4));
17382 }
17383
/* Finish up the dynamic sections.  */

static bfd_boolean
elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
{
  bfd * dynobj;
  asection * sgot;
  asection * sdyn;
  struct elf32_arm_link_hash_table *htab;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  dynobj = elf_hash_table (info)->dynobj;

  sgot = htab->root.sgotplt;
  /* A broken linker script might have discarded the dynamic sections.
     Catch this here so that we do not seg-fault later on.  */
  if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
    return FALSE;
  sdyn = bfd_get_linker_section (dynobj, ".dynamic");

  if (elf_hash_table (info)->dynamic_sections_created)
    {
      asection *splt;
      Elf32_External_Dyn *dyncon, *dynconend;

      splt = htab->root.splt;
      BFD_ASSERT (splt != NULL && sdyn != NULL);
      BFD_ASSERT (htab->symbian_p || sgot != NULL);

      dyncon = (Elf32_External_Dyn *) sdyn->contents;
      dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);

      /* Walk each entry in .dynamic and fill in the values that could
	 not be known when the entries were first added.  */
      for (; dyncon < dynconend; dyncon++)
	{
	  Elf_Internal_Dyn dyn;
	  const char * name;
	  asection * s;

	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);

	  switch (dyn.d_tag)
	    {
	      unsigned int type;

	    default:
	      if (htab->vxworks_p
		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* These tags only need fixing up on Symbian (BPABI), where
	       they are rewritten to file offsets below.  */
	    case DT_HASH:
	      name = ".hash";
	      goto get_vma_if_bpabi;
	    case DT_STRTAB:
	      name = ".dynstr";
	      goto get_vma_if_bpabi;
	    case DT_SYMTAB:
	      name = ".dynsym";
	      goto get_vma_if_bpabi;
	    case DT_VERSYM:
	      name = ".gnu.version";
	      goto get_vma_if_bpabi;
	    case DT_VERDEF:
	      name = ".gnu.version_d";
	      goto get_vma_if_bpabi;
	    case DT_VERNEED:
	      name = ".gnu.version_r";
	      goto get_vma_if_bpabi;

	    case DT_PLTGOT:
	      name = htab->symbian_p ? ".got" : ".got.plt";
	      goto get_vma;
	    case DT_JMPREL:
	      name = RELOC_SECTION (htab, ".plt");
	    get_vma:
	      s = bfd_get_linker_section (dynobj, name);
	      if (s == NULL)
		{
		  _bfd_error_handler
		    (_("could not find section %s"), name);
		  bfd_set_error (bfd_error_invalid_operation);
		  return FALSE;
		}
	      if (!htab->symbian_p)
		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
	      else
		/* In the BPABI, tags in the PT_DYNAMIC section point
		   at the file offset, not the memory address, for the
		   convenience of the post linker.  */
		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    get_vma_if_bpabi:
	      if (htab->symbian_p)
		goto get_vma;
	      break;

	    case DT_PLTRELSZ:
	      s = htab->root.srelplt;
	      BFD_ASSERT (s != NULL);
	      dyn.d_un.d_val = s->size;
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_RELSZ:
	    case DT_RELASZ:
	    case DT_REL:
	    case DT_RELA:
	      /* In the BPABI, the DT_REL tag must point at the file
		 offset, not the VMA, of the first relocation
		 section.  So, we use code similar to that in
		 elflink.c, but do not check for SHF_ALLOC on the
		 relocation section, since relocation sections are
		 never allocated under the BPABI.  PLT relocs are also
		 included.  */
	      if (htab->symbian_p)
		{
		  unsigned int i;
		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
			  ? SHT_REL : SHT_RELA);
		  dyn.d_un.d_val = 0;
		  /* Sum the sizes (for DT_REL(A)SZ) or find the lowest
		     file offset (for DT_REL(A)) over all matching
		     relocation sections in the output.  */
		  for (i = 1; i < elf_numsections (output_bfd); i++)
		    {
		      Elf_Internal_Shdr *hdr
			= elf_elfsections (output_bfd)[i];
		      if (hdr->sh_type == type)
			{
			  if (dyn.d_tag == DT_RELSZ
			      || dyn.d_tag == DT_RELASZ)
			    dyn.d_un.d_val += hdr->sh_size;
			  else if ((ufile_ptr) hdr->sh_offset
				   <= dyn.d_un.d_val - 1)
			    dyn.d_un.d_val = hdr->sh_offset;
			}
		    }
		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		}
	      break;

	    case DT_TLSDESC_PLT:
	      s = htab->root.splt;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_plt);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    case DT_TLSDESC_GOT:
	      s = htab->root.sgot;
	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
				+ htab->dt_tlsdesc_got);
	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
	      break;

	    /* Set the bottom bit of DT_INIT/FINI if the
	       corresponding function is Thumb.  */
	    case DT_INIT:
	      name = info->init_function;
	      goto get_sym;
	    case DT_FINI:
	      name = info->fini_function;
	    get_sym:
	      /* If it wasn't set by elf_bfd_final_link
		 then there is nothing to adjust.  */
	      if (dyn.d_un.d_val != 0)
		{
		  struct elf_link_hash_entry * eh;

		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
					     FALSE, FALSE, TRUE);
		  if (eh != NULL
		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
			 == ST_BRANCH_TO_THUMB)
		    {
		      dyn.d_un.d_val |= 1;
		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
		    }
		}
	      break;
	    }
	}

      /* Fill in the first entry in the procedure linkage table.  */
      if (splt->size > 0 && htab->plt_header_size)
	{
	  const bfd_vma *plt0_entry;
	  bfd_vma got_address, plt_address, got_displacement;

	  /* Calculate the addresses of the GOT and PLT.  */
	  got_address = sgot->output_section->vma + sgot->output_offset;
	  plt_address = splt->output_section->vma + splt->output_offset;

	  if (htab->vxworks_p)
	    {
	      /* The VxWorks GOT is relocated by the dynamic linker.
		 Therefore, we must emit relocations rather than simply
		 computing the values now.  */
	      Elf_Internal_Rela rel;

	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);

	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
	      rel.r_offset = plt_address + 12;
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      rel.r_addend = 0;
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
				     htab->srelplt2->contents);
	    }
	  else if (htab->nacl_p)
	    /* NaCl PLT0 embeds the GOT displacement directly; the
	       displacement is measured from PLT0+16 to GOT+8.  */
	    arm_nacl_put_plt0 (htab, output_bfd, splt,
			       got_address + 8 - (plt_address + 16));
	  else if (using_thumb_only (htab))
	    {
	      got_displacement = got_address - (plt_address + 12);

	      plt0_entry = elf32_thumb2_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);

	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
	    }
	  else
	    {
	      got_displacement = got_address - (plt_address + 16);

	      plt0_entry = elf32_arm_plt0_entry;
	      put_arm_insn (htab, output_bfd, plt0_entry[0],
			    splt->contents + 0);
	      put_arm_insn (htab, output_bfd, plt0_entry[1],
			    splt->contents + 4);
	      put_arm_insn (htab, output_bfd, plt0_entry[2],
			    splt->contents + 8);
	      put_arm_insn (htab, output_bfd, plt0_entry[3],
			    splt->contents + 12);

#ifdef FOUR_WORD_PLT
	      /* The displacement value goes in the otherwise-unused
		 last word of the second entry.  */
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
#else
	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
#endif
	    }
	}

      /* UnixWare sets the entsize of .plt to 4, although that doesn't
	 really seem like the right value.  */
      if (splt->output_section->owner == output_bfd)
	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;

      /* Emit the lazy TLS-descriptor resolution trampoline and its two
	 trailing PC-relative data words.  */
      if (htab->dt_tlsdesc_plt)
	{
	  bfd_vma got_address
	    = sgot->output_section->vma + sgot->output_offset;
	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
				    + htab->root.sgot->output_offset);
	  bfd_vma plt_address
	    = splt->output_section->vma + splt->output_offset;

	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->dt_tlsdesc_plt,
			      dl_tlsdesc_lazy_trampoline, 6);

	  bfd_put_32 (output_bfd,
		      gotplt_address + htab->dt_tlsdesc_got
		      - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[6],
		      splt->contents + htab->dt_tlsdesc_plt + 24);
	  bfd_put_32 (output_bfd,
		      got_address - (plt_address + htab->dt_tlsdesc_plt)
		      - dl_tlsdesc_lazy_trampoline[7],
		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
	}

      /* Emit the TLS trampoline, if one was reserved in the PLT.  */
      if (htab->tls_trampoline)
	{
	  arm_put_trampoline (htab, output_bfd,
			      splt->contents + htab->tls_trampoline,
			      tls_trampoline, 3);
#ifdef FOUR_WORD_PLT
	  bfd_put_32 (output_bfd, 0x00000000,
		      splt->contents + htab->tls_trampoline + 12);
#endif
	}

      if (htab->vxworks_p
	  && !bfd_link_pic (info)
	  && htab->root.splt->size > 0)
	{
	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
	     incorrect symbol indexes.  */
	  int num_plts;
	  unsigned char *p;

	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
		      / htab->plt_entry_size);
	  p = htab->srelplt2->contents + RELOC_SIZE (htab);

	  /* Each PLT entry has a pair of unloaded relocations: one
	     against _GLOBAL_OFFSET_TABLE_ and one against _PROCEDURE_
	     LINKAGE_TABLE_; rewrite their symbol indexes in place.  */
	  for (; num_plts; num_plts--)
	    {
	      Elf_Internal_Rela rel;

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);

	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
	      p += RELOC_SIZE (htab);
	    }
	}
    }

  if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
    /* NaCl uses a special first entry in .iplt too.  */
    arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);

  /* Fill in the first three entries in the global offset table.  */
  if (sgot)
    {
      if (sgot->size > 0)
	{
	  /* GOT[0] holds the address of .dynamic (0 if there is none);
	     GOT[1] and GOT[2] are reserved for the dynamic linker.  */
	  if (sdyn == NULL)
	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
	  else
	    bfd_put_32 (output_bfd,
			sdyn->output_section->vma + sdyn->output_offset,
			sgot->contents);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
	}

      elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
    }

  /* At the very end of the .rofixup section is a pointer to the GOT.  */
  if (htab->fdpic_p && htab->srofixup != NULL)
    {
      struct elf_link_hash_entry *hgot = htab->root.hgot;

      bfd_vma got_value = hgot->root.u.def.value
	+ hgot->root.u.def.section->output_section->vma
	+ hgot->root.u.def.section->output_offset;

      arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);

      /* Make sure we allocated and generated the same number of fixups.  */
      BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
    }

  return TRUE;
}
17752
17753 static bfd_boolean
17754 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17755 {
17756 Elf_Internal_Ehdr * i_ehdrp; /* ELF file header, internal form. */
17757 struct elf32_arm_link_hash_table *globals;
17758 struct elf_segment_map *m;
17759
17760 if (!_bfd_elf_init_file_header (abfd, link_info))
17761 return FALSE;
17762
17763 i_ehdrp = elf_elfheader (abfd);
17764
17765 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17766 i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17767 i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17768
17769 if (link_info)
17770 {
17771 globals = elf32_arm_hash_table (link_info);
17772 if (globals != NULL && globals->byteswap_code)
17773 i_ehdrp->e_flags |= EF_ARM_BE8;
17774
17775 if (globals->fdpic_p)
17776 i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17777 }
17778
17779 if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17780 && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17781 {
17782 int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17783 if (abi == AEABI_VFP_args_vfp)
17784 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17785 else
17786 i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17787 }
17788
17789 /* Scan segment to set p_flags attribute if it contains only sections with
17790 SHF_ARM_PURECODE flag. */
17791 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17792 {
17793 unsigned int j;
17794
17795 if (m->count == 0)
17796 continue;
17797 for (j = 0; j < m->count; j++)
17798 {
17799 if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17800 break;
17801 }
17802 if (j == m->count)
17803 {
17804 m->p_flags = PF_X;
17805 m->p_flags_valid = 1;
17806 }
17807 }
17808 return TRUE;
17809 }
17810
17811 static enum elf_reloc_type_class
17812 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17813 const asection *rel_sec ATTRIBUTE_UNUSED,
17814 const Elf_Internal_Rela *rela)
17815 {
17816 switch ((int) ELF32_R_TYPE (rela->r_info))
17817 {
17818 case R_ARM_RELATIVE:
17819 return reloc_class_relative;
17820 case R_ARM_JUMP_SLOT:
17821 return reloc_class_plt;
17822 case R_ARM_COPY:
17823 return reloc_class_copy;
17824 case R_ARM_IRELATIVE:
17825 return reloc_class_ifunc;
17826 default:
17827 return reloc_class_normal;
17828 }
17829 }
17830
/* Update the ARM note section in ABFD just before the file is written
   out.  */
static void
arm_final_write_processing (bfd *abfd)
{
  bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
}
17836
/* elf_backend_final_write_processing hook: perform the ARM-specific
   note update, then the generic ELF final write processing.  */
static bfd_boolean
elf32_arm_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return _bfd_elf_final_write_processing (abfd);
}
17843
17844 /* Return TRUE if this is an unwinding table entry. */
17845
17846 static bfd_boolean
17847 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17848 {
17849 return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17850 || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17851 }
17852
17853
17854 /* Set the type and flags for an ARM section. We do this by
17855 the section name, which is a hack, but ought to work. */
17856
17857 static bfd_boolean
17858 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17859 {
17860 const char * name;
17861
17862 name = bfd_section_name (sec);
17863
17864 if (is_arm_elf_unwind_section_name (abfd, name))
17865 {
17866 hdr->sh_type = SHT_ARM_EXIDX;
17867 hdr->sh_flags |= SHF_LINK_ORDER;
17868 }
17869
17870 if (sec->flags & SEC_ELF_PURECODE)
17871 hdr->sh_flags |= SHF_ARM_PURECODE;
17872
17873 return TRUE;
17874 }
17875
17876 /* Handle an ARM specific section when reading an object file. This is
17877 called when bfd_section_from_shdr finds a section with an unknown
17878 type. */
17879
17880 static bfd_boolean
17881 elf32_arm_section_from_shdr (bfd *abfd,
17882 Elf_Internal_Shdr * hdr,
17883 const char *name,
17884 int shindex)
17885 {
17886 /* There ought to be a place to keep ELF backend specific flags, but
17887 at the moment there isn't one. We just keep track of the
17888 sections by their name, instead. Fortunately, the ABI gives
17889 names for all the ARM specific sections, so we will probably get
17890 away with this. */
17891 switch (hdr->sh_type)
17892 {
17893 case SHT_ARM_EXIDX:
17894 case SHT_ARM_PREEMPTMAP:
17895 case SHT_ARM_ATTRIBUTES:
17896 break;
17897
17898 default:
17899 return FALSE;
17900 }
17901
17902 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17903 return FALSE;
17904
17905 return TRUE;
17906 }
17907
17908 static _arm_elf_section_data *
17909 get_arm_elf_section_data (asection * sec)
17910 {
17911 if (sec && sec->owner && is_arm_elf (sec->owner))
17912 return elf32_arm_section_data (sec);
17913 else
17914 return NULL;
17915 }
17916
/* State threaded through the mapping/stub symbol output callbacks.  */
typedef struct
{
  /* Opaque state passed through to FUNC.  */
  void *flaginfo;
  /* Link information for the current link.  */
  struct bfd_link_info *info;
  /* Section the symbols being emitted belong to.  */
  asection *sec;
  /* ELF section index of SEC's output section.  */
  int sec_shndx;
  /* Callback that actually outputs one symbol.  */
  int (*func) (void *, const char *, Elf_Internal_Sym *,
	       asection *, struct elf_link_hash_entry *);
} output_arch_syminfo;
17926
/* Kind of AAELF mapping symbol to emit: $a (ARM code), $t (Thumb
   code) or $d (data).  Used as an index into the name table in
   elf32_arm_output_map_sym.  */
enum map_symbol_type
{
  ARM_MAP_ARM,
  ARM_MAP_THUMB,
  ARM_MAP_DATA
};
17933
17934
17935 /* Output a single mapping symbol. */
17936
17937 static bfd_boolean
17938 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17939 enum map_symbol_type type,
17940 bfd_vma offset)
17941 {
17942 static const char *names[3] = {"$a", "$t", "$d"};
17943 Elf_Internal_Sym sym;
17944
17945 sym.st_value = osi->sec->output_section->vma
17946 + osi->sec->output_offset
17947 + offset;
17948 sym.st_size = 0;
17949 sym.st_other = 0;
17950 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17951 sym.st_shndx = osi->sec_shndx;
17952 sym.st_target_internal = 0;
17953 elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17954 return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17955 }
17956
/* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
   IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */

static bfd_boolean
elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
			    bfd_boolean is_iplt_entry_p,
			    union gotplt_union *root_plt,
			    struct arm_plt_info *arm_plt)
{
  struct elf32_arm_link_hash_table *htab;
  bfd_vma addr, plt_header_size;

  /* No PLT entry was allocated for this symbol: nothing to map.  */
  if (root_plt->offset == (bfd_vma) -1)
    return TRUE;

  htab = elf32_arm_hash_table (osi->info);
  if (htab == NULL)
    return FALSE;

  /* Select the section the entry lives in; .iplt has no header.  */
  if (is_iplt_entry_p)
    {
      osi->sec = htab->root.iplt;
      plt_header_size = 0;
    }
  else
    {
      osi->sec = htab->root.splt;
      plt_header_size = htab->plt_header_size;
    }
  osi->sec_shndx = (_bfd_elf_section_from_bfd_section
		    (osi->info->output_bfd, osi->sec->output_section));

  /* Clear the Thumb bit to get the real entry offset.  */
  addr = root_plt->offset & -2;

  /* Each target variant lays out its PLT entry differently, so the
     mapping symbol offsets differ per variant.  */
  if (htab->symbian_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
	return FALSE;
    }
  else if (htab->vxworks_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
	return FALSE;
    }
  else if (htab->nacl_p)
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
    }
  else if (htab->fdpic_p)
    {
      /* FDPIC entries may be ARM or Thumb depending on the target.  */
      enum map_symbol_type type = using_thumb_only(htab)
	? ARM_MAP_THUMB
	: ARM_MAP_ARM;

      /* A Thumb entry stub, when present, sits 4 bytes before ADDR.  */
      if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	  return FALSE;
      if (!elf32_arm_output_map_sym (osi, type, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
	return FALSE;
      /* The long (lazy-binding) FDPIC entry has code after the data.  */
      if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
	  return FALSE;
    }
  else if (using_thumb_only (htab))
    {
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
	return FALSE;
    }
  else
    {
      bfd_boolean thumb_stub_p;

      thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
      if (thumb_stub_p)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
	    return FALSE;
	}
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	return FALSE;
      if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
	return FALSE;
#else
      /* A three-word PLT with no Thumb thunk contains only Arm code,
	 so only need to output a mapping symbol for the first PLT entry and
	 entries with thumb thunks.  */
      if (thumb_stub_p || addr == plt_header_size)
	{
	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
	    return FALSE;
	}
#endif
    }

  return TRUE;
}
18064
18065 /* Output mapping symbols for PLT entries associated with H. */
18066
18067 static bfd_boolean
18068 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18069 {
18070 output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18071 struct elf32_arm_link_hash_entry *eh;
18072
18073 if (h->root.type == bfd_link_hash_indirect)
18074 return TRUE;
18075
18076 if (h->root.type == bfd_link_hash_warning)
18077 /* When warning symbols are created, they **replace** the "real"
18078 entry in the hash table, thus we never get to see the real
18079 symbol in a hash traversal. So look at it now. */
18080 h = (struct elf_link_hash_entry *) h->root.u.i.link;
18081
18082 eh = (struct elf32_arm_link_hash_entry *) h;
18083 return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18084 &h->plt, &eh->plt);
18085 }
18086
18087 /* Bind a veneered symbol to its veneer identified by its hash entry
18088 STUB_ENTRY. The veneered location thus loose its symbol. */
18089
18090 static void
18091 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18092 {
18093 struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18094
18095 BFD_ASSERT (hash);
18096 hash->root.root.u.def.section = stub_entry->stub_sec;
18097 hash->root.root.u.def.value = stub_entry->stub_offset;
18098 hash->root.size = stub_entry->stub_size;
18099 }
18100
18101 /* Output a single local symbol for a generated stub. */
18102
18103 static bfd_boolean
18104 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18105 bfd_vma offset, bfd_vma size)
18106 {
18107 Elf_Internal_Sym sym;
18108
18109 sym.st_value = osi->sec->output_section->vma
18110 + osi->sec->output_offset
18111 + offset;
18112 sym.st_size = size;
18113 sym.st_other = 0;
18114 sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18115 sym.st_shndx = osi->sec_shndx;
18116 sym.st_target_internal = 0;
18117 return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18118 }
18119
/* Traversal callback: emit the stub symbol and the mapping symbols
   for one linker-generated stub, walking its instruction template.  */
static bfd_boolean
arm_map_one_stub (struct bfd_hash_entry * gen_entry,
		  void * in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  asection *stub_sec;
  bfd_vma addr;
  char *stub_name;
  output_arch_syminfo *osi;
  const insn_sequence *template_sequence;
  enum stub_insn_type prev_type;
  int size;
  int i;
  enum map_symbol_type sym_type;

  /* Massage our args to the form they really have.  */
  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  osi = (output_arch_syminfo *) in_arg;

  stub_sec = stub_entry->stub_sec;

  /* Ensure this stub is attached to the current section being
     processed.  */
  if (stub_sec != osi->sec)
    return TRUE;

  addr = (bfd_vma) stub_entry->stub_offset;
  template_sequence = stub_entry->stub_template;

  /* Either the stub takes over the veneered symbol's name, or we emit
     a fresh local symbol for it (with the Thumb bit set when the stub
     starts in Thumb state).  */
  if (arm_stub_sym_claimed (stub_entry->stub_type))
    arm_stub_claim_sym (stub_entry);
  else
    {
      stub_name = stub_entry->output_name;
      switch (template_sequence[0].type)
	{
	case ARM_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
					  stub_entry->stub_size))
	    return FALSE;
	  break;
	default:
	  BFD_FAIL ();
	  return 0;
	}
    }

  /* Walk the template, emitting a mapping symbol whenever the
     instruction class ($a/$t/$d) changes.  PREV_TYPE starts as
     DATA_TYPE so the first instruction always gets a symbol.  */
  prev_type = DATA_TYPE;
  size = 0;
  for (i = 0; i < stub_entry->stub_template_size; i++)
    {
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	  sym_type = ARM_MAP_ARM;
	  break;

	case THUMB16_TYPE:
	case THUMB32_TYPE:
	  sym_type = ARM_MAP_THUMB;
	  break;

	case DATA_TYPE:
	  sym_type = ARM_MAP_DATA;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}

      if (template_sequence[i].type != prev_type)
	{
	  prev_type = template_sequence[i].type;
	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
	    return FALSE;
	}

      /* Advance by the encoded size of this template element.  */
      switch (template_sequence[i].type)
	{
	case ARM_TYPE:
	case THUMB32_TYPE:
	  size += 4;
	  break;

	case THUMB16_TYPE:
	  size += 2;
	  break;

	case DATA_TYPE:
	  size += 4;
	  break;

	default:
	  BFD_FAIL ();
	  return FALSE;
	}
    }

  return TRUE;
}
18227
/* Output mapping symbols for linker generated sections,
   and for those data-only sections that do not have a
   $d.

   FUNC is the callback that actually records each Elf_Internal_Sym;
   FLAGINFO is passed through to it unchanged.  Returns FALSE as soon as
   emitting any symbol fails.  */

static bfd_boolean
elf32_arm_output_arch_local_syms (bfd *output_bfd,
				  struct bfd_link_info *info,
				  void *flaginfo,
				  int (*func) (void *, const char *,
					       Elf_Internal_Sym *,
					       asection *,
					       struct elf_link_hash_entry *))
{
  output_arch_syminfo osi;
  struct elf32_arm_link_hash_table *htab;
  bfd_vma offset;
  bfd_size_type size;
  bfd *input_bfd;

  htab = elf32_arm_hash_table (info);
  if (htab == NULL)
    return FALSE;

  check_use_blx (htab);

  osi.flaginfo = flaginfo;
  osi.info = info;
  osi.func = func;

  /* Add a $d mapping symbol to data-only sections that
     don't have any mapping symbol.  This may result in (harmless) redundant
     mapping symbols.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link.next)
    {
      /* Only consider input files with symbols that were not created by
	 the linker itself.  */
      if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
	for (osi.sec = input_bfd->sections;
	     osi.sec != NULL;
	     osi.sec = osi.sec->next)
	  {
	    if (osi.sec->output_section != NULL
		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
		    != 0)
		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
		   == SEC_HAS_CONTENTS
		&& get_arm_elf_section_data (osi.sec) != NULL
		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
		&& osi.sec->size > 0
		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
	      {
		osi.sec_shndx = _bfd_elf_section_from_bfd_section
		  (output_bfd, osi.sec->output_section);
		if (osi.sec_shndx != (int)SHN_BAD)
		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
	      }
	  }
    }

  /* ARM->Thumb glue.  Each veneer starts with ARM code ($a) and its last
     4 bytes are marked as data ($d).  */
  if (htab->arm_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM2THUMB_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      /* Pick the per-veneer stride matching the glue flavour chosen at
	 glue-generation time.  */
      if (bfd_link_pic (info) || htab->root.is_relocatable_executable
	  || htab->pic_veneer)
	size = ARM2THUMB_PIC_GLUE_SIZE;
      else if (htab->use_blx)
	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
      else
	size = ARM2THUMB_STATIC_GLUE_SIZE;

      for (offset = 0; offset < htab->arm_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
	}
    }

  /* Thumb->ARM glue.  Each veneer is Thumb code ($t) followed by ARM
     code ($a) 4 bytes in.  */
  if (htab->thumb_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					THUMB2ARM_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);
      size = THUMB2ARM_GLUE_SIZE;

      for (offset = 0; offset < htab->thumb_glue_size; offset += size)
	{
	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
	}
    }

  /* ARMv4 BX veneers.  The whole section is ARM code, so a single $a at
     offset 0 suffices.  */
  if (htab->bx_glue_size > 0)
    {
      osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
					ARM_BX_GLUE_SECTION_NAME);

      osi.sec_shndx = _bfd_elf_section_from_bfd_section
	(output_bfd, osi.sec->output_section);

      elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
    }

  /* Long calls stubs.  Delegate per-stub symbol output to
     arm_map_one_stub, once per stub section.  */
  if (htab->stub_bfd && htab->stub_bfd->sections)
    {
      asection* stub_sec;

      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  /* Ignore non-stub sections.  */
	  if (!strstr (stub_sec->name, STUB_SUFFIX))
	    continue;

	  osi.sec = stub_sec;

	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
	    (output_bfd, osi.sec->output_section);

	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
	}
    }

  /* Finally, output mapping symbols for the PLT.  */
  if (htab->root.splt && htab->root.splt->size > 0)
    {
      osi.sec = htab->root.splt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));

      /* Output mapping symbols for the plt header.  SymbianOS does not have a
	 plt header.  */
      if (htab->vxworks_p)
	{
	  /* VxWorks shared libraries have no PLT header.  */
	  if (!bfd_link_pic (info))
	    {
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
		return FALSE;
	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
		return FALSE;
	    }
	}
      else if (htab->nacl_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
	}
      else if (using_thumb_only (htab) && !htab->fdpic_p)
	{
	  /* Thumb-only PLT header: code at 0 and 16, data words at 12.  */
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
	    return FALSE;
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
	    return FALSE;
	}
      else if (!htab->symbian_p && !htab->fdpic_p)
	{
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	    return FALSE;
#ifndef FOUR_WORD_PLT
	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
	    return FALSE;
#endif
	}
    }
  if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
    {
      /* NaCl uses a special first entry in .iplt too.  */
      osi.sec = htab->root.iplt;
      osi.sec_shndx = (_bfd_elf_section_from_bfd_section
		       (output_bfd, osi.sec->output_section));
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
	return FALSE;
    }
  /* Per-entry PLT mapping symbols: global symbols via hash traversal,
     then local ifunc PLT entries of each input bfd.  */
  if ((htab->root.splt && htab->root.splt->size > 0)
      || (htab->root.iplt && htab->root.iplt->size > 0))
    {
      elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
      for (input_bfd = info->input_bfds;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link.next)
	{
	  struct arm_local_iplt_info **local_iplt;
	  unsigned int i, num_syms;

	  local_iplt = elf32_arm_local_iplt (input_bfd);
	  if (local_iplt != NULL)
	    {
	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
	      for (i = 0; i < num_syms; i++)
		if (local_iplt[i] != NULL
		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
						    &local_iplt[i]->root,
						    &local_iplt[i]->arm))
		  return FALSE;
	    }
	}
    }
  if (htab->dt_tlsdesc_plt != 0)
    {
      /* Mapping symbols for the lazy tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
	return FALSE;

      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->dt_tlsdesc_plt + 24))
	return FALSE;
    }
  if (htab->tls_trampoline != 0)
    {
      /* Mapping symbols for the tls trampoline.  */
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
	return FALSE;
#ifdef FOUR_WORD_PLT
      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
				     htab->tls_trampoline + 12))
	return FALSE;
#endif
    }

  return TRUE;
}
18462
18463 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18464 the import library. All SYMCOUNT symbols of ABFD can be examined
18465 from their pointers in SYMS. Pointers of symbols to keep should be
18466 stored continuously at the beginning of that array.
18467
18468 Returns the number of symbols to keep. */
18469
18470 static unsigned int
18471 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18472 struct bfd_link_info *info,
18473 asymbol **syms, long symcount)
18474 {
18475 size_t maxnamelen;
18476 char *cmse_name;
18477 long src_count, dst_count = 0;
18478 struct elf32_arm_link_hash_table *htab;
18479
18480 htab = elf32_arm_hash_table (info);
18481 if (!htab->stub_bfd || !htab->stub_bfd->sections)
18482 symcount = 0;
18483
18484 maxnamelen = 128;
18485 cmse_name = (char *) bfd_malloc (maxnamelen);
18486 BFD_ASSERT (cmse_name);
18487
18488 for (src_count = 0; src_count < symcount; src_count++)
18489 {
18490 struct elf32_arm_link_hash_entry *cmse_hash;
18491 asymbol *sym;
18492 flagword flags;
18493 char *name;
18494 size_t namelen;
18495
18496 sym = syms[src_count];
18497 flags = sym->flags;
18498 name = (char *) bfd_asymbol_name (sym);
18499
18500 if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18501 continue;
18502 if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18503 continue;
18504
18505 namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18506 if (namelen > maxnamelen)
18507 {
18508 cmse_name = (char *)
18509 bfd_realloc (cmse_name, namelen);
18510 maxnamelen = namelen;
18511 }
18512 snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18513 cmse_hash = (struct elf32_arm_link_hash_entry *)
18514 elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18515
18516 if (!cmse_hash
18517 || (cmse_hash->root.root.type != bfd_link_hash_defined
18518 && cmse_hash->root.root.type != bfd_link_hash_defweak)
18519 || cmse_hash->root.type != STT_FUNC)
18520 continue;
18521
18522 syms[dst_count++] = sym;
18523 }
18524 free (cmse_name);
18525
18526 syms[dst_count] = NULL;
18527
18528 return dst_count;
18529 }
18530
18531 /* Filter symbols of ABFD to include in the import library. All
18532 SYMCOUNT symbols of ABFD can be examined from their pointers in
18533 SYMS. Pointers of symbols to keep should be stored continuously at
18534 the beginning of that array.
18535
18536 Returns the number of symbols to keep. */
18537
18538 static unsigned int
18539 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18540 struct bfd_link_info *info,
18541 asymbol **syms, long symcount)
18542 {
18543 struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18544
18545 /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18546 Development Tools" (ARM-ECM-0359818) mandates Secure Gateway import
18547 library to be a relocatable object file. */
18548 BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18549 if (globals->cmse_implib)
18550 return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18551 else
18552 return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18553 }
18554
18555 /* Allocate target specific section data. */
18556
18557 static bfd_boolean
18558 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18559 {
18560 if (!sec->used_by_bfd)
18561 {
18562 _arm_elf_section_data *sdata;
18563 size_t amt = sizeof (*sdata);
18564
18565 sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18566 if (sdata == NULL)
18567 return FALSE;
18568 sec->used_by_bfd = sdata;
18569 }
18570
18571 return _bfd_elf_new_section_hook (abfd, sec);
18572 }
18573
18574
18575 /* Used to order a list of mapping symbols by address. */
18576
18577 static int
18578 elf32_arm_compare_mapping (const void * a, const void * b)
18579 {
18580 const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18581 const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18582
18583 if (amap->vma > bmap->vma)
18584 return 1;
18585 else if (amap->vma < bmap->vma)
18586 return -1;
18587 else if (amap->type > bmap->type)
18588 /* Ensure results do not depend on the host qsort for objects with
18589 multiple mapping symbols at the same address by sorting on type
18590 after vma. */
18591 return 1;
18592 else if (amap->type < bmap->type)
18593 return -1;
18594 else
18595 return 0;
18596 }
18597
18598 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified. */
18599
18600 static unsigned long
18601 offset_prel31 (unsigned long addr, bfd_vma offset)
18602 {
18603 return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18604 }
18605
18606 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18607 relocations. */
18608
18609 static void
18610 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18611 {
18612 unsigned long first_word = bfd_get_32 (output_bfd, from);
18613 unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18614
18615 /* High bit of first word is supposed to be zero. */
18616 if ((first_word & 0x80000000ul) == 0)
18617 first_word = offset_prel31 (first_word, offset);
18618
18619 /* If the high bit of the first word is clear, and the bit pattern is not 0x1
18620 (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry. */
18621 if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18622 second_word = offset_prel31 (second_word, offset);
18623
18624 bfd_put_32 (output_bfd, first_word, to);
18625 bfd_put_32 (output_bfd, second_word, to + 4);
18626 }
18627
/* Data for make_branch_to_a8_stub().  */

struct a8_branch_to_stub_data
{
  asection *writing_section;	/* Section whose branches are being patched.  */
  bfd_byte *contents;		/* Contents of that section being rewritten.  */
};
18635
18636
/* Helper to insert branches to Cortex-A8 erratum stubs in the right
   places for a particular section.  Called via bfd_hash_traverse with
   IN_ARG pointing at a struct a8_branch_to_stub_data; rewrites the
   32-bit Thumb branch at the veneered location so that it targets the
   stub.  Returns FALSE on error, stopping the traversal.  */

static bfd_boolean
make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
			void *in_arg)
{
  struct elf32_arm_stub_hash_entry *stub_entry;
  struct a8_branch_to_stub_data *data;
  bfd_byte *contents;
  unsigned long branch_insn;
  bfd_vma veneered_insn_loc, veneer_entry_loc;
  bfd_signed_vma branch_offset;
  bfd *abfd;
  unsigned int loc;

  stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
  data = (struct a8_branch_to_stub_data *) in_arg;

  /* Only handle A8 erratum stubs (types from arm_stub_a8_veneer_lwm on)
     belonging to the section currently being written.  */
  if (stub_entry->target_section != data->writing_section
      || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
    return TRUE;

  contents = data->contents;

  /* We use target_section as Cortex-A8 erratum workaround stubs are only
     generated when both source and target are in the same section.  */
  veneered_insn_loc = stub_entry->target_section->output_section->vma
		      + stub_entry->target_section->output_offset
		      + stub_entry->source_value;

  veneer_entry_loc = stub_entry->stub_sec->output_section->vma
		     + stub_entry->stub_sec->output_offset
		     + stub_entry->stub_offset;

  /* For a BLX veneer the branch source is rounded down to a 4-byte
     boundary before computing the offset.  */
  if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
    veneered_insn_loc &= ~3u;

  branch_offset = veneer_entry_loc - veneered_insn_loc - 4;

  abfd = stub_entry->target_section->owner;
  loc = stub_entry->source_value;

  /* We attempt to avoid this condition by setting stubs_always_after_branch
     in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
     This check is just to be on the safe side...  */
  if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
    {
      _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
			    "allocated in unsafe location"), abfd);
      return FALSE;
    }

  /* Select the 32-bit Thumb branch opcode matching the stub type, then
     fall into the common Thumb-2 24-bit offset encoding at jump24.  */
  switch (stub_entry->stub_type)
    {
    case arm_stub_a8_veneer_b:
    case arm_stub_a8_veneer_b_cond:
      branch_insn = 0xf0009000;
      goto jump24;

    case arm_stub_a8_veneer_blx:
      branch_insn = 0xf000e800;
      goto jump24;

    case arm_stub_a8_veneer_bl:
      {
	unsigned int i1, j1, i2, j2, s;

	branch_insn = 0xf000d000;

      jump24:
	if (branch_offset < -16777216 || branch_offset > 16777214)
	  {
	    /* There's not much we can do apart from complain if this
	       happens.  */
	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
				  "of range (input file too large)"), abfd);
	    return FALSE;
	  }

	/* i1 = not(j1 eor s), so:
	   not i1 = j1 eor s
	   j1 = (not i1) eor s.  */

	branch_insn |= (branch_offset >> 1) & 0x7ff;
	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
	i2 = (branch_offset >> 22) & 1;
	i1 = (branch_offset >> 23) & 1;
	s = (branch_offset >> 24) & 1;
	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;
	branch_insn |= j2 << 11;
	branch_insn |= j1 << 13;
	branch_insn |= s << 26;
      }
      break;

    default:
      BFD_FAIL ();
      return FALSE;
    }

  /* Write the two Thumb halfwords of the patched branch in place.  */
  bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
  bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);

  return TRUE;
}
18744
18745 /* Beginning of stm32l4xx work-around. */
18746
18747 /* Functions encoding instructions necessary for the emission of the
18748 fix-stm32l4xx-629360.
18749 Encoding is extracted from the
18750 ARM (C) Architecture Reference Manual
18751 ARMv7-A and ARMv7-R edition
18752 ARM DDI 0406C.b (ID072512). */
18753
18754 static inline bfd_vma
18755 create_instruction_branch_absolute (int branch_offset)
18756 {
18757 /* A8.8.18 B (A8-334)
18758 B target_address (Encoding T4). */
18759 /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii. */
18760 /* jump offset is: S:I1:I2:imm10:imm11:0. */
18761 /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S). */
18762
18763 int s = ((branch_offset & 0x1000000) >> 24);
18764 int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18765 int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18766
18767 if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18768 BFD_ASSERT (0 && "Error: branch out of range. Cannot create branch.");
18769
18770 bfd_vma patched_inst = 0xf0009000
18771 | s << 26 /* S. */
18772 | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10. */
18773 | j1 << 13 /* J1. */
18774 | j2 << 11 /* J2. */
18775 | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11. */
18776
18777 return patched_inst;
18778 }
18779
18780 static inline bfd_vma
18781 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18782 {
18783 /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18784 LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2). */
18785 bfd_vma patched_inst = 0xe8900000
18786 | (/*W=*/wback << 21)
18787 | (base_reg << 16)
18788 | (reg_mask & 0x0000ffff);
18789
18790 return patched_inst;
18791 }
18792
18793 static inline bfd_vma
18794 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18795 {
18796 /* A8.8.60 LDMDB/LDMEA (A8-402)
18797 LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1). */
18798 bfd_vma patched_inst = 0xe9100000
18799 | (/*W=*/wback << 21)
18800 | (base_reg << 16)
18801 | (reg_mask & 0x0000ffff);
18802
18803 return patched_inst;
18804 }
18805
18806 static inline bfd_vma
18807 create_instruction_mov (int target_reg, int source_reg)
18808 {
18809 /* A8.8.103 MOV (register) (A8-486)
18810 MOV Rd, Rm (Encoding T1). */
18811 bfd_vma patched_inst = 0x4600
18812 | (target_reg & 0x7)
18813 | ((target_reg & 0x8) >> 3) << 7
18814 | (source_reg << 3);
18815
18816 return patched_inst;
18817 }
18818
18819 static inline bfd_vma
18820 create_instruction_sub (int target_reg, int source_reg, int value)
18821 {
18822 /* A8.8.221 SUB (immediate) (A8-708)
18823 SUB Rd, Rn, #value (Encoding T3). */
18824 bfd_vma patched_inst = 0xf1a00000
18825 | (target_reg << 8)
18826 | (source_reg << 16)
18827 | (/*S=*/0 << 20)
18828 | ((value & 0x800) >> 11) << 26
18829 | ((value & 0x700) >> 8) << 12
18830 | (value & 0x0ff);
18831
18832 return patched_inst;
18833 }
18834
18835 static inline bfd_vma
18836 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18837 int first_reg)
18838 {
18839 /* A8.8.332 VLDM (A8-922)
18840 VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2). */
18841 bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18842 | (/*W=*/wback << 21)
18843 | (base_reg << 16)
18844 | (num_words & 0x000000ff)
18845 | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18846 | (first_reg & 0x00000001) << 22;
18847
18848 return patched_inst;
18849 }
18850
18851 static inline bfd_vma
18852 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18853 int first_reg)
18854 {
18855 /* A8.8.332 VLDM (A8-922)
18856 VLMD{MODE} Rn!, {} (Encoding T1 or T2). */
18857 bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18858 | (base_reg << 16)
18859 | (num_words & 0x000000ff)
18860 | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
18861 | (first_reg & 0x00000001) << 22;
18862
18863 return patched_inst;
18864 }
18865
18866 static inline bfd_vma
18867 create_instruction_udf_w (int value)
18868 {
18869 /* A8.8.247 UDF (A8-758)
18870 Undefined (Encoding T2). */
18871 bfd_vma patched_inst = 0xf7f0a000
18872 | (value & 0x00000fff)
18873 | (value & 0x000f0000) << 16;
18874
18875 return patched_inst;
18876 }
18877
18878 static inline bfd_vma
18879 create_instruction_udf (int value)
18880 {
18881 /* A8.8.247 UDF (A8-758)
18882 Undefined (Encoding T1). */
18883 bfd_vma patched_inst = 0xde00
18884 | (value & 0xff);
18885
18886 return patched_inst;
18887 }
18888
18889 /* Functions writing an instruction in memory, returning the next
18890 memory position to write to. */
18891
18892 static inline bfd_byte *
18893 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18894 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18895 {
18896 put_thumb2_insn (htab, output_bfd, insn, pt);
18897 return pt + 4;
18898 }
18899
18900 static inline bfd_byte *
18901 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18902 bfd * output_bfd, bfd_byte *pt, insn32 insn)
18903 {
18904 put_thumb_insn (htab, output_bfd, insn, pt);
18905 return pt + 2;
18906 }
18907
18908 /* Function filling up a region in memory with T1 and T2 UDFs taking
18909 care of alignment. */
18910
18911 static bfd_byte *
18912 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18913 bfd * output_bfd,
18914 const bfd_byte * const base_stub_contents,
18915 bfd_byte * const from_stub_contents,
18916 const bfd_byte * const end_stub_contents)
18917 {
18918 bfd_byte *current_stub_contents = from_stub_contents;
18919
18920 /* Fill the remaining of the stub with deterministic contents : UDF
18921 instructions.
18922 Check if realignment is needed on modulo 4 frontier using T1, to
18923 further use T2. */
18924 if ((current_stub_contents < end_stub_contents)
18925 && !((current_stub_contents - base_stub_contents) % 2)
18926 && ((current_stub_contents - base_stub_contents) % 4))
18927 current_stub_contents =
18928 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18929 create_instruction_udf (0));
18930
18931 for (; current_stub_contents < end_stub_contents;)
18932 current_stub_contents =
18933 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18934 create_instruction_udf_w (0));
18935
18936 return current_stub_contents;
18937 }
18938
18939 /* Functions writing the stream of instructions equivalent to the
18940 derived sequence for ldmia, ldmdb, vldm respectively. */
18941
/* Emit into BASE_STUB_CONTENTS the veneer replacing the wide Thumb-2
   LDMIA instruction INITIAL_INSN, located at INITIAL_INSN_ADDR, as part
   of the STM32L4XX erratum fix.  Loads of more than 8 registers are
   split into two smaller LDMs; smaller loads are copied unchanged.  The
   stub ends with a branch back to just after the original instruction
   (unless the load itself writes PC) and is padded with UDFs.  */

static void
stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
				       bfd * output_bfd,
				       const insn32 initial_insn,
				       const bfd_byte *const initial_insn_addr,
				       bfd_byte *const base_stub_contents)
{
  int wback = (initial_insn & 0x00200000) >> 21;	/* W (write-back) bit.  */
  int ri, rn = (initial_insn & 0x000F0000) >> 16;	/* Base register.  */
  int insn_all_registers = initial_insn & 0x0000ffff;	/* Register list.  */
  int insn_low_registers, insn_high_registers;
  int usable_register_mask;
  int nb_registers = elf32_arm_popcount (insn_all_registers);
  int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
  int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_ldmia (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 registers load sequences that do not cause the
     hardware issue.  */
  if (nb_registers <= 8)
    {
      /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      if (!restore_pc)
	current_stub_contents =
	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			      create_instruction_branch_absolute
			      (initial_insn_addr - current_stub_contents));

      /* Fill the remaining of the stub with deterministic contents.  */
      current_stub_contents =
	stm32l4xx_fill_stub_udf (htab, output_bfd,
				 base_stub_contents, current_stub_contents,
				 base_stub_contents +
				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);

      return;
    }

  /* Preconditions guaranteed by the caller for the wide-load case.  */
  /* - reg_list[13] == 0.  */
  BFD_ASSERT ((insn_all_registers & (1 << 13))==0);

  /* - reg_list[14] & reg_list[15] != 1.  */
  BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);

  /* - if (wback==1) reg_list[rn] == 0.  */
  BFD_ASSERT (!wback || !restore_rn);

  /* - nb_registers > 8.  */
  BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);

  /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */

  /* In the following algorithm, we split this wide LDM using 2 LDM insns:
    - One with the 7 lowest registers (register mask 0x007F)
      This LDM will finally contain between 2 and 7 registers
    - One with the 7 highest registers (register mask 0xDF80)
      This ldm will finally contain between 2 and 7 registers.  */
  insn_low_registers = insn_all_registers & 0x007F;
  insn_high_registers = insn_all_registers & 0xDF80;

  /* A spare register may be needed during this veneer to temporarily
     handle the base register.  This register will be restored with the
     last LDM operation.
     The usable register may be any general purpose register (that
     excludes PC, SP, LR : register mask is 0x1FFF).  */
  usable_register_mask = 0x1FFF;

  /* Generate the stub function.  */
  if (wback)
    {
      /* With write-back, the base register advances by itself, so the
	 two halves can simply be loaded in sequence.  */
      /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_low_registers));

      /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (rn, /*wback=*/1, insn_high_registers));
      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }
  else /* if (!wback).  */
    {
      ri = rn;

      /* If Rn is not part of the high-register-list, move it there.  */
      if (!(insn_high_registers & (1 << rn)))
	{
	  /* Choose a Ri in the high-register-list that will be restored.  */
	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));

	  /* MOV Ri, Rn.  */
	  current_stub_contents =
	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
				create_instruction_mov (ri, rn));
	}

      /* The first LDM writes back through Ri; the second does not, and
	 loading the high half last restores Ri itself.  */
      /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/1, insn_low_registers));

      /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_ldmia
			    (ri, /*wback=*/0, insn_high_registers));

      if (!restore_pc)
	{
	  /* B initial_insn_addr+4.  */
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_branch_absolute
				(initial_insn_addr - current_stub_contents));
	}
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
}
19085
19086 static void
19087 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
19088 bfd * output_bfd,
19089 const insn32 initial_insn,
19090 const bfd_byte *const initial_insn_addr,
19091 bfd_byte *const base_stub_contents)
19092 {
19093 int wback = (initial_insn & 0x00200000) >> 21;
19094 int ri, rn = (initial_insn & 0x000f0000) >> 16;
19095 int insn_all_registers = initial_insn & 0x0000ffff;
19096 int insn_low_registers, insn_high_registers;
19097 int usable_register_mask;
19098 int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19099 int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19100 int nb_registers = elf32_arm_popcount (insn_all_registers);
19101 bfd_byte *current_stub_contents = base_stub_contents;
19102
19103 BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19104
19105 /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19106 smaller than 8 registers load sequences that do not cause the
19107 hardware issue. */
19108 if (nb_registers <= 8)
19109 {
19110 /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}. */
19111 current_stub_contents =
19112 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19113 initial_insn);
19114
19115 /* B initial_insn_addr+4. */
19116 current_stub_contents =
19117 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19118 create_instruction_branch_absolute
19119 (initial_insn_addr - current_stub_contents));
19120
19121 /* Fill the remaining of the stub with deterministic contents. */
19122 current_stub_contents =
19123 stm32l4xx_fill_stub_udf (htab, output_bfd,
19124 base_stub_contents, current_stub_contents,
19125 base_stub_contents +
19126 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19127
19128 return;
19129 }
19130
19131 /* - reg_list[13] == 0. */
19132 BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19133
19134 /* - reg_list[14] & reg_list[15] != 1. */
19135 BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19136
19137 /* - if (wback==1) reg_list[rn] == 0. */
19138 BFD_ASSERT (!wback || !restore_rn);
19139
19140 /* - nb_registers > 8. */
19141 BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19142
19143 /* At this point, LDMxx initial insn loads between 9 and 14 registers. */
19144
19145 /* In the following algorithm, we split this wide LDM using 2 LDM insn:
19146 - One with the 7 lowest registers (register mask 0x007F)
19147 This LDM will finally contain between 2 and 7 registers
19148 - One with the 7 highest registers (register mask 0xDF80)
19149 This ldm will finally contain between 2 and 7 registers. */
19150 insn_low_registers = insn_all_registers & 0x007F;
19151 insn_high_registers = insn_all_registers & 0xDF80;
19152
19153 /* A spare register may be needed during this veneer to temporarily
19154 handle the base register. This register will be restored with
19155 the last LDM operation.
19156 The usable register may be any general purpose register (that excludes
19157 PC, SP, LR : register mask is 0x1FFF). */
19158 usable_register_mask = 0x1FFF;
19159
19160 /* Generate the stub function. */
19161 if (!wback && !restore_pc && !restore_rn)
19162 {
19163 /* Choose a Ri in the low-register-list that will be restored. */
19164 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19165
19166 /* MOV Ri, Rn. */
19167 current_stub_contents =
19168 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19169 create_instruction_mov (ri, rn));
19170
19171 /* LDMDB Ri!, {R-high-register-list}. */
19172 current_stub_contents =
19173 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19174 create_instruction_ldmdb
19175 (ri, /*wback=*/1, insn_high_registers));
19176
19177 /* LDMDB Ri, {R-low-register-list}. */
19178 current_stub_contents =
19179 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19180 create_instruction_ldmdb
19181 (ri, /*wback=*/0, insn_low_registers));
19182
19183 /* B initial_insn_addr+4. */
19184 current_stub_contents =
19185 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19186 create_instruction_branch_absolute
19187 (initial_insn_addr - current_stub_contents));
19188 }
19189 else if (wback && !restore_pc && !restore_rn)
19190 {
19191 /* LDMDB Rn!, {R-high-register-list}. */
19192 current_stub_contents =
19193 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19194 create_instruction_ldmdb
19195 (rn, /*wback=*/1, insn_high_registers));
19196
19197 /* LDMDB Rn!, {R-low-register-list}. */
19198 current_stub_contents =
19199 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19200 create_instruction_ldmdb
19201 (rn, /*wback=*/1, insn_low_registers));
19202
19203 /* B initial_insn_addr+4. */
19204 current_stub_contents =
19205 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19206 create_instruction_branch_absolute
19207 (initial_insn_addr - current_stub_contents));
19208 }
19209 else if (!wback && restore_pc && !restore_rn)
19210 {
19211 /* Choose a Ri in the high-register-list that will be restored. */
19212 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19213
19214 /* SUB Ri, Rn, #(4*nb_registers). */
19215 current_stub_contents =
19216 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19217 create_instruction_sub (ri, rn, (4 * nb_registers)));
19218
19219 /* LDMIA Ri!, {R-low-register-list}. */
19220 current_stub_contents =
19221 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19222 create_instruction_ldmia
19223 (ri, /*wback=*/1, insn_low_registers));
19224
19225 /* LDMIA Ri, {R-high-register-list}. */
19226 current_stub_contents =
19227 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19228 create_instruction_ldmia
19229 (ri, /*wback=*/0, insn_high_registers));
19230 }
19231 else if (wback && restore_pc && !restore_rn)
19232 {
19233 /* Choose a Ri in the high-register-list that will be restored. */
19234 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19235
19236 /* SUB Rn, Rn, #(4*nb_registers) */
19237 current_stub_contents =
19238 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19239 create_instruction_sub (rn, rn, (4 * nb_registers)));
19240
19241 /* MOV Ri, Rn. */
19242 current_stub_contents =
19243 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19244 create_instruction_mov (ri, rn));
19245
19246 /* LDMIA Ri!, {R-low-register-list}. */
19247 current_stub_contents =
19248 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19249 create_instruction_ldmia
19250 (ri, /*wback=*/1, insn_low_registers));
19251
19252 /* LDMIA Ri, {R-high-register-list}. */
19253 current_stub_contents =
19254 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19255 create_instruction_ldmia
19256 (ri, /*wback=*/0, insn_high_registers));
19257 }
19258 else if (!wback && !restore_pc && restore_rn)
19259 {
19260 ri = rn;
19261 if (!(insn_low_registers & (1 << rn)))
19262 {
19263 /* Choose a Ri in the low-register-list that will be restored. */
19264 ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19265
19266 /* MOV Ri, Rn. */
19267 current_stub_contents =
19268 push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19269 create_instruction_mov (ri, rn));
19270 }
19271
19272 /* LDMDB Ri!, {R-high-register-list}. */
19273 current_stub_contents =
19274 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19275 create_instruction_ldmdb
19276 (ri, /*wback=*/1, insn_high_registers));
19277
19278 /* LDMDB Ri, {R-low-register-list}. */
19279 current_stub_contents =
19280 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19281 create_instruction_ldmdb
19282 (ri, /*wback=*/0, insn_low_registers));
19283
19284 /* B initial_insn_addr+4. */
19285 current_stub_contents =
19286 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19287 create_instruction_branch_absolute
19288 (initial_insn_addr - current_stub_contents));
19289 }
19290 else if (!wback && restore_pc && restore_rn)
19291 {
19292 ri = rn;
19293 if (!(insn_high_registers & (1 << rn)))
19294 {
19295 /* Choose a Ri in the high-register-list that will be restored. */
19296 ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19297 }
19298
19299 /* SUB Ri, Rn, #(4*nb_registers). */
19300 current_stub_contents =
19301 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19302 create_instruction_sub (ri, rn, (4 * nb_registers)));
19303
19304 /* LDMIA Ri!, {R-low-register-list}. */
19305 current_stub_contents =
19306 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19307 create_instruction_ldmia
19308 (ri, /*wback=*/1, insn_low_registers));
19309
19310 /* LDMIA Ri, {R-high-register-list}. */
19311 current_stub_contents =
19312 push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19313 create_instruction_ldmia
19314 (ri, /*wback=*/0, insn_high_registers));
19315 }
19316 else if (wback && restore_rn)
19317 {
19318 /* The assembler should not have accepted to encode this. */
19319 BFD_ASSERT (0 && "Cannot patch an instruction that has an "
19320 "undefined behavior.\n");
19321 }
19322
19323 /* Fill the remaining of the stub with deterministic contents. */
19324 current_stub_contents =
19325 stm32l4xx_fill_stub_udf (htab, output_bfd,
19326 base_stub_contents, current_stub_contents,
19327 base_stub_contents +
19328 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19329
19330 }
19331
/* Emit the replacing veneer for a faulty Thumb-2 VLDM (STM32L4XX
   erratum).  INITIAL_INSN is the original instruction found at
   INITIAL_INSN_ADDR; the veneer is written starting at
   BASE_STUB_CONTENTS.  Wide transfers are split into chunks of at
   most eight words, since the hardware issue only triggers on longer
   sequences.  */

static void
stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
				      bfd * output_bfd,
				      const insn32 initial_insn,
				      const bfd_byte *const initial_insn_addr,
				      bfd_byte *const base_stub_contents)
{
  /* imm8 field of the VLDM encoding: number of words transferred.  */
  int num_words = initial_insn & 0xff;
  bfd_byte *current_stub_contents = base_stub_contents;

  BFD_ASSERT (is_thumb2_vldm (initial_insn));

  /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
     smaller than 8 words load sequences that do not cause the
     hardware issue.  */
  if (num_words <= 8)
    {
      /* Untouched instruction.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    initial_insn);

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }
  else
    {
      bfd_boolean is_dp = /* DP encoding.  */
	(initial_insn & 0xfe100f00) == 0xec100b00;
      bfd_boolean is_ia_nobang = /* (IA without !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
      bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
      bfd_boolean is_db_bang = /* (DB with !).  */
	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
      /* Rn: bits [19:16] of the encoding.  */
      int base_reg = ((unsigned int) initial_insn << 12) >> 28;
      /* d = UInt (Vd:D);.  */
      int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
	| (((unsigned int)initial_insn << 9) >> 31);

      /* Compute the number of 8-words chunks needed to split.  */
      int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
      int chunk;

      /* The test coverage has been done assuming the following
	 hypothesis that exactly one of the previous is_ predicates is
	 true.  */
      BFD_ASSERT ( (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
		   && !(is_ia_nobang & is_ia_bang & is_db_bang));

      /* We treat the cutting of the words in one pass for all
	 cases, then we emit the adjustments:

	 vldm rx, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 -> sub rx, rx, #size (list)

	 vldm rx!, {...}
	 -> vldm rx!, {8_words_or_less} for each needed 8_word
	 This also handles vpop instruction (when rx is sp)

	 vldmd rx!, {...}
	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
      for (chunk = 0; chunk < chunks; ++chunk)
	{
	  bfd_vma new_insn = 0;

	  if (is_ia_nobang || is_ia_bang)
	    {
	      /* Last chunk carries the residue (num_words mod 8),
		 earlier chunks move a full 8 words each.  */
	      new_insn = create_instruction_vldmia
		(base_reg,
		 is_dp,
		 /*wback= .  */1,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }
	  else if (is_db_bang)
	    {
	      new_insn = create_instruction_vldmdb
		(base_reg,
		 is_dp,
		 chunks - (chunk + 1) ?
		 8 : num_words - chunk * 8,
		 first_reg + chunk * 8);
	    }

	  if (new_insn)
	    current_stub_contents =
	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				  new_insn);
	}

      /* Only this case requires the base register compensation
	 subtract.  */
      if (is_ia_nobang)
	{
	  current_stub_contents =
	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
				create_instruction_sub
				(base_reg, base_reg, 4*num_words));
	}

      /* B initial_insn_addr+4.  */
      current_stub_contents =
	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
			    create_instruction_branch_absolute
			    (initial_insn_addr - current_stub_contents));
    }

  /* Fill the remaining of the stub with deterministic contents.  */
  current_stub_contents =
    stm32l4xx_fill_stub_udf (htab, output_bfd,
			     base_stub_contents, current_stub_contents,
			     base_stub_contents +
			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
}
19452
19453 static void
19454 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19455 bfd * output_bfd,
19456 const insn32 wrong_insn,
19457 const bfd_byte *const wrong_insn_addr,
19458 bfd_byte *const stub_contents)
19459 {
19460 if (is_thumb2_ldmia (wrong_insn))
19461 stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19462 wrong_insn, wrong_insn_addr,
19463 stub_contents);
19464 else if (is_thumb2_ldmdb (wrong_insn))
19465 stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19466 wrong_insn, wrong_insn_addr,
19467 stub_contents);
19468 else if (is_thumb2_vldm (wrong_insn))
19469 stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19470 wrong_insn, wrong_insn_addr,
19471 stub_contents);
19472 }
19473
19474 /* End of stm32l4xx work-around. */
19475
19476
/* Do code byteswapping.  Return FALSE afterwards so that the section is
   written out as normal.  Also the hook where VFP11 and STM32L4XX
   erratum veneers are patched into the output image, EXIDX edits are
   applied, and Cortex-A8 branch fixups are performed.  Returns TRUE
   only in the EXIDX case, where the edited contents have already been
   written.  */

static bfd_boolean
elf32_arm_write_section (bfd *output_bfd,
			 struct bfd_link_info *link_info,
			 asection *sec,
			 bfd_byte *contents)
{
  unsigned int mapcount, errcount;
  _arm_elf_section_data *arm_data;
  struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
  elf32_arm_section_map *map;
  elf32_vfp11_erratum_list *errnode;
  elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
  bfd_vma ptr;
  bfd_vma end;
  bfd_vma offset = sec->output_section->vma + sec->output_offset;
  bfd_byte tmp;
  unsigned int i;

  if (globals == NULL)
    return FALSE;

  /* If this section has not been allocated an _arm_elf_section_data
     structure then we cannot record anything.  */
  arm_data = get_arm_elf_section_data (sec);
  if (arm_data == NULL)
    return FALSE;

  mapcount = arm_data->mapcount;
  map = arm_data->map;
  errcount = arm_data->erratumcount;

  /* Phase 1: patch VFP11 erratum branches and veneers in place.  */
  if (errcount != 0)
    {
      /* XOR-mask to address bytes in instruction order on BE hosts.  */
      unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;

      for (errnode = arm_data->erratumlist; errnode != 0;
	   errnode = errnode->next)
	{
	  /* Section-relative offset of the spot to patch.  */
	  bfd_vma target = errnode->vma - offset;

	  switch (errnode->type)
	    {
	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
	      {
		bfd_vma branch_to_veneer;
		/* Original condition code of instruction, plus bit mask for
		   ARM B instruction.  */
		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
				    | 0x0a000000;

		/* The instruction is before the label.  */
		target -= 4;

		/* Above offset included in -4 below.  */
		branch_to_veneer = errnode->u.b.veneer->vma
				   - errnode->vma - 4;

		/* ARM B has a signed 26-bit (word-aligned) reach.  */
		if ((signed) branch_to_veneer < -(1 << 25)
		    || (signed) branch_to_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		insn |= (branch_to_veneer >> 2) & 0xffffff;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
	      }
	      break;

	    case VFP11_ERRATUM_ARM_VENEER:
	      {
		bfd_vma branch_from_veneer;
		unsigned int insn;

		/* Take size of veneer into account.  */
		branch_from_veneer = errnode->u.v.branch->vma
				     - errnode->vma - 12;

		if ((signed) branch_from_veneer < -(1 << 25)
		    || (signed) branch_from_veneer >= (1 << 25))
		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
					"range"), output_bfd);

		/* Original instruction.  */
		insn = errnode->u.v.branch->u.b.vfp_insn;
		contents[endianflip ^ target] = insn & 0xff;
		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;

		/* Branch back to insn after original insn.  */
		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
		contents[endianflip ^ (target + 4)] = insn & 0xff;
		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Phase 2: patch STM32L4XX erratum branches and build the
     replacing veneers.  */
  if (arm_data->stm32l4xx_erratumcount != 0)
    {
      for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
	   stm32l4xx_errnode != 0;
	   stm32l4xx_errnode = stm32l4xx_errnode->next)
	{
	  bfd_vma target = stm32l4xx_errnode->vma - offset;

	  switch (stm32l4xx_errnode->type)
	    {
	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
	      {
		unsigned int insn;
		bfd_vma branch_to_veneer =
		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;

		/* Thumb-2 B.W has a signed 25-bit reach.  */
		if ((signed) branch_to_veneer < -(1 << 24)
		    || (signed) branch_to_veneer >= (1 << 24))
		  {
		    bfd_vma out_of_range =
		      ((signed) branch_to_veneer < -(1 << 24)) ?
		      - branch_to_veneer - (1 << 24) :
		      ((signed) branch_to_veneer >= (1 << 24)) ?
		      branch_to_veneer - (1 << 24) : 0;

		    _bfd_error_handler
		      (_("%pB(%#" PRIx64 "): error: "
			 "cannot create STM32L4XX veneer; "
			 "jump out of range by %" PRId64 " bytes; "
			 "cannot encode branch instruction"),
		       output_bfd,
		       (uint64_t) (stm32l4xx_errnode->vma - 4),
		       (int64_t) out_of_range);
		    continue;
		  }

		insn = create_instruction_branch_absolute
		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);

		/* The instruction is before the label.  */
		target -= 4;

		put_thumb2_insn (globals, output_bfd,
				 (bfd_vma) insn, contents + target);
	      }
	      break;

	    case STM32L4XX_ERRATUM_VENEER:
	      {
		bfd_byte * veneer;
		bfd_byte * veneer_r;
		unsigned int insn;

		veneer = contents + target;
		/* Return address: just after the original (replaced)
		   instruction.  */
		veneer_r = veneer
		  + stm32l4xx_errnode->u.b.veneer->vma
		  - stm32l4xx_errnode->vma - 4;

		/* NOTE(review): the lower-bound expression below
		   mixes the veneer-size selection into the comparison
		   in a hard-to-read way; kept as-is, verify intent
		   against upstream history.  */
		if ((signed) (veneer_r - veneer -
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
		    || (signed) (veneer_r - veneer) >= (1 << 24))
		  {
		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
					  "veneer"), output_bfd);
		    continue;
		  }

		/* Original instruction.  */
		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;

		stm32l4xx_create_replacing_stub
		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
	      }
	      break;

	    default:
	      abort ();
	    }
	}
    }

  /* Phase 3: apply recorded edits (deletions, CANTUNWIND insertions)
     to .ARM.exidx sections and write the result directly.  */
  if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
    {
      arm_unwind_table_edit *edit_node
	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (This isn't the case if we perform no
	 edits, then rawsize will be zero and we should use size).  */
      bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
      unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
      unsigned int in_index, out_index;
      bfd_vma add_to_offsets = 0;

      if (edited_contents == NULL)
	return FALSE;
      /* Each EXIDX entry is 8 bytes (prel31 offset + data word).  */
      for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
	{
	  if (edit_node)
	    {
	      unsigned int edit_index = edit_node->index;

	      if (in_index < edit_index && in_index * 8 < input_size)
		{
		  /* Copy entries up to the next edit point, adjusting
		     each prel31 offset by the bytes deleted/inserted
		     so far.  */
		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				    contents + in_index * 8, add_to_offsets);
		  out_index++;
		  in_index++;
		}
	      else if (in_index == edit_index
		       || (in_index * 8 >= input_size
			   && edit_index == UINT_MAX))
		{
		  switch (edit_node->type)
		    {
		    case DELETE_EXIDX_ENTRY:
		      in_index++;
		      add_to_offsets += 8;
		      break;

		    case INSERT_EXIDX_CANTUNWIND_AT_END:
		      {
			asection *text_sec = edit_node->linked_section;
			bfd_vma text_offset = text_sec->output_section->vma
					      + text_sec->output_offset
					      + text_sec->size;
			bfd_vma exidx_offset = offset + out_index * 8;
			unsigned long prel31_offset;

			/* Note: this is meant to be equivalent to an
			   R_ARM_PREL31 relocation.  These synthetic
			   EXIDX_CANTUNWIND markers are not relocated by the
			   usual BFD method.  */
			prel31_offset = (text_offset - exidx_offset)
					& 0x7ffffffful;
			if (bfd_link_relocatable (link_info))
			  {
			    /* Here relocation for new EXIDX_CANTUNWIND is
			       created, so there is no need to
			       adjust offset by hand.  */
			    prel31_offset = text_sec->output_offset
					    + text_sec->size;
			  }

			/* First address we can't unwind.  */
			bfd_put_32 (output_bfd, prel31_offset,
				    &edited_contents[out_index * 8]);

			/* Code for EXIDX_CANTUNWIND.  */
			bfd_put_32 (output_bfd, 0x1,
				    &edited_contents[out_index * 8 + 4]);

			out_index++;
			add_to_offsets -= 8;
		      }
		      break;
		    }

		  edit_node = edit_node->next;
		}
	    }
	  else
	    {
	      /* No more edits, copy remaining entries verbatim.  */
	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
				contents + in_index * 8, add_to_offsets);
	      out_index++;
	      in_index++;
	    }
	}

      if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
	bfd_set_section_contents (output_bfd, sec->output_section,
				  edited_contents,
				  (file_ptr) sec->output_offset, sec->size);

      /* TRUE: the section has already been written out above.  */
      return TRUE;
    }

  /* Fix code to point to Cortex-A8 erratum stubs.  */
  if (globals->fix_cortex_a8)
    {
      struct a8_branch_to_stub_data data;

      data.writing_section = sec;
      data.contents = contents;

      bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
			 & data);
    }

  if (mapcount == 0)
    return FALSE;

  /* Phase 4: byte-swap code regions, guided by the mapping symbols
     ($a = ARM code, $t = Thumb code, $d = data).  */
  if (globals->byteswap_code)
    {
      qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);

      ptr = map[0].vma;
      for (i = 0; i < mapcount; i++)
	{
	  if (i == mapcount - 1)
	    end = sec->size;
	  else
	    end = map[i + 1].vma;

	  switch (map[i].type)
	    {
	    case 'a':
	      /* Byte swap code words.  */
	      while (ptr + 3 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 3];
		  contents[ptr + 3] = tmp;
		  tmp = contents[ptr + 1];
		  contents[ptr + 1] = contents[ptr + 2];
		  contents[ptr + 2] = tmp;
		  ptr += 4;
		}
	      break;

	    case 't':
	      /* Byte swap code halfwords.  */
	      while (ptr + 1 < end)
		{
		  tmp = contents[ptr];
		  contents[ptr] = contents[ptr + 1];
		  contents[ptr + 1] = tmp;
		  ptr += 2;
		}
	      break;

	    case 'd':
	      /* Leave data alone.  */
	      break;
	    }
	  ptr = end;
	}
    }

  /* The mapping data is no longer needed; mapcount == -1 marks it as
     consumed.  */
  free (map);
  arm_data->mapcount = -1;
  arm_data->mapsize = 0;
  arm_data->map = NULL;

  return FALSE;
}
19837
19838 /* Mangle thumb function symbols as we read them in. */
19839
19840 static bfd_boolean
19841 elf32_arm_swap_symbol_in (bfd * abfd,
19842 const void *psrc,
19843 const void *pshn,
19844 Elf_Internal_Sym *dst)
19845 {
19846 if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19847 return FALSE;
19848 dst->st_target_internal = 0;
19849
19850 /* New EABI objects mark thumb function symbols by setting the low bit of
19851 the address. */
19852 if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19853 || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19854 {
19855 if (dst->st_value & 1)
19856 {
19857 dst->st_value &= ~(bfd_vma) 1;
19858 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19859 ST_BRANCH_TO_THUMB);
19860 }
19861 else
19862 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19863 }
19864 else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19865 {
19866 dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19867 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19868 }
19869 else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19870 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19871 else
19872 ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19873
19874 return TRUE;
19875 }
19876
19877
19878 /* Mangle thumb function symbols as we write them out. */
19879
19880 static void
19881 elf32_arm_swap_symbol_out (bfd *abfd,
19882 const Elf_Internal_Sym *src,
19883 void *cdst,
19884 void *shndx)
19885 {
19886 Elf_Internal_Sym newsym;
19887
19888 /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19889 of the address set, as per the new EABI. We do this unconditionally
19890 because objcopy does not set the elf header flags until after
19891 it writes out the symbol table. */
19892 if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19893 {
19894 newsym = *src;
19895 if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19896 newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19897 if (newsym.st_shndx != SHN_UNDEF)
19898 {
19899 /* Do this only for defined symbols. At link type, the static
19900 linker will simulate the work of dynamic linker of resolving
19901 symbols and will carry over the thumbness of found symbols to
19902 the output symbol table. It's not clear how it happens, but
19903 the thumbness of undefined symbols can well be different at
19904 runtime, and writing '1' for them will be confusing for users
19905 and possibly for dynamic linker itself.
19906 */
19907 newsym.st_value |= 1;
19908 }
19909
19910 src = &newsym;
19911 }
19912 bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19913 }
19914
19915 /* Add the PT_ARM_EXIDX program header. */
19916
19917 static bfd_boolean
19918 elf32_arm_modify_segment_map (bfd *abfd,
19919 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19920 {
19921 struct elf_segment_map *m;
19922 asection *sec;
19923
19924 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19925 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19926 {
19927 /* If there is already a PT_ARM_EXIDX header, then we do not
19928 want to add another one. This situation arises when running
19929 "strip"; the input binary already has the header. */
19930 m = elf_seg_map (abfd);
19931 while (m && m->p_type != PT_ARM_EXIDX)
19932 m = m->next;
19933 if (!m)
19934 {
19935 m = (struct elf_segment_map *)
19936 bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19937 if (m == NULL)
19938 return FALSE;
19939 m->p_type = PT_ARM_EXIDX;
19940 m->count = 1;
19941 m->sections[0] = sec;
19942
19943 m->next = elf_seg_map (abfd);
19944 elf_seg_map (abfd) = m;
19945 }
19946 }
19947
19948 return TRUE;
19949 }
19950
19951 /* We may add a PT_ARM_EXIDX program header. */
19952
19953 static int
19954 elf32_arm_additional_program_headers (bfd *abfd,
19955 struct bfd_link_info *info ATTRIBUTE_UNUSED)
19956 {
19957 asection *sec;
19958
19959 sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19960 if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19961 return 1;
19962 else
19963 return 0;
19964 }
19965
19966 /* Hook called by the linker routine which adds symbols from an object
19967 file. */
19968
19969 static bfd_boolean
19970 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19971 Elf_Internal_Sym *sym, const char **namep,
19972 flagword *flagsp, asection **secp, bfd_vma *valp)
19973 {
19974 if (elf32_arm_hash_table (info) == NULL)
19975 return FALSE;
19976
19977 if (elf32_arm_hash_table (info)->vxworks_p
19978 && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19979 flagsp, secp, valp))
19980 return FALSE;
19981
19982 return TRUE;
19983 }
19984
/* We use this to override swap_symbol_in and swap_symbol_out.
   Otherwise identical to the generic 32-bit ELF size info.  */
const struct elf_size_info elf32_arm_size_info =
{
  sizeof (Elf32_External_Ehdr),
  sizeof (Elf32_External_Phdr),
  sizeof (Elf32_External_Shdr),
  sizeof (Elf32_External_Rel),
  sizeof (Elf32_External_Rela),
  sizeof (Elf32_External_Sym),
  sizeof (Elf32_External_Dyn),
  sizeof (Elf_External_Note),
  4,		/* Hash table entry size.  */
  1,		/* Internal relocations per external relocations.  */
  32, 2,	/* Arch size, log2 of file alignment.  */
  ELFCLASS32, EV_CURRENT,
  bfd_elf32_write_out_phdrs,
  bfd_elf32_write_shdrs_and_ehdr,
  bfd_elf32_checksum_contents,
  bfd_elf32_write_relocs,
  elf32_arm_swap_symbol_in,	/* Thumb-aware override.  */
  elf32_arm_swap_symbol_out,	/* Thumb-aware override.  */
  bfd_elf32_slurp_reloc_table,
  bfd_elf32_slurp_symbol_table,
  bfd_elf32_swap_dyn_in,
  bfd_elf32_swap_dyn_out,
  bfd_elf32_swap_reloc_in,
  bfd_elf32_swap_reloc_out,
  bfd_elf32_swap_reloca_in,
  bfd_elf32_swap_reloca_out
};
20015
20016 static bfd_vma
20017 read_code32 (const bfd *abfd, const bfd_byte *addr)
20018 {
20019 /* V7 BE8 code is always little endian. */
20020 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20021 return bfd_getl32 (addr);
20022
20023 return bfd_get_32 (abfd, addr);
20024 }
20025
20026 static bfd_vma
20027 read_code16 (const bfd *abfd, const bfd_byte *addr)
20028 {
20029 /* V7 BE8 code is always little endian. */
20030 if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20031 return bfd_getl16 (addr);
20032
20033 return bfd_get_16 (abfd, addr);
20034 }
20035
20036 /* Return size of plt0 entry starting at ADDR
20037 or (bfd_vma) -1 if size can not be determined. */
20038
20039 static bfd_vma
20040 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
20041 {
20042 bfd_vma first_word;
20043 bfd_vma plt0_size;
20044
20045 first_word = read_code32 (abfd, addr);
20046
20047 if (first_word == elf32_arm_plt0_entry[0])
20048 plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20049 else if (first_word == elf32_thumb2_plt0_entry[0])
20050 plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20051 else
20052 /* We don't yet handle this PLT format. */
20053 return (bfd_vma) -1;
20054
20055 return plt0_size;
20056 }
20057
/* Return size of plt entry starting at offset OFFSET
   of plt section located at address START
   or (bfd_vma) -1 if size can not be determined.  */

static bfd_vma
elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
{
  bfd_vma first_insn;
  bfd_vma plt_size = 0;
  const bfd_byte *addr = start + offset;

  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);

  /* Respect Thumb stub if necessary.  */
  if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
    {
      plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
    }

  /* Strip immediate from first add.  */
  first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;

#ifdef FOUR_WORD_PLT
  if (first_insn == elf32_arm_plt_entry[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
#else
  if (first_insn == elf32_arm_plt_entry_long[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
  else if (first_insn == elf32_arm_plt_entry_short[0])
    plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
#endif
  /* NOTE: this 'else' binds to whichever 'if' chain the preprocessor
     selected above.  */
  else
    /* We don't yet handle this PLT format.  */
    return (bfd_vma) -1;

  return plt_size;
}
20097
/* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.
   Fabricate "name@plt" symbols for PLT entries, one per .rel.plt
   relocation, storing them in a single allocation returned via *RET.
   Returns the number of synthetic symbols created, 0 if there is
   nothing to do, or -1 on error.  */

static long
elf32_arm_get_synthetic_symtab (bfd *abfd,
				long symcount ATTRIBUTE_UNUSED,
				asymbol **syms ATTRIBUTE_UNUSED,
				long dynsymcount,
				asymbol **dynsyms,
				asymbol **ret)
{
  asection *relplt;
  asymbol *s;
  arelent *p;
  long count, i, n;
  size_t size;
  Elf_Internal_Shdr *hdr;
  char *names;
  asection *plt;
  bfd_vma offset;
  bfd_byte *data;

  *ret = NULL;

  if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
    return 0;

  if (dynsymcount <= 0)
    return 0;

  relplt = bfd_get_section_by_name (abfd, ".rel.plt");
  if (relplt == NULL)
    return 0;

  /* Sanity-check that .rel.plt really is a REL/RELA section linked
     to the dynamic symbol table.  */
  hdr = &elf_section_data (relplt)->this_hdr;
  if (hdr->sh_link != elf_dynsymtab (abfd)
      || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
    return 0;

  plt = bfd_get_section_by_name (abfd, ".plt");
  if (plt == NULL)
    return 0;

  if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
    return -1;

  data = plt->contents;
  if (data == NULL)
    {
      if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
	return -1;
      /* Cache so the BFD owns (and later frees) the contents.  */
      bfd_cache_section_contents((asection *) plt, data);
    }

  /* One asymbol per PLT relocation, plus room for all the "name@plt"
     strings (and a possible "+0xADDEND" suffix) in the same block.  */
  count = relplt->size / hdr->sh_entsize;
  size = count * sizeof (asymbol);
  p = relplt->relocation;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
      if (p->addend != 0)
	size += sizeof ("+0x") - 1 + 8;
    }

  s = *ret = (asymbol *) bfd_malloc (size);
  if (s == NULL)
    return -1;

  /* Skip the PLT header; the first real entry starts after it.  */
  offset = elf32_arm_plt0_size (abfd, data);
  if (offset == (bfd_vma) -1)
    return -1;

  /* Strings live immediately after the asymbol array.  */
  names = (char *) (s + count);
  p = relplt->relocation;
  n = 0;
  for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
    {
      size_t len;

      bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
      if (plt_size == (bfd_vma) -1)
	break;

      *s = **p->sym_ptr_ptr;
      /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
	 we are defining a symbol, ensure one of them is set.  */
      if ((s->flags & BSF_LOCAL) == 0)
	s->flags |= BSF_GLOBAL;
      s->flags |= BSF_SYNTHETIC;
      s->section = plt;
      s->value = offset;
      s->name = names;
      s->udata.p = NULL;
      len = strlen ((*p->sym_ptr_ptr)->name);
      memcpy (names, (*p->sym_ptr_ptr)->name, len);
      names += len;
      if (p->addend != 0)
	{
	  char buf[30], *a;

	  memcpy (names, "+0x", sizeof ("+0x") - 1);
	  names += sizeof ("+0x") - 1;
	  bfd_sprintf_vma (abfd, buf, p->addend);
	  /* Strip leading zeros from the hex addend.  */
	  for (a = buf; *a == '0'; ++a)
	    ;
	  len = strlen (a);
	  memcpy (names, a, len);
	  names += len;
	}
      memcpy (names, "@plt", sizeof ("@plt"));
      names += sizeof ("@plt");
      ++s, ++n;
      offset += plt_size;
    }

  return n;
}
20214
20215 static bfd_boolean
20216 elf32_arm_section_flags (const Elf_Internal_Shdr *hdr)
20217 {
20218 if (hdr->sh_flags & SHF_ARM_PURECODE)
20219 hdr->bfd_section->flags |= SEC_ELF_PURECODE;
20220 return TRUE;
20221 }
20222
20223 static flagword
20224 elf32_arm_lookup_section_flags (char *flag_name)
20225 {
20226 if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20227 return SHF_ARM_PURECODE;
20228
20229 return SEC_NO_FLAGS;
20230 }
20231
20232 static unsigned int
20233 elf32_arm_count_additional_relocs (asection *sec)
20234 {
20235 struct _arm_elf_section_data *arm_data;
20236 arm_data = get_arm_elf_section_data (sec);
20237
20238 return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20239 }
20240
/* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
   FALSE otherwise.  ISECTION is the best guess matching section from the
   input bfd IBFD, but it might be NULL.  */

static bfd_boolean
elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
				       bfd *obfd ATTRIBUTE_UNUSED,
				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
				       Elf_Internal_Shdr *osection)
{
  switch (osection->sh_type)
    {
    case SHT_ARM_EXIDX:
      {
	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
	/* I is a section header index; index 0 is the reserved null
	   header, so I == 0 below doubles as "no match found".  */
	unsigned i = 0;

	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
	osection->sh_info = 0;

	/* The sh_link field must be set to the text section associated with
	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does try
	   to match up OSECTION with its corresponding input section however
	   so that is a good first guess.  */
	if (isection != NULL
	    && osection->bfd_section != NULL
	    && isection->bfd_section != NULL
	    && isection->bfd_section->output_section != NULL
	    && isection->bfd_section->output_section == osection->bfd_section
	    && iheaders != NULL
	    && isection->sh_link > 0
	    && isection->sh_link < elf_numsections (ibfd)
	    && iheaders[isection->sh_link]->bfd_section != NULL
	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
	    )
	  {
	    /* Look for the output section header of the text section
	       that the input index section was linked to.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i]->bfd_section
		  == iheaders[isection->sh_link]->bfd_section->output_section)
		break;
	  }

	if (i == 0)
	  {
	    /* Failing that we have to find a matching section ourselves.  If
	       we had the output section name available we could compare that
	       with input section names.  Unfortunately we don't.  So instead
	       we use a simple heuristic and look for the nearest executable
	       section before this one.  */
	    for (i = elf_numsections (obfd); i-- > 0;)
	      if (oheaders[i] == osection)
		break;
	    if (i == 0)
	      break;

	    /* Walk backwards from OSECTION looking for an allocated,
	       executable PROGBITS section.  */
	    while (i-- > 0)
	      if (oheaders[i]->sh_type == SHT_PROGBITS
		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
		  == (SHF_ALLOC | SHF_EXECINSTR))
		break;
	  }

	if (i)
	  {
	    osection->sh_link = i;
	    /* If the text section was part of a group
	       then the index section should be too.  */
	    if (oheaders[i]->sh_flags & SHF_GROUP)
	      osection->sh_flags |= SHF_GROUP;
	    return TRUE;
	  }
      }
      break;

    case SHT_ARM_PREEMPTMAP:
      osection->sh_flags = SHF_ALLOC;
      break;

    case SHT_ARM_ATTRIBUTES:
    case SHT_ARM_DEBUGOVERLAY:
    case SHT_ARM_OVERLAYSECTION:
    default:
      break;
    }

  return FALSE;
}
20331
20332 /* Returns TRUE if NAME is an ARM mapping symbol.
20333 Traditionally the symbols $a, $d and $t have been used.
20334 The ARM ELF standard also defines $x (for A64 code). It also allows a
20335 period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20336 Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20337 not support them here. $t.x indicates the start of ThumbEE instructions. */
20338
20339 static bfd_boolean
20340 is_arm_mapping_symbol (const char * name)
20341 {
20342 return name != NULL /* Paranoia. */
20343 && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20344 the mapping symbols could have acquired a prefix.
20345 We do not support this here, since such symbols no
20346 longer conform to the ARM ELF ABI. */
20347 && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20348 && (name[2] == 0 || name[2] == '.');
20349 /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20350 any characters that follow the period are legal characters for the body
20351 of a symbol's name. For now we just assume that this is the case. */
20352 }
20353
20354 /* Make sure that mapping symbols in object files are not removed via the
20355 "strip --strip-unneeded" tool. These symbols are needed in order to
20356 correctly generate interworking veneers, and for byte swapping code
20357 regions. Once an object file has been linked, it is safe to remove the
20358 symbols as they will no longer be needed. */
20359
20360 static void
20361 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20362 {
20363 if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20364 && sym->section != bfd_abs_section_ptr
20365 && is_arm_mapping_symbol (sym->name))
20366 sym->flags |= BSF_KEEP;
20367 }
20368
20369 #undef elf_backend_copy_special_section_fields
20370 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20371
20372 #define ELF_ARCH bfd_arch_arm
20373 #define ELF_TARGET_ID ARM_ELF_DATA
20374 #define ELF_MACHINE_CODE EM_ARM
20375 #ifdef __QNXTARGET__
20376 #define ELF_MAXPAGESIZE 0x1000
20377 #else
20378 #define ELF_MAXPAGESIZE 0x10000
20379 #endif
20380 #define ELF_MINPAGESIZE 0x1000
20381 #define ELF_COMMONPAGESIZE 0x1000
20382
20383 #define bfd_elf32_mkobject elf32_arm_mkobject
20384
20385 #define bfd_elf32_bfd_copy_private_bfd_data elf32_arm_copy_private_bfd_data
20386 #define bfd_elf32_bfd_merge_private_bfd_data elf32_arm_merge_private_bfd_data
20387 #define bfd_elf32_bfd_set_private_flags elf32_arm_set_private_flags
20388 #define bfd_elf32_bfd_print_private_bfd_data elf32_arm_print_private_bfd_data
20389 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_link_hash_table_create
20390 #define bfd_elf32_bfd_reloc_type_lookup elf32_arm_reloc_type_lookup
20391 #define bfd_elf32_bfd_reloc_name_lookup elf32_arm_reloc_name_lookup
20392 #define bfd_elf32_find_inliner_info elf32_arm_find_inliner_info
20393 #define bfd_elf32_new_section_hook elf32_arm_new_section_hook
20394 #define bfd_elf32_bfd_is_target_special_symbol elf32_arm_is_target_special_symbol
20395 #define bfd_elf32_bfd_final_link elf32_arm_final_link
20396 #define bfd_elf32_get_synthetic_symtab elf32_arm_get_synthetic_symtab
20397
20398 #define elf_backend_get_symbol_type elf32_arm_get_symbol_type
20399 #define elf_backend_maybe_function_sym elf32_arm_maybe_function_sym
20400 #define elf_backend_gc_mark_hook elf32_arm_gc_mark_hook
20401 #define elf_backend_gc_mark_extra_sections elf32_arm_gc_mark_extra_sections
20402 #define elf_backend_check_relocs elf32_arm_check_relocs
20403 #define elf_backend_update_relocs elf32_arm_update_relocs
20404 #define elf_backend_relocate_section elf32_arm_relocate_section
20405 #define elf_backend_write_section elf32_arm_write_section
20406 #define elf_backend_adjust_dynamic_symbol elf32_arm_adjust_dynamic_symbol
20407 #define elf_backend_create_dynamic_sections elf32_arm_create_dynamic_sections
20408 #define elf_backend_finish_dynamic_symbol elf32_arm_finish_dynamic_symbol
20409 #define elf_backend_finish_dynamic_sections elf32_arm_finish_dynamic_sections
20410 #define elf_backend_size_dynamic_sections elf32_arm_size_dynamic_sections
20411 #define elf_backend_always_size_sections elf32_arm_always_size_sections
20412 #define elf_backend_init_index_section _bfd_elf_init_2_index_sections
20413 #define elf_backend_init_file_header elf32_arm_init_file_header
20414 #define elf_backend_reloc_type_class elf32_arm_reloc_type_class
20415 #define elf_backend_object_p elf32_arm_object_p
20416 #define elf_backend_fake_sections elf32_arm_fake_sections
20417 #define elf_backend_section_from_shdr elf32_arm_section_from_shdr
20418 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20419 #define elf_backend_copy_indirect_symbol elf32_arm_copy_indirect_symbol
20420 #define elf_backend_size_info elf32_arm_size_info
20421 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20422 #define elf_backend_additional_program_headers elf32_arm_additional_program_headers
20423 #define elf_backend_output_arch_local_syms elf32_arm_output_arch_local_syms
20424 #define elf_backend_filter_implib_symbols elf32_arm_filter_implib_symbols
20425 #define elf_backend_begin_write_processing elf32_arm_begin_write_processing
20426 #define elf_backend_add_symbol_hook elf32_arm_add_symbol_hook
20427 #define elf_backend_count_additional_relocs elf32_arm_count_additional_relocs
20428 #define elf_backend_symbol_processing elf32_arm_backend_symbol_processing
20429
20430 #define elf_backend_can_refcount 1
20431 #define elf_backend_can_gc_sections 1
20432 #define elf_backend_plt_readonly 1
20433 #define elf_backend_want_got_plt 1
20434 #define elf_backend_want_plt_sym 0
20435 #define elf_backend_want_dynrelro 1
20436 #define elf_backend_may_use_rel_p 1
20437 #define elf_backend_may_use_rela_p 0
20438 #define elf_backend_default_use_rela_p 0
20439 #define elf_backend_dtrel_excludes_plt 1
20440
20441 #define elf_backend_got_header_size 12
20442 #define elf_backend_extern_protected_data 1
20443
20444 #undef elf_backend_obj_attrs_vendor
20445 #define elf_backend_obj_attrs_vendor "aeabi"
20446 #undef elf_backend_obj_attrs_section
20447 #define elf_backend_obj_attrs_section ".ARM.attributes"
20448 #undef elf_backend_obj_attrs_arg_type
20449 #define elf_backend_obj_attrs_arg_type elf32_arm_obj_attrs_arg_type
20450 #undef elf_backend_obj_attrs_section_type
20451 #define elf_backend_obj_attrs_section_type SHT_ARM_ATTRIBUTES
20452 #define elf_backend_obj_attrs_order elf32_arm_obj_attrs_order
20453 #define elf_backend_obj_attrs_handle_unknown elf32_arm_obj_attrs_handle_unknown
20454
20455 #undef elf_backend_section_flags
20456 #define elf_backend_section_flags elf32_arm_section_flags
20457 #undef elf_backend_lookup_section_flags_hook
20458 #define elf_backend_lookup_section_flags_hook elf32_arm_lookup_section_flags
20459
20460 #define elf_backend_linux_prpsinfo32_ugid16 TRUE
20461
20462 #include "elf32-target.h"
20463
20464 /* Native Client targets. */
20465
20466 #undef TARGET_LITTLE_SYM
20467 #define TARGET_LITTLE_SYM arm_elf32_nacl_le_vec
20468 #undef TARGET_LITTLE_NAME
20469 #define TARGET_LITTLE_NAME "elf32-littlearm-nacl"
20470 #undef TARGET_BIG_SYM
20471 #define TARGET_BIG_SYM arm_elf32_nacl_be_vec
20472 #undef TARGET_BIG_NAME
20473 #define TARGET_BIG_NAME "elf32-bigarm-nacl"
20474
20475 /* Like elf32_arm_link_hash_table_create -- but overrides
20476 appropriately for NaCl. */
20477
20478 static struct bfd_link_hash_table *
20479 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20480 {
20481 struct bfd_link_hash_table *ret;
20482
20483 ret = elf32_arm_link_hash_table_create (abfd);
20484 if (ret)
20485 {
20486 struct elf32_arm_link_hash_table *htab
20487 = (struct elf32_arm_link_hash_table *) ret;
20488
20489 htab->nacl_p = 1;
20490
20491 htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20492 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20493 }
20494 return ret;
20495 }
20496
20497 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20498 really need to use elf32_arm_modify_segment_map. But we do it
20499 anyway just to reduce gratuitous differences with the stock ARM backend. */
20500
20501 static bfd_boolean
20502 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20503 {
20504 return (elf32_arm_modify_segment_map (abfd, info)
20505 && nacl_modify_segment_map (abfd, info));
20506 }
20507
/* Final-write hook for NaCl targets: run the ARM-specific processing
   first, then the generic NaCl post-processing, whose result is
   returned.  */

static bfd_boolean
elf32_arm_nacl_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return nacl_final_write_processing (abfd);
}
20514
20515 static bfd_vma
20516 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20517 const arelent *rel ATTRIBUTE_UNUSED)
20518 {
20519 return plt->vma
20520 + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20521 i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20522 }
20523
20524 #undef elf32_bed
20525 #define elf32_bed elf32_arm_nacl_bed
20526 #undef bfd_elf32_bfd_link_hash_table_create
20527 #define bfd_elf32_bfd_link_hash_table_create \
20528 elf32_arm_nacl_link_hash_table_create
20529 #undef elf_backend_plt_alignment
20530 #define elf_backend_plt_alignment 4
20531 #undef elf_backend_modify_segment_map
20532 #define elf_backend_modify_segment_map elf32_arm_nacl_modify_segment_map
20533 #undef elf_backend_modify_headers
20534 #define elf_backend_modify_headers nacl_modify_headers
20535 #undef elf_backend_final_write_processing
20536 #define elf_backend_final_write_processing elf32_arm_nacl_final_write_processing
20537 #undef bfd_elf32_get_synthetic_symtab
20538 #undef elf_backend_plt_sym_val
20539 #define elf_backend_plt_sym_val elf32_arm_nacl_plt_sym_val
20540 #undef elf_backend_copy_special_section_fields
20541
20542 #undef ELF_MINPAGESIZE
20543 #undef ELF_COMMONPAGESIZE
20544
20545
20546 #include "elf32-target.h"
20547
20548 /* Reset to defaults. */
20549 #undef elf_backend_plt_alignment
20550 #undef elf_backend_modify_segment_map
20551 #define elf_backend_modify_segment_map elf32_arm_modify_segment_map
20552 #undef elf_backend_modify_headers
20553 #undef elf_backend_final_write_processing
20554 #define elf_backend_final_write_processing elf32_arm_final_write_processing
20555 #undef ELF_MINPAGESIZE
20556 #define ELF_MINPAGESIZE 0x1000
20557 #undef ELF_COMMONPAGESIZE
20558 #define ELF_COMMONPAGESIZE 0x1000
20559
20560
20561 /* FDPIC Targets. */
20562
20563 #undef TARGET_LITTLE_SYM
20564 #define TARGET_LITTLE_SYM arm_elf32_fdpic_le_vec
20565 #undef TARGET_LITTLE_NAME
20566 #define TARGET_LITTLE_NAME "elf32-littlearm-fdpic"
20567 #undef TARGET_BIG_SYM
20568 #define TARGET_BIG_SYM arm_elf32_fdpic_be_vec
20569 #undef TARGET_BIG_NAME
20570 #define TARGET_BIG_NAME "elf32-bigarm-fdpic"
20571 #undef elf_match_priority
20572 #define elf_match_priority 128
20573 #undef ELF_OSABI
20574 #define ELF_OSABI ELFOSABI_ARM_FDPIC
20575
20576 /* Like elf32_arm_link_hash_table_create -- but overrides
20577 appropriately for FDPIC. */
20578
20579 static struct bfd_link_hash_table *
20580 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20581 {
20582 struct bfd_link_hash_table *ret;
20583
20584 ret = elf32_arm_link_hash_table_create (abfd);
20585 if (ret)
20586 {
20587 struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20588
20589 htab->fdpic_p = 1;
20590 }
20591 return ret;
20592 }
20593
20594 /* We need dynamic symbols for every section, since segments can
20595 relocate independently. */
20596 static bfd_boolean
20597 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20598 struct bfd_link_info *info
20599 ATTRIBUTE_UNUSED,
20600 asection *p ATTRIBUTE_UNUSED)
20601 {
20602 switch (elf_section_data (p)->this_hdr.sh_type)
20603 {
20604 case SHT_PROGBITS:
20605 case SHT_NOBITS:
20606 /* If sh_type is yet undecided, assume it could be
20607 SHT_PROGBITS/SHT_NOBITS. */
20608 case SHT_NULL:
20609 return FALSE;
20610
20611 /* There shouldn't be section relative relocations
20612 against any other section. */
20613 default:
20614 return TRUE;
20615 }
20616 }
20617
20618 #undef elf32_bed
20619 #define elf32_bed elf32_arm_fdpic_bed
20620
20621 #undef bfd_elf32_bfd_link_hash_table_create
20622 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_fdpic_link_hash_table_create
20623
20624 #undef elf_backend_omit_section_dynsym
20625 #define elf_backend_omit_section_dynsym elf32_arm_fdpic_omit_section_dynsym
20626
20627 #include "elf32-target.h"
20628
20629 #undef elf_match_priority
20630 #undef ELF_OSABI
20631 #undef elf_backend_omit_section_dynsym
20632
20633 /* VxWorks Targets. */
20634
20635 #undef TARGET_LITTLE_SYM
20636 #define TARGET_LITTLE_SYM arm_elf32_vxworks_le_vec
20637 #undef TARGET_LITTLE_NAME
20638 #define TARGET_LITTLE_NAME "elf32-littlearm-vxworks"
20639 #undef TARGET_BIG_SYM
20640 #define TARGET_BIG_SYM arm_elf32_vxworks_be_vec
20641 #undef TARGET_BIG_NAME
20642 #define TARGET_BIG_NAME "elf32-bigarm-vxworks"
20643
20644 /* Like elf32_arm_link_hash_table_create -- but overrides
20645 appropriately for VxWorks. */
20646
20647 static struct bfd_link_hash_table *
20648 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20649 {
20650 struct bfd_link_hash_table *ret;
20651
20652 ret = elf32_arm_link_hash_table_create (abfd);
20653 if (ret)
20654 {
20655 struct elf32_arm_link_hash_table *htab
20656 = (struct elf32_arm_link_hash_table *) ret;
20657 htab->use_rel = 0;
20658 htab->vxworks_p = 1;
20659 }
20660 return ret;
20661 }
20662
/* Final-write hook for VxWorks targets: run the ARM-specific
   processing first, then the generic VxWorks post-processing, whose
   result is returned.  */

static bfd_boolean
elf32_arm_vxworks_final_write_processing (bfd *abfd)
{
  arm_final_write_processing (abfd);
  return elf_vxworks_final_write_processing (abfd);
}
20669
20670 #undef elf32_bed
20671 #define elf32_bed elf32_arm_vxworks_bed
20672
20673 #undef bfd_elf32_bfd_link_hash_table_create
20674 #define bfd_elf32_bfd_link_hash_table_create elf32_arm_vxworks_link_hash_table_create
20675 #undef elf_backend_final_write_processing
20676 #define elf_backend_final_write_processing elf32_arm_vxworks_final_write_processing
20677 #undef elf_backend_emit_relocs
20678 #define elf_backend_emit_relocs elf_vxworks_emit_relocs
20679
20680 #undef elf_backend_may_use_rel_p
20681 #define elf_backend_may_use_rel_p 0
20682 #undef elf_backend_may_use_rela_p
20683 #define elf_backend_may_use_rela_p 1
20684 #undef elf_backend_default_use_rela_p
20685 #define elf_backend_default_use_rela_p 1
20686 #undef elf_backend_want_plt_sym
20687 #define elf_backend_want_plt_sym 1
20688 #undef ELF_MAXPAGESIZE
20689 #define ELF_MAXPAGESIZE 0x1000
20690
20691 #include "elf32-target.h"
20692
20693
/* Merge backend specific data from an object file to the output
   object file when linking.  Returns FALSE (after reporting an error)
   on a hard incompatibility such as mismatched EABI versions; for
   EABI-unknown objects the individual flag mismatches are all reported
   and accumulated into the return value rather than stopping at the
   first problem.  */

static bfd_boolean
elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
{
  bfd *obfd = info->output_bfd;
  flagword out_flags;
  flagword in_flags;
  bfd_boolean flags_compatible = TRUE;
  asection *sec;

  /* Check if we have the same endianness.  */
  if (! _bfd_generic_verify_endian_match (ibfd, info))
    return FALSE;

  /* Only ARM <-> ARM merges are handled here; anything else is
     accepted without further checking.  */
  if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
    return TRUE;

  /* Merge the EABI build attributes first.  */
  if (!elf32_arm_merge_eabi_attributes (ibfd, info))
    return FALSE;

  /* The input BFD must have had its flags initialised.  */
  /* The following seems bogus to me -- The flags are initialized in
     the assembler but I don't think an elf_flags_init field is
     written into the object.  */
  /* BFD_ASSERT (elf_flags_init (ibfd)); */

  in_flags = elf_elfheader (ibfd)->e_flags;
  out_flags = elf_elfheader (obfd)->e_flags;

  /* In theory there is no reason why we couldn't handle this.  However
     in practice it isn't even close to working and there is no real
     reason to want it.  */
  if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
      && !(ibfd->flags & DYNAMIC)
      && (in_flags & EF_ARM_BE8))
    {
      _bfd_error_handler (_("error: %pB is already in final BE8 format"),
			  ibfd);
      return FALSE;
    }

  if (!elf_flags_init (obfd))
    {
      /* If the input is the default architecture and had the default
	 flags then do not bother setting the flags for the output
	 architecture, instead allow future merges to do this.  If no
	 future merges ever set these flags then they will retain their
	 uninitialised values, which surprise surprise, correspond
	 to the default values.  */
      if (bfd_get_arch_info (ibfd)->the_default
	  && elf_elfheader (ibfd)->e_flags == 0)
	return TRUE;

      elf_flags_init (obfd) = TRUE;
      elf_elfheader (obfd)->e_flags = in_flags;

      if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
	  && bfd_get_arch_info (obfd)->the_default)
	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));

      return TRUE;
    }

  /* Determine what should happen if the input ARM architecture
     does not match the output ARM architecture.  */
  if (! bfd_arm_merge_machines (ibfd, obfd))
    return FALSE;

  /* Identical flags must be compatible.  */
  if (in_flags == out_flags)
    return TRUE;

  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatiblity.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatability
     in data sections ?  */
  if (!(ibfd->flags & DYNAMIC))
    {
      bfd_boolean null_input_bfd = TRUE;
      bfd_boolean only_data_sections = TRUE;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	{
	  /* Ignore synthetic glue sections.  */
	  if (strcmp (sec->name, ".glue_7")
	      && strcmp (sec->name, ".glue_7t"))
	    {
	      if ((bfd_section_flags (sec)
		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
		only_data_sections = FALSE;

	      /* NOTE(review): the break means only the first non-glue
		 section is inspected here; presumably that is considered
		 sufficient to classify the input -- confirm.  */
	      null_input_bfd = FALSE;
	      break;
	    }
	}

      if (null_input_bfd || only_data_sections)
	return TRUE;
    }

  /* Complain about various flag mismatches.  */
  if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
				      EF_ARM_EABI_VERSION (out_flags)))
    {
      _bfd_error_handler
	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
      return FALSE;
    }

  /* Not sure what needs to be checked for EABI versions >= 1.  */
  /* VxWorks libraries do not use these flags.  */
  if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
      && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
      && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
    {
      if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
	{
	  _bfd_error_handler
	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
	{
	  if (in_flags & EF_ARM_APCS_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
	       ibfd, obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
	       ibfd, obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
	{
	  if (in_flags & EF_ARM_VFP_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "VFP", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "FPA", obfd);

	  flags_compatible = FALSE;
	}

      if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
	{
	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
	    _bfd_error_handler
	      (_("error: %pB uses %s instructions, whereas %pB does not"),
	       ibfd, "Maverick", obfd);
	  else
	    _bfd_error_handler
	      (_("error: %pB does not use %s instructions, whereas %pB does"),
	       ibfd, "Maverick", obfd);

	  flags_compatible = FALSE;
	}

#ifdef EF_ARM_SOFT_FLOAT
      if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
	{
	  /* We can allow interworking between code that is VFP format
	     layout, and uses either soft float or integer regs for
	     passing floating point arguments and results.  We already
	     know that the APCS_FLOAT flags match; similarly for VFP
	     flags.  */
	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
	    {
	      if (in_flags & EF_ARM_SOFT_FLOAT)
		_bfd_error_handler
		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
		   ibfd, obfd);
	      else
		_bfd_error_handler
		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
		   ibfd, obfd);

	      flags_compatible = FALSE;
	    }
	}
#endif

      /* Interworking mismatch is only a warning.  */
      if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
	{
	  if (in_flags & EF_ARM_INTERWORK)
	    {
	      _bfd_error_handler
		(_("warning: %pB supports interworking, whereas %pB does not"),
		 ibfd, obfd);
	    }
	  else
	    {
	      _bfd_error_handler
		(_("warning: %pB does not support interworking, whereas %pB does"),
		 ibfd, obfd);
	    }
	}
    }

  return flags_compatible;
}
20916
20917
20918 /* Symbian OS Targets. */
20919
20920 #undef TARGET_LITTLE_SYM
20921 #define TARGET_LITTLE_SYM arm_elf32_symbian_le_vec
20922 #undef TARGET_LITTLE_NAME
20923 #define TARGET_LITTLE_NAME "elf32-littlearm-symbian"
20924 #undef TARGET_BIG_SYM
20925 #define TARGET_BIG_SYM arm_elf32_symbian_be_vec
20926 #undef TARGET_BIG_NAME
20927 #define TARGET_BIG_NAME "elf32-bigarm-symbian"
20928
20929 /* Like elf32_arm_link_hash_table_create -- but overrides
20930 appropriately for Symbian OS. */
20931
20932 static struct bfd_link_hash_table *
20933 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20934 {
20935 struct bfd_link_hash_table *ret;
20936
20937 ret = elf32_arm_link_hash_table_create (abfd);
20938 if (ret)
20939 {
20940 struct elf32_arm_link_hash_table *htab
20941 = (struct elf32_arm_link_hash_table *)ret;
20942 /* There is no PLT header for Symbian OS. */
20943 htab->plt_header_size = 0;
20944 /* The PLT entries are each one instruction and one word. */
20945 htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20946 htab->symbian_p = 1;
20947 /* Symbian uses armv5t or above, so use_blx is always true. */
20948 htab->use_blx = 1;
20949 htab->root.is_relocatable_executable = 1;
20950 }
20951 return ret;
20952 }
20953
/* Special-section attribute overrides for Symbian OS (BPABI) targets.
   Terminated by a NULL entry.  */

static const struct bfd_elf_special_section
elf32_arm_symbian_special_sections[] =
{
  /* In a BPABI executable, the dynamic linking sections do not go in
     the loadable read-only segment.  The post-linker may wish to
     refer to these sections, but they are not part of the final
     program image.  */
  { STRING_COMMA_LEN (".dynamic"), 0, SHT_DYNAMIC, 0 },
  { STRING_COMMA_LEN (".dynstr"), 0, SHT_STRTAB, 0 },
  { STRING_COMMA_LEN (".dynsym"), 0, SHT_DYNSYM, 0 },
  { STRING_COMMA_LEN (".got"), 0, SHT_PROGBITS, 0 },
  { STRING_COMMA_LEN (".hash"), 0, SHT_HASH, 0 },
  /* These sections do not need to be writable as the SymbianOS
     postlinker will arrange things so that no dynamic relocation is
     required.  */
  { STRING_COMMA_LEN (".init_array"), 0, SHT_INIT_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".fini_array"), 0, SHT_FINI_ARRAY, SHF_ALLOC },
  { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
20974
/* Begin-write hook for Symbian OS targets: clear D_PAGED before
   delegating to the stock ARM begin-write processing.  */

static void
elf32_arm_symbian_begin_write_processing (bfd *abfd,
					  struct bfd_link_info *link_info)
{
  /* BPABI objects are never loaded directly by an OS kernel; they are
     processed by a postlinker first, into an OS-specific format.  If
     the D_PAGED bit is set on the file, BFD will align segments on
     page boundaries, so that an OS can directly map the file.  With
     BPABI objects, that just results in wasted space.  In addition,
     because we clear the D_PAGED bit, map_sections_to_segments will
     recognize that the program headers should not be mapped into any
     loadable segment.  */
  abfd->flags &= ~D_PAGED;
  elf32_arm_begin_write_processing (abfd, link_info);
}
20990
20991 static bfd_boolean
20992 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20993 struct bfd_link_info *info)
20994 {
20995 struct elf_segment_map *m;
20996 asection *dynsec;
20997
20998 /* BPABI shared libraries and executables should have a PT_DYNAMIC
20999 segment. However, because the .dynamic section is not marked
21000 with SEC_LOAD, the generic ELF code will not create such a
21001 segment. */
21002 dynsec = bfd_get_section_by_name (abfd, ".dynamic");
21003 if (dynsec)
21004 {
21005 for (m = elf_seg_map (abfd); m != NULL; m = m->next)
21006 if (m->p_type == PT_DYNAMIC)
21007 break;
21008
21009 if (m == NULL)
21010 {
21011 m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
21012 m->next = elf_seg_map (abfd);
21013 elf_seg_map (abfd) = m;
21014 }
21015 }
21016
21017 /* Also call the generic arm routine. */
21018 return elf32_arm_modify_segment_map (abfd, info);
21019 }
21020
21021 /* Return address for Ith PLT stub in section PLT, for relocation REL
21022 or (bfd_vma) -1 if it should not be included. */
21023
21024 static bfd_vma
21025 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
21026 const arelent *rel ATTRIBUTE_UNUSED)
21027 {
21028 return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
21029 }
21030
/* Backend vector overrides for the SymbianOS (BPABI) target; the
   remaining hooks are inherited from the generic ARM definitions
   earlier in this file, then elf32-target.h builds the target.  */
#undef elf32_bed
#define elf32_bed elf32_arm_symbian_bed

/* The dynamic sections are not allocated on SymbianOS; the postlinker
   will process them and then discard them.  */
#undef ELF_DYNAMIC_SEC_FLAGS
#define ELF_DYNAMIC_SEC_FLAGS \
  (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)

/* Drop the earlier emit_relocs override so the default is used.  */
#undef elf_backend_emit_relocs

#undef bfd_elf32_bfd_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create elf32_arm_symbian_link_hash_table_create
#undef elf_backend_special_sections
#define elf_backend_special_sections elf32_arm_symbian_special_sections
#undef elf_backend_begin_write_processing
#define elf_backend_begin_write_processing elf32_arm_symbian_begin_write_processing
#undef elf_backend_final_write_processing
#define elf_backend_final_write_processing elf32_arm_final_write_processing

#undef elf_backend_modify_segment_map
#define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map

/* There is no .got section for BPABI objects, and hence no header.  */
#undef elf_backend_got_header_size
#define elf_backend_got_header_size 0

/* Similarly, there is no .got.plt section.  */
#undef elf_backend_want_got_plt
#define elf_backend_want_got_plt 0

#undef elf_backend_plt_sym_val
#define elf_backend_plt_sym_val elf32_arm_symbian_plt_sym_val

/* BPABI relocations are REL-only: allow REL, forbid RELA, and make
   REL the default.  */
#undef elf_backend_may_use_rel_p
#define elf_backend_may_use_rel_p 1
#undef elf_backend_may_use_rela_p
#define elf_backend_may_use_rela_p 0
#undef elf_backend_default_use_rela_p
#define elf_backend_default_use_rela_p 0
#undef elf_backend_want_plt_sym
#define elf_backend_want_plt_sym 0
#undef elf_backend_dtrel_excludes_plt
#define elf_backend_dtrel_excludes_plt 0
/* Maximum page size for this target is 32 KiB.  */
#undef ELF_MAXPAGESIZE
#define ELF_MAXPAGESIZE 0x8000

#include "elf32-target.h"