1 // arm.cc -- arm target support for gold.
2
3 // Copyright 2009, 2010 Free Software Foundation, Inc.
4 // Written by Doug Kwan <dougkwan@google.com> based on the i386 code
5 // by Ian Lance Taylor <iant@google.com>.
6 // This file also contains borrowed and adapted code from
7 // bfd/elf32-arm.c.
8
9 // This file is part of gold.
10
11 // This program is free software; you can redistribute it and/or modify
12 // it under the terms of the GNU General Public License as published by
13 // the Free Software Foundation; either version 3 of the License, or
14 // (at your option) any later version.
15
16 // This program is distributed in the hope that it will be useful,
17 // but WITHOUT ANY WARRANTY; without even the implied warranty of
18 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 // GNU General Public License for more details.
20
21 // You should have received a copy of the GNU General Public License
22 // along with this program; if not, write to the Free Software
23 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
24 // MA 02110-1301, USA.
25
26 #include "gold.h"
27
28 #include <cstring>
29 #include <limits>
30 #include <cstdio>
31 #include <string>
32 #include <algorithm>
33 #include <map>
34 #include <utility>
35 #include <set>
36
37 #include "elfcpp.h"
38 #include "parameters.h"
39 #include "reloc.h"
40 #include "arm.h"
41 #include "object.h"
42 #include "symtab.h"
43 #include "layout.h"
44 #include "output.h"
45 #include "copy-relocs.h"
46 #include "target.h"
47 #include "target-reloc.h"
48 #include "target-select.h"
49 #include "tls.h"
50 #include "defstd.h"
51 #include "gc.h"
52 #include "attributes.h"
53
54 namespace
55 {
56
57 using namespace gold;
58
59 template<bool big_endian>
60 class Output_data_plt_arm;
61
62 template<bool big_endian>
63 class Stub_table;
64
65 template<bool big_endian>
66 class Arm_input_section;
67
68 class Arm_exidx_cantunwind;
69
70 class Arm_exidx_merged_section;
71
72 class Arm_exidx_fixup;
73
74 template<bool big_endian>
75 class Arm_output_section;
76
77 class Arm_exidx_input_section;
78
79 template<bool big_endian>
80 class Arm_relobj;
81
82 template<bool big_endian>
83 class Target_arm;
84
85 // For convenience.
86 typedef elfcpp::Elf_types<32>::Elf_Addr Arm_address;
87
88 // Maximum branch offsets for ARM, THUMB and THUMB2.
89 const int32_t ARM_MAX_FWD_BRANCH_OFFSET = ((((1 << 23) - 1) << 2) + 8);
90 const int32_t ARM_MAX_BWD_BRANCH_OFFSET = ((-((1 << 23) << 2)) + 8);
91 const int32_t THM_MAX_FWD_BRANCH_OFFSET = ((1 << 22) - 2 + 4);
92 const int32_t THM_MAX_BWD_BRANCH_OFFSET = (-(1 << 22) + 4);
93 const int32_t THM2_MAX_FWD_BRANCH_OFFSET = (((1 << 24) - 2) + 4);
94 const int32_t THM2_MAX_BWD_BRANCH_OFFSET = (-(1 << 24) + 4);
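
// For reference, these limits follow directly from the branch encodings.
// An ARM B/BL instruction has a signed 24-bit immediate that is shifted
// left by 2 and added to the PC, and the PC reads as the branch address
// plus 8 in ARM state (plus 4 in THUMB state), so for example:
//
//   ARM_MAX_FWD_BRANCH_OFFSET = ((2^23 - 1) << 2) + 8 = 0x2000004
//   ARM_MAX_BWD_BRANCH_OFFSET = (-2^23 << 2) + 8      = -0x1fffff8
//
// The THUMB and THUMB2 limits correspond to the +/-4MB and +/-16MB ranges
// of the 16-bit and 32-bit THUMB branch encodings, with a pipeline offset
// of 4.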
95
96 // The arm target class.
97 //
98 // This is a very simple port of gold for ARM-EABI. It is intended for
99 // supporting Android only for the time being.
100 //
101 // TODOs:
102 // - Support the following relocation types as needed:
103 // R_ARM_SBREL32
104 // R_ARM_LDR_SBREL_11_0_NC
105 // R_ARM_ALU_SBREL_19_12_NC
106 // R_ARM_ALU_SBREL_27_20_CK
107 // R_ARM_SBREL31
108 // R_ARM_REL32_NOI
109 // R_ARM_PLT32_ABS
110 // R_ARM_GOT_ABS
111 // R_ARM_GOT_BREL12
112 // R_ARM_GOTOFF12
113 // R_ARM_TLS_GD32
114 // R_ARM_TLS_LDM32
115 // R_ARM_TLS_LDO32
116 // R_ARM_TLS_IE32
117 // R_ARM_TLS_LE32
118 // R_ARM_TLS_LDO12
119 // R_ARM_TLS_LE12
120 // R_ARM_TLS_IE12GP
121 //
122 // - Make PLTs more flexible for different architecture features like
123 // Thumb-2 and BE8.
124 // There are probably a lot more.
125
126 // Instruction template class. This class is similar to the insn_sequence
127 // struct in bfd/elf32-arm.c.
128
129 class Insn_template
130 {
131 public:
132 // Types of instruction templates.
133 enum Type
134 {
135 THUMB16_TYPE = 1,
136 // THUMB16_SPECIAL_TYPE is used by sub-classes of Stub for instruction
137 // templates with class-specific semantics. Currently this is used
138 // only by the Cortex_a8_stub class for handling condition codes in
139 // conditional branches.
140 THUMB16_SPECIAL_TYPE,
141 THUMB32_TYPE,
142 ARM_TYPE,
143 DATA_TYPE
144 };
145
146 // Factory methods to create instruction templates in different formats.
147
148 static const Insn_template
149 thumb16_insn(uint32_t data)
150 { return Insn_template(data, THUMB16_TYPE, elfcpp::R_ARM_NONE, 0); }
151
152 // A Thumb conditional branch, in which the proper condition is inserted
153 // when we build the stub.
154 static const Insn_template
155 thumb16_bcond_insn(uint32_t data)
156 { return Insn_template(data, THUMB16_SPECIAL_TYPE, elfcpp::R_ARM_NONE, 1); }
157
158 static const Insn_template
159 thumb32_insn(uint32_t data)
160 { return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_NONE, 0); }
161
162 static const Insn_template
163 thumb32_b_insn(uint32_t data, int reloc_addend)
164 {
165 return Insn_template(data, THUMB32_TYPE, elfcpp::R_ARM_THM_JUMP24,
166 reloc_addend);
167 }
168
169 static const Insn_template
170 arm_insn(uint32_t data)
171 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_NONE, 0); }
172
173 static const Insn_template
174 arm_rel_insn(unsigned data, int reloc_addend)
175 { return Insn_template(data, ARM_TYPE, elfcpp::R_ARM_JUMP24, reloc_addend); }
176
177 static const Insn_template
178 data_word(unsigned data, unsigned int r_type, int reloc_addend)
179 { return Insn_template(data, DATA_TYPE, r_type, reloc_addend); }
180
181 // Accessors. This class is used for read-only objects so no modifiers
182 // are provided.
183
184 uint32_t
185 data() const
186 { return this->data_; }
187
188 // Return the instruction sequence type of this.
189 Type
190 type() const
191 { return this->type_; }
192
193 // Return the ARM relocation type of this.
194 unsigned int
195 r_type() const
196 { return this->r_type_; }
197
198 int32_t
199 reloc_addend() const
200 { return this->reloc_addend_; }
201
202 // Return size of instruction template in bytes.
203 size_t
204 size() const;
205
206 // Return byte-alignment of instruction template.
207 unsigned
208 alignment() const;
209
210 private:
211 // We make the constructor private to ensure that only the factory
212 // methods are used.
213 inline
214 Insn_template(unsigned data, Type type, unsigned int r_type, int reloc_addend)
215 : data_(data), type_(type), r_type_(r_type), reloc_addend_(reloc_addend)
216 { }
217
218 // Instruction specific data. This is used to store information like
219 // some of the instruction bits.
220 uint32_t data_;
221 // Instruction template type.
222 Type type_;
223 // Relocation type if there is a relocation or R_ARM_NONE otherwise.
224 unsigned int r_type_;
225 // Relocation addend.
226 int32_t reloc_addend_;
227 };
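
// As an illustration of how these factory methods are combined, the
// simplest long-branch stub template defined later in this file (in the
// Stub_factory constructor) consists of an ARM instruction followed by a
// data word carrying a relocation:
//
//   static const Insn_template elf32_arm_stub_long_branch_any_any[] =
//     {
//       Insn_template::arm_insn(0xe51ff004),                // ldr pc, [pc, #-4]
//       Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0) // dcd R_ARM_ABS32(X)
//     };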
228
229 // Macro for generating code for stub types. One entry per long/short
230 // branch stub.
231
232 #define DEF_STUBS \
233 DEF_STUB(long_branch_any_any) \
234 DEF_STUB(long_branch_v4t_arm_thumb) \
235 DEF_STUB(long_branch_thumb_only) \
236 DEF_STUB(long_branch_v4t_thumb_thumb) \
237 DEF_STUB(long_branch_v4t_thumb_arm) \
238 DEF_STUB(short_branch_v4t_thumb_arm) \
239 DEF_STUB(long_branch_any_arm_pic) \
240 DEF_STUB(long_branch_any_thumb_pic) \
241 DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
242 DEF_STUB(long_branch_v4t_arm_thumb_pic) \
243 DEF_STUB(long_branch_v4t_thumb_arm_pic) \
244 DEF_STUB(long_branch_thumb_only_pic) \
245 DEF_STUB(a8_veneer_b_cond) \
246 DEF_STUB(a8_veneer_b) \
247 DEF_STUB(a8_veneer_bl) \
248 DEF_STUB(a8_veneer_blx) \
249 DEF_STUB(v4_veneer_bx)
250
251 // Stub types.
252
253 #define DEF_STUB(x) arm_stub_##x,
254 typedef enum
255 {
256 arm_stub_none,
257 DEF_STUBS
258
259 // First reloc stub type.
260 arm_stub_reloc_first = arm_stub_long_branch_any_any,
261 // Last reloc stub type.
262 arm_stub_reloc_last = arm_stub_long_branch_thumb_only_pic,
263
264 // First Cortex-A8 stub type.
265 arm_stub_cortex_a8_first = arm_stub_a8_veneer_b_cond,
266 // Last Cortex-A8 stub type.
267 arm_stub_cortex_a8_last = arm_stub_a8_veneer_blx,
268
269 // Last stub type.
270 arm_stub_type_last = arm_stub_v4_veneer_bx
271 } Stub_type;
272 #undef DEF_STUB
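
// With the definition of DEF_STUB above, the DEF_STUBS invocation expands
// to one enumerator per stub type, in the order listed in DEF_STUBS:
//
//   arm_stub_long_branch_any_any,
//   arm_stub_long_branch_v4t_arm_thumb,
//   ...
//   arm_stub_v4_veneer_bx,
//
// which is why the reloc and Cortex-A8 first/last markers above can be
// defined in terms of those enumerator names.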
273
274 // Stub template class. Templates are meant to be read-only objects.
275 // A stub template for a stub type contains all read-only attributes
276 // common to all stubs of the same type.
277
278 class Stub_template
279 {
280 public:
281 Stub_template(Stub_type, const Insn_template*, size_t);
282
283 ~Stub_template()
284 { }
285
286 // Return stub type.
287 Stub_type
288 type() const
289 { return this->type_; }
290
291 // Return an array of instruction templates.
292 const Insn_template*
293 insns() const
294 { return this->insns_; }
295
296 // Return size of template in number of instructions.
297 size_t
298 insn_count() const
299 { return this->insn_count_; }
300
301 // Return size of template in bytes.
302 size_t
303 size() const
304 { return this->size_; }
305
306 // Return alignment of the stub template.
307 unsigned
308 alignment() const
309 { return this->alignment_; }
310
311 // Return whether entry point is in thumb mode.
312 bool
313 entry_in_thumb_mode() const
314 { return this->entry_in_thumb_mode_; }
315
316 // Return number of relocations in this template.
317 size_t
318 reloc_count() const
319 { return this->relocs_.size(); }
320
321 // Return index of the I-th instruction with relocation.
322 size_t
323 reloc_insn_index(size_t i) const
324 {
325 gold_assert(i < this->relocs_.size());
326 return this->relocs_[i].first;
327 }
328
329 // Return the offset of the I-th instruction with relocation from the
330 // beginning of the stub.
331 section_size_type
332 reloc_offset(size_t i) const
333 {
334 gold_assert(i < this->relocs_.size());
335 return this->relocs_[i].second;
336 }
337
338 private:
339 // This contains information about an instruction template with a relocation
340 // and its offset from start of stub.
341 typedef std::pair<size_t, section_size_type> Reloc;
342
343 // A Stub_template may not be copied. We want to share templates as much
344 // as possible.
345 Stub_template(const Stub_template&);
346 Stub_template& operator=(const Stub_template&);
347
348 // Stub type.
349 Stub_type type_;
350 // Points to an array of Insn_templates.
351 const Insn_template* insns_;
352 // Number of Insn_templates in insns_[].
353 size_t insn_count_;
354 // Size of templated instructions in bytes.
355 size_t size_;
356 // Alignment of templated instructions.
357 unsigned alignment_;
358 // Flag to indicate if entry is in thumb mode.
359 bool entry_in_thumb_mode_;
360 // A table of reloc instruction indices and offsets. We can find these by
361 // looking at the instruction templates but we pre-compute and then stash
362 // them here for speed.
363 std::vector<Reloc> relocs_;
364 };
365
366 //
367 // A class for code stubs. This is a base class for different types of
368 // stubs used in the ARM target.
369 //
370
371 class Stub
372 {
373 private:
374 static const section_offset_type invalid_offset =
375 static_cast<section_offset_type>(-1);
376
377 public:
378 Stub(const Stub_template* stub_template)
379 : stub_template_(stub_template), offset_(invalid_offset)
380 { }
381
382 virtual
383 ~Stub()
384 { }
385
386 // Return the stub template.
387 const Stub_template*
388 stub_template() const
389 { return this->stub_template_; }
390
391 // Return offset of code stub from beginning of its containing stub table.
392 section_offset_type
393 offset() const
394 {
395 gold_assert(this->offset_ != invalid_offset);
396 return this->offset_;
397 }
398
399 // Set offset of code stub from beginning of its containing stub table.
400 void
401 set_offset(section_offset_type offset)
402 { this->offset_ = offset; }
403
404 // Return the relocation target address of the i-th relocation in the
405 // stub. This must be defined in a child class.
406 Arm_address
407 reloc_target(size_t i)
408 { return this->do_reloc_target(i); }
409
410 // Write a stub at output VIEW. BIG_ENDIAN selects how the stub is written.
411 void
412 write(unsigned char* view, section_size_type view_size, bool big_endian)
413 { this->do_write(view, view_size, big_endian); }
414
415 // Return the instruction for THUMB16_SPECIAL_TYPE instruction template
416 // for the i-th instruction.
417 uint16_t
418 thumb16_special(size_t i)
419 { return this->do_thumb16_special(i); }
420
421 protected:
422 // This must be defined in the child class.
423 virtual Arm_address
424 do_reloc_target(size_t) = 0;
425
426 // This may be overridden in the child class.
427 virtual void
428 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
429 {
430 if (big_endian)
431 this->do_fixed_endian_write<true>(view, view_size);
432 else
433 this->do_fixed_endian_write<false>(view, view_size);
434 }
435
436 // This must be overridden if a child class uses the THUMB16_SPECIAL_TYPE
437 // instruction template.
438 virtual uint16_t
439 do_thumb16_special(size_t)
440 { gold_unreachable(); }
441
442 private:
443 // A template to implement do_write.
444 template<bool big_endian>
445 void inline
446 do_fixed_endian_write(unsigned char*, section_size_type);
447
448 // The template for this stub.
449 const Stub_template* stub_template_;
450 // Offset of this stub within its containing section.
451 section_offset_type offset_;
452 };
453
454 // Reloc stub class. These are stubs we use to fix up relocations because
455 // of limited branch ranges.
456
457 class Reloc_stub : public Stub
458 {
459 public:
460 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
461 // We assume we never jump to this address.
462 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
463
464 // Return destination address.
465 Arm_address
466 destination_address() const
467 {
468 gold_assert(this->destination_address_ != this->invalid_address);
469 return this->destination_address_;
470 }
471
472 // Set destination address.
473 void
474 set_destination_address(Arm_address address)
475 {
476 gold_assert(address != this->invalid_address);
477 this->destination_address_ = address;
478 }
479
480 // Reset destination address.
481 void
482 reset_destination_address()
483 { this->destination_address_ = this->invalid_address; }
484
485 // Determine stub type for a branch of a relocation of R_TYPE going
486 // from BRANCH_ADDRESS to BRANCH_TARGET. If TARGET_IS_THUMB is set,
487 // the branch target is a thumb instruction. TARGET is used to look
488 // up ARM-specific linker settings.
489 static Stub_type
490 stub_type_for_reloc(unsigned int r_type, Arm_address branch_address,
491 Arm_address branch_target, bool target_is_thumb);
492
493 // Reloc_stub key. A key is logically a triplet of a stub type, a symbol
494 // and an addend. Since we treat global and local symbols differently, we
495 // use a Symbol object for a global symbol and an object-index pair for
496 // a local symbol.
497 class Key
498 {
499 public:
500 // If SYMBOL is not null, this is a global symbol and we ignore RELOBJ and
501 // R_SYM. Otherwise, this is a local symbol; RELOBJ must be non-NULL
502 // and R_SYM must not be invalid_index.
503 Key(Stub_type stub_type, const Symbol* symbol, const Relobj* relobj,
504 unsigned int r_sym, int32_t addend)
505 : stub_type_(stub_type), addend_(addend)
506 {
507 if (symbol != NULL)
508 {
509 this->r_sym_ = Reloc_stub::invalid_index;
510 this->u_.symbol = symbol;
511 }
512 else
513 {
514 gold_assert(relobj != NULL && r_sym != invalid_index);
515 this->r_sym_ = r_sym;
516 this->u_.relobj = relobj;
517 }
518 }
519
520 ~Key()
521 { }
522
523 // Accessors: Keys are meant to be read-only objects so no modifiers are
524 // provided.
525
526 // Return stub type.
527 Stub_type
528 stub_type() const
529 { return this->stub_type_; }
530
531 // Return the local symbol index or invalid_index.
532 unsigned int
533 r_sym() const
534 { return this->r_sym_; }
535
536 // Return the symbol if there is one.
537 const Symbol*
538 symbol() const
539 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
540
541 // Return the relobj if there is one.
542 const Relobj*
543 relobj() const
544 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
545
546 // Whether this is equal to another key K.
547 bool
548 eq(const Key& k) const
549 {
550 return ((this->stub_type_ == k.stub_type_)
551 && (this->r_sym_ == k.r_sym_)
552 && ((this->r_sym_ != Reloc_stub::invalid_index)
553 ? (this->u_.relobj == k.u_.relobj)
554 : (this->u_.symbol == k.u_.symbol))
555 && (this->addend_ == k.addend_));
556 }
557
558 // Return a hash value.
559 size_t
560 hash_value() const
561 {
562 return (this->stub_type_
563 ^ this->r_sym_
564 ^ gold::string_hash<char>(
565 (this->r_sym_ != Reloc_stub::invalid_index)
566 ? this->u_.relobj->name().c_str()
567 : this->u_.symbol->name())
568 ^ this->addend_);
569 }
570
571 // Functors for STL associative containers.
572 struct hash
573 {
574 size_t
575 operator()(const Key& k) const
576 { return k.hash_value(); }
577 };
578
579 struct equal_to
580 {
581 bool
582 operator()(const Key& k1, const Key& k2) const
583 { return k1.eq(k2); }
584 };
585
586 // Name of key. This is mainly for debugging.
587 std::string
588 name() const;
589
590 private:
591 // Stub type.
592 Stub_type stub_type_;
593 // If this is a local symbol, this is the index in the defining object.
594 // Otherwise, it is invalid_index for a global symbol.
595 unsigned int r_sym_;
596 // If r_sym_ is invalid_index, this points to a global symbol.
597 // Otherwise, this points to a relobj. We use the unsized and target
598 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
599 // Arm_relobj. This is done to avoid making the stub class a template
600 // as most of the stub machinery is endianity-neutral. However, it
601 // may require a bit of casting done by users of this class.
602 union
603 {
604 const Symbol* symbol;
605 const Relobj* relobj;
606 } u_;
607 // Addend associated with a reloc.
608 int32_t addend_;
609 };
610
611 protected:
612 // Reloc_stubs are created via a stub factory. So these are protected.
613 Reloc_stub(const Stub_template* stub_template)
614 : Stub(stub_template), destination_address_(invalid_address)
615 { }
616
617 ~Reloc_stub()
618 { }
619
620 friend class Stub_factory;
621
622 // Return the relocation target address of the i-th relocation in the
623 // stub.
624 Arm_address
625 do_reloc_target(size_t i)
626 {
627 // All reloc stubs have only one relocation.
628 gold_assert(i == 0);
629 return this->destination_address_;
630 }
631
632 private:
633 // Address of destination.
634 Arm_address destination_address_;
635 };
636
637 // Cortex-A8 stub class. We need a Cortex-A8 stub to redirect any 32-bit
638 // THUMB branch that meets the following conditions:
639 //
640 // 1. The branch straddles a page boundary, i.e. the lower 12 bits of the
641 // branch address are 0xffe.
642 // 2. The branch target address is in the same page as the first word of the
643 // branch.
644 // 3. The branch follows a 32-bit instruction which is not a branch.
645 //
646 // To do the fix up, we need to store the address of the branch instruction
647 // and its target at least. We also need to store the original branch
648 // instruction bits for the condition code in a conditional branch. The
649 // condition code is used in a special instruction template. We also want
650 // to identify input sections needing Cortex-A8 workaround quickly. We store
651 // extra information about object and section index of the code section
652 // containing a branch being fixed up. The information is used to mark
653 // the code section when we finalize the Cortex-A8 stubs.
654 //
655
656 class Cortex_a8_stub : public Stub
657 {
658 public:
659 ~Cortex_a8_stub()
660 { }
661
662 // Return the object of the code section containing the branch being fixed
663 // up.
664 Relobj*
665 relobj() const
666 { return this->relobj_; }
667
668 // Return the section index of the code section containing the branch being
669 // fixed up.
670 unsigned int
671 shndx() const
672 { return this->shndx_; }
673
674 // Return the source address of the stub. This is the address of the original
675 // branch instruction. The LSB is always set to 1 to indicate that it is a THUMB
676 // instruction.
677 Arm_address
678 source_address() const
679 { return this->source_address_; }
680
681 // Return the destination address of the stub. This is the branch taken
682 // address of the original branch instruction. LSB is 1 if it is a THUMB
683 // instruction address.
684 Arm_address
685 destination_address() const
686 { return this->destination_address_; }
687
688 // Return the instruction being fixed up.
689 uint32_t
690 original_insn() const
691 { return this->original_insn_; }
692
693 protected:
694 // Cortex_a8_stubs are created via a stub factory. So these are protected.
695 Cortex_a8_stub(const Stub_template* stub_template, Relobj* relobj,
696 unsigned int shndx, Arm_address source_address,
697 Arm_address destination_address, uint32_t original_insn)
698 : Stub(stub_template), relobj_(relobj), shndx_(shndx),
699 source_address_(source_address | 1U),
700 destination_address_(destination_address),
701 original_insn_(original_insn)
702 { }
703
704 friend class Stub_factory;
705
706 // Return the relocation target address of the i-th relocation in the
707 // stub.
708 Arm_address
709 do_reloc_target(size_t i)
710 {
711 if (this->stub_template()->type() == arm_stub_a8_veneer_b_cond)
712 {
713 // The conditional branch veneer has two relocations.
714 gold_assert(i < 2);
715 return i == 0 ? this->source_address_ + 4 : this->destination_address_;
716 }
717 else
718 {
719 // All other Cortex-A8 stubs have only one relocation.
720 gold_assert(i == 0);
721 return this->destination_address_;
722 }
723 }
724
725 // Return an instruction for the THUMB16_SPECIAL_TYPE instruction template.
726 uint16_t
727 do_thumb16_special(size_t);
728
729 private:
730 // Object of the code section containing the branch being fixed up.
731 Relobj* relobj_;
732 // Section index of the code section containing the branch being fixed up.
733 unsigned int shndx_;
734 // Source address of original branch.
735 Arm_address source_address_;
736 // Destination address of the original branch.
737 Arm_address destination_address_;
738 // Original branch instruction. This is needed for copying the condition
739 // code from a conditional branch to its stub.
740 uint32_t original_insn_;
741 };
742
743 // ARMv4 BX Rx branch relocation stub class.
744 class Arm_v4bx_stub : public Stub
745 {
746 public:
747 ~Arm_v4bx_stub()
748 { }
749
750 // Return the associated register.
751 uint32_t
752 reg() const
753 { return this->reg_; }
754
755 protected:
756 // Arm V4BX stubs are created via a stub factory. So these are protected.
757 Arm_v4bx_stub(const Stub_template* stub_template, const uint32_t reg)
758 : Stub(stub_template), reg_(reg)
759 { }
760
761 friend class Stub_factory;
762
763 // Return the relocation target address of the i-th relocation in the
764 // stub.
765 Arm_address
766 do_reloc_target(size_t)
767 { gold_unreachable(); }
768
769 // This may be overridden in the child class.
770 virtual void
771 do_write(unsigned char* view, section_size_type view_size, bool big_endian)
772 {
773 if (big_endian)
774 this->do_fixed_endian_v4bx_write<true>(view, view_size);
775 else
776 this->do_fixed_endian_v4bx_write<false>(view, view_size);
777 }
778
779 private:
780 // A template to implement do_write.
781 template<bool big_endian>
782 void inline
783 do_fixed_endian_v4bx_write(unsigned char* view, section_size_type)
784 {
785 const Insn_template* insns = this->stub_template()->insns();
786 elfcpp::Swap<32, big_endian>::writeval(view,
787 (insns[0].data()
788 + (this->reg_ << 16)));
789 view += insns[0].size();
790 elfcpp::Swap<32, big_endian>::writeval(view,
791 (insns[1].data() + this->reg_));
792 view += insns[1].size();
793 elfcpp::Swap<32, big_endian>::writeval(view,
794 (insns[2].data() + this->reg_));
795 }
796
797 // A register index (r0-r14), which is associated with the stub.
798 uint32_t reg_;
799 };
800
801 // Stub factory class.
802
803 class Stub_factory
804 {
805 public:
806 // Return the unique instance of this class.
807 static const Stub_factory&
808 get_instance()
809 {
810 static Stub_factory singleton;
811 return singleton;
812 }
813
814 // Make a relocation stub.
815 Reloc_stub*
816 make_reloc_stub(Stub_type stub_type) const
817 {
818 gold_assert(stub_type >= arm_stub_reloc_first
819 && stub_type <= arm_stub_reloc_last);
820 return new Reloc_stub(this->stub_templates_[stub_type]);
821 }
822
823 // Make a Cortex-A8 stub.
824 Cortex_a8_stub*
825 make_cortex_a8_stub(Stub_type stub_type, Relobj* relobj, unsigned int shndx,
826 Arm_address source, Arm_address destination,
827 uint32_t original_insn) const
828 {
829 gold_assert(stub_type >= arm_stub_cortex_a8_first
830 && stub_type <= arm_stub_cortex_a8_last);
831 return new Cortex_a8_stub(this->stub_templates_[stub_type], relobj, shndx,
832 source, destination, original_insn);
833 }
834
835 // Make an ARM V4BX relocation stub.
836 // This method creates a stub from the arm_stub_v4_veneer_bx template only.
837 Arm_v4bx_stub*
838 make_arm_v4bx_stub(uint32_t reg) const
839 {
840 gold_assert(reg < 0xf);
841 return new Arm_v4bx_stub(this->stub_templates_[arm_stub_v4_veneer_bx],
842 reg);
843 }
844
845 private:
846 // Constructor and destructor are private since we only return a single
847 // instance created in Stub_factory::get_instance().
848
849 Stub_factory();
850
851 // A Stub_factory may not be copied since it is a singleton.
852 Stub_factory(const Stub_factory&);
853 Stub_factory& operator=(Stub_factory&);
854
855 // Stub templates. These are initialized in the constructor.
856 const Stub_template* stub_templates_[arm_stub_type_last+1];
857 };
858
859 // A class to hold stubs for the ARM target.
860
861 template<bool big_endian>
862 class Stub_table : public Output_data
863 {
864 public:
865 Stub_table(Arm_input_section<big_endian>* owner)
866 : Output_data(), owner_(owner), reloc_stubs_(), cortex_a8_stubs_(),
867 arm_v4bx_stubs_(0xf), prev_data_size_(0), prev_addralign_(1)
868 { }
869
870 ~Stub_table()
871 { }
872
873 // Owner of this stub table.
874 Arm_input_section<big_endian>*
875 owner() const
876 { return this->owner_; }
877
878 // Whether this stub table is empty.
879 bool
880 empty() const
881 {
882 return (this->reloc_stubs_.empty()
883 && this->cortex_a8_stubs_.empty()
884 && this->arm_v4bx_stubs_.empty());
885 }
886
887 // Return the current data size.
888 off_t
889 current_data_size() const
890 { return this->current_data_size_for_child(); }
891
892 // Add a STUB using KEY. The caller is responsible for avoiding adding
893 // a STUB with the same key more than once.
894 void
895 add_reloc_stub(Reloc_stub* stub, const Reloc_stub::Key& key)
896 {
897 const Stub_template* stub_template = stub->stub_template();
898 gold_assert(stub_template->type() == key.stub_type());
899 this->reloc_stubs_[key] = stub;
900 }
901
902 // Add a Cortex-A8 STUB that fixes up a THUMB branch at ADDRESS.
903 // The caller is responsible for avoiding adding a STUB with the same
904 // address more than once.
905 void
906 add_cortex_a8_stub(Arm_address address, Cortex_a8_stub* stub)
907 {
908 std::pair<Arm_address, Cortex_a8_stub*> value(address, stub);
909 this->cortex_a8_stubs_.insert(value);
910 }
911
912 // Add an ARM V4BX relocation stub. A register index will be retrieved
913 // from the stub.
914 void
915 add_arm_v4bx_stub(Arm_v4bx_stub* stub)
916 {
917 gold_assert(stub != NULL && this->arm_v4bx_stubs_[stub->reg()] == NULL);
918 this->arm_v4bx_stubs_[stub->reg()] = stub;
919 }
920
921 // Remove all Cortex-A8 stubs.
922 void
923 remove_all_cortex_a8_stubs();
924
925 // Look up a relocation stub using KEY. Return NULL if there is none.
926 Reloc_stub*
927 find_reloc_stub(const Reloc_stub::Key& key) const
928 {
929 typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.find(key);
930 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
931 }
932
933 // Look up an arm v4bx relocation stub using the register index.
934 // Return NULL if there is none.
935 Arm_v4bx_stub*
936 find_arm_v4bx_stub(const uint32_t reg) const
937 {
938 gold_assert(reg < 0xf);
939 return this->arm_v4bx_stubs_[reg];
940 }
941
942 // Relocate stubs in this stub table.
943 void
944 relocate_stubs(const Relocate_info<32, big_endian>*,
945 Target_arm<big_endian>*, Output_section*,
946 unsigned char*, Arm_address, section_size_type);
947
948 // Update data size and alignment at the end of a relaxation pass. Return
949 // true if either data size or alignment is different from that of the
950 // previous relaxation pass.
951 bool
952 update_data_size_and_addralign();
953
954 // Finalize stubs. Set the offsets of all stubs and mark input sections
955 // needing the Cortex-A8 workaround.
956 void
957 finalize_stubs();
958
959 // Apply Cortex-A8 workaround to an address range.
960 void
961 apply_cortex_a8_workaround_to_address_range(Target_arm<big_endian>*,
962 unsigned char*, Arm_address,
963 section_size_type);
964
965 protected:
966 // Write out section contents.
967 void
968 do_write(Output_file*);
969
970 // Return the required alignment.
971 uint64_t
972 do_addralign() const
973 { return this->prev_addralign_; }
974
975 // Reset address and file offset.
976 void
977 do_reset_address_and_file_offset()
978 { this->set_current_data_size_for_child(this->prev_data_size_); }
979
980 // Set final data size.
981 void
982 set_final_data_size()
983 { this->set_data_size(this->current_data_size()); }
984
985 private:
986 // Relocate one stub.
987 void
988 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
989 Target_arm<big_endian>*, Output_section*,
990 unsigned char*, Arm_address, section_size_type);
991
992 // Unordered map of relocation stubs.
993 typedef
994 Unordered_map<Reloc_stub::Key, Reloc_stub*, Reloc_stub::Key::hash,
995 Reloc_stub::Key::equal_to>
996 Reloc_stub_map;
997
998 // List of Cortex-A8 stubs ordered by addresses of branches being
999 // fixed up in output.
1000 typedef std::map<Arm_address, Cortex_a8_stub*> Cortex_a8_stub_list;
1001 // List of Arm V4BX relocation stubs ordered by associated registers.
1002 typedef std::vector<Arm_v4bx_stub*> Arm_v4bx_stub_list;
1003
1004 // Owner of this stub table.
1005 Arm_input_section<big_endian>* owner_;
1006 // The relocation stubs.
1007 Reloc_stub_map reloc_stubs_;
1008 // The cortex_a8_stubs.
1009 Cortex_a8_stub_list cortex_a8_stubs_;
1010 // The Arm V4BX relocation stubs.
1011 Arm_v4bx_stub_list arm_v4bx_stubs_;
1012 // Data size of this in the previous pass.
1013 off_t prev_data_size_;
1014 // Address alignment of this in the previous pass.
1015 uint64_t prev_addralign_;
1016 };
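
// A rough sketch of how these classes fit together when the target scans
// relocations for stubs later in this file (variable names here are just
// illustrative):
//
//   Stub_type stub_type =
//     Reloc_stub::stub_type_for_reloc(r_type, address, destination,
//                                     target_is_thumb);
//   if (stub_type != arm_stub_none)
//     {
//       Reloc_stub::Key key(stub_type, gsym, relobj, r_sym, addend);
//       Reloc_stub* stub = stub_table->find_reloc_stub(key);
//       if (stub == NULL)
//         {
//           stub = Stub_factory::get_instance().make_reloc_stub(stub_type);
//           stub_table->add_reloc_stub(stub, key);
//         }
//       stub->set_destination_address(destination
//                                     | (target_is_thumb ? 1 : 0));
//     }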
1017
1018 // Arm_exidx_cantunwind class. This represents an EXIDX_CANTUNWIND entry
1019 // we add to the end of an EXIDX input section that goes into the output.
1020
1021 class Arm_exidx_cantunwind : public Output_section_data
1022 {
1023 public:
1024 Arm_exidx_cantunwind(Relobj* relobj, unsigned int shndx)
1025 : Output_section_data(8, 4, true), relobj_(relobj), shndx_(shndx)
1026 { }
1027
1028 // Return the object containing the section pointed by this.
1029 Relobj*
1030 relobj() const
1031 { return this->relobj_; }
1032
1033 // Return the section index of the section pointed by this.
1034 unsigned int
1035 shndx() const
1036 { return this->shndx_; }
1037
1038 protected:
1039 void
1040 do_write(Output_file* of)
1041 {
1042 if (parameters->target().is_big_endian())
1043 this->do_fixed_endian_write<true>(of);
1044 else
1045 this->do_fixed_endian_write<false>(of);
1046 }
1047
1048 private:
1049 // Implement do_write for a given endianity.
1050 template<bool big_endian>
1051 void inline
1052 do_fixed_endian_write(Output_file*);
1053
1054 // The object containing the section pointed by this.
1055 Relobj* relobj_;
1056 // The section index of the section pointed by this.
1057 unsigned int shndx_;
1058 };
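
// For reference, the 8 bytes written by do_write() form one ARM EHABI
// index table entry: the first word is a prel31 offset to the code this
// entry covers (computed from the output address of the section given by
// relobj_ and shndx_), and the second word is the EXIDX_CANTUNWIND marker
// (the value 1), meaning no unwinding information is available there.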
1059
1060 // During EXIDX coverage fix-up, we compact an EXIDX section. The
1061 // offset map is used to map an input section offset within the EXIDX section
1062 // to the output offset from the start of this EXIDX section.
1063
1064 typedef std::map<section_offset_type, section_offset_type>
1065 Arm_exidx_section_offset_map;
1066
1067 // Arm_exidx_merged_section class. This represents an EXIDX input section
1068 // with some of its entries merged.
1069
1070 class Arm_exidx_merged_section : public Output_relaxed_input_section
1071 {
1072 public:
1073 // Constructor for Arm_exidx_merged_section.
1074 // EXIDX_INPUT_SECTION points to the unmodified EXIDX input section.
1075 // SECTION_OFFSET_MAP points to a section offset map describing how
1076 // parts of the input section are mapped to output. DELETED_BYTES is
1077 // the number of bytes deleted from the EXIDX input section.
1078 Arm_exidx_merged_section(
1079 const Arm_exidx_input_section& exidx_input_section,
1080 const Arm_exidx_section_offset_map& section_offset_map,
1081 uint32_t deleted_bytes);
1082
1083 // Return the original EXIDX input section.
1084 const Arm_exidx_input_section&
1085 exidx_input_section() const
1086 { return this->exidx_input_section_; }
1087
1088 // Return the section offset map.
1089 const Arm_exidx_section_offset_map&
1090 section_offset_map() const
1091 { return this->section_offset_map_; }
1092
1093 protected:
1094 // Write merged section into file OF.
1095 void
1096 do_write(Output_file* of);
1097
1098 bool
1099 do_output_offset(const Relobj*, unsigned int, section_offset_type,
1100 section_offset_type*) const;
1101
1102 private:
1103 // Original EXIDX input section.
1104 const Arm_exidx_input_section& exidx_input_section_;
1105 // Section offset map.
1106 const Arm_exidx_section_offset_map& section_offset_map_;
1107 };
1108
1109 // A class to wrap an ordinary input section containing executable code.
1110
1111 template<bool big_endian>
1112 class Arm_input_section : public Output_relaxed_input_section
1113 {
1114 public:
1115 Arm_input_section(Relobj* relobj, unsigned int shndx)
1116 : Output_relaxed_input_section(relobj, shndx, 1),
1117 original_addralign_(1), original_size_(0), stub_table_(NULL)
1118 { }
1119
1120 ~Arm_input_section()
1121 { }
1122
1123 // Initialize.
1124 void
1125 init();
1126
1127 // Whether this is a stub table owner.
1128 bool
1129 is_stub_table_owner() const
1130 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
1131
1132 // Return the stub table.
1133 Stub_table<big_endian>*
1134 stub_table() const
1135 { return this->stub_table_; }
1136
1137 // Set the stub_table.
1138 void
1139 set_stub_table(Stub_table<big_endian>* stub_table)
1140 { this->stub_table_ = stub_table; }
1141
1142 // Downcast a base pointer to an Arm_input_section pointer. This is
1143 // not type-safe but we only use Arm_input_section not the base class.
1144 static Arm_input_section<big_endian>*
1145 as_arm_input_section(Output_relaxed_input_section* poris)
1146 { return static_cast<Arm_input_section<big_endian>*>(poris); }
1147
1148 protected:
1149 // Write data to output file.
1150 void
1151 do_write(Output_file*);
1152
1153 // Return required alignment of this.
1154 uint64_t
1155 do_addralign() const
1156 {
1157 if (this->is_stub_table_owner())
1158 return std::max(this->stub_table_->addralign(),
1159 this->original_addralign_);
1160 else
1161 return this->original_addralign_;
1162 }
1163
1164 // Finalize data size.
1165 void
1166 set_final_data_size();
1167
1168 // Reset address and file offset.
1169 void
1170 do_reset_address_and_file_offset();
1171
1172 // Output offset.
1173 bool
1174 do_output_offset(const Relobj* object, unsigned int shndx,
1175 section_offset_type offset,
1176 section_offset_type* poutput) const
1177 {
1178 if ((object == this->relobj())
1179 && (shndx == this->shndx())
1180 && (offset >= 0)
1181 && (convert_types<uint64_t, section_offset_type>(offset)
1182 <= this->original_size_))
1183 {
1184 *poutput = offset;
1185 return true;
1186 }
1187 else
1188 return false;
1189 }
1190
1191 private:
1192 // Copying is not allowed.
1193 Arm_input_section(const Arm_input_section&);
1194 Arm_input_section& operator=(const Arm_input_section&);
1195
1196 // Address alignment of the original input section.
1197 uint64_t original_addralign_;
1198 // Section size of the original input section.
1199 uint64_t original_size_;
1200 // Stub table.
1201 Stub_table<big_endian>* stub_table_;
1202 };
1203
1204 // Arm_exidx_fixup class. This is used to define a number of methods
1205 // and keep state for fixing up EXIDX coverage.
1206
1207 class Arm_exidx_fixup
1208 {
1209 public:
1210 Arm_exidx_fixup(Output_section* exidx_output_section)
1211 : exidx_output_section_(exidx_output_section), last_unwind_type_(UT_NONE),
1212 last_inlined_entry_(0), last_input_section_(NULL),
1213 section_offset_map_(NULL)
1214 { }
1215
1216 ~Arm_exidx_fixup()
1217 { delete this->section_offset_map_; }
1218
1219 // Process an EXIDX section for entry merging. Return the number of bytes to
1220 // be deleted in the output. If parts of the input EXIDX section are merged,
1221 // a heap-allocated Arm_exidx_section_offset_map is stored in the location
1222 // pointed to by PSECTION_OFFSET_MAP. The caller owns the map and is
1223 // responsible for releasing it.
1224 template<bool big_endian>
1225 uint32_t
1226 process_exidx_section(const Arm_exidx_input_section* exidx_input_section,
1227 Arm_exidx_section_offset_map** psection_offset_map);
1228
1229 // Append an EXIDX_CANTUNWIND entry pointing at the end of the last
1230 // input section, if there is not one already.
1231 void
1232 add_exidx_cantunwind_as_needed();
1233
1234 private:
1235 // Copying is not allowed.
1236 Arm_exidx_fixup(const Arm_exidx_fixup&);
1237 Arm_exidx_fixup& operator=(const Arm_exidx_fixup&);
1238
1239 // Type of EXIDX unwind entry.
1240 enum Unwind_type
1241 {
1242 // No type.
1243 UT_NONE,
1244 // EXIDX_CANTUNWIND.
1245 UT_EXIDX_CANTUNWIND,
1246 // Inlined entry.
1247 UT_INLINED_ENTRY,
1248 // Normal entry.
1249 UT_NORMAL_ENTRY,
1250 };
1251
1252 // Process an EXIDX entry. We only care about the second word of the
1253 // entry. Return true if the entry can be deleted.
1254 bool
1255 process_exidx_entry(uint32_t second_word);
1256
1257 // Update the current section offset map during EXIDX section fix-up.
1258 // If there is no map, create one. INPUT_OFFSET is the offset of a
1259 // reference point, DELETED_BYTES is the number of bytes deleted in the
1260 // section so far. If DELETE_ENTRY is true, the reference point and
1261 // all offsets after the previous reference point are discarded.
1262 void
1263 update_offset_map(section_offset_type input_offset,
1264 section_size_type deleted_bytes, bool delete_entry);
1265
1266 // EXIDX output section.
1267 Output_section* exidx_output_section_;
1268 // Unwind type of the last EXIDX entry processed.
1269 Unwind_type last_unwind_type_;
1270 // Last seen inlined EXIDX entry.
1271 uint32_t last_inlined_entry_;
1272 // Last processed EXIDX input section.
1273 const Arm_exidx_input_section* last_input_section_;
1274 // Section offset map created in process_exidx_section.
1275 Arm_exidx_section_offset_map* section_offset_map_;
1276 };
1277
1278 // Arm output section class. This is defined mainly to add a number of
1279 // stub generation methods.
1280
1281 template<bool big_endian>
1282 class Arm_output_section : public Output_section
1283 {
1284 public:
1285 typedef std::vector<std::pair<Relobj*, unsigned int> > Text_section_list;
1286
1287 Arm_output_section(const char* name, elfcpp::Elf_Word type,
1288 elfcpp::Elf_Xword flags)
1289 : Output_section(name, type, flags)
1290 { }
1291
1292 ~Arm_output_section()
1293 { }
1294
1295 // Group input sections for stub generation.
1296 void
1297 group_sections(section_size_type, bool, Target_arm<big_endian>*);
1298
1299 // Downcast a base pointer to an Arm_output_section pointer. This is
1300 // not type-safe but we only use Arm_output_section not the base class.
1301 static Arm_output_section<big_endian>*
1302 as_arm_output_section(Output_section* os)
1303 { return static_cast<Arm_output_section<big_endian>*>(os); }
1304
1305 // Append all input text sections in this into LIST.
1306 void
1307 append_text_sections_to_list(Text_section_list* list);
1308
1309 // Fix EXIDX coverage of this EXIDX output section. SORTED_TEXT_SECTION
1310 // is a list of text input sections sorted in ascending order of their
1311 // output addresses.
1312 void
1313 fix_exidx_coverage(const Text_section_list& sorted_text_section,
1314 Symbol_table* symtab);
1315
1316 private:
1317 // For convenience.
1318 typedef Output_section::Input_section Input_section;
1319 typedef Output_section::Input_section_list Input_section_list;
1320
1321 // Create a stub group.
1322 void create_stub_group(Input_section_list::const_iterator,
1323 Input_section_list::const_iterator,
1324 Input_section_list::const_iterator,
1325 Target_arm<big_endian>*,
1326 std::vector<Output_relaxed_input_section*>*);
1327 };
1328
1329 // Arm_exidx_input_section class. This represents an EXIDX input section.
1330
1331 class Arm_exidx_input_section
1332 {
1333 public:
1334 static const section_offset_type invalid_offset =
1335 static_cast<section_offset_type>(-1);
1336
1337 Arm_exidx_input_section(Relobj* relobj, unsigned int shndx,
1338 unsigned int link, uint32_t size, uint32_t addralign)
1339 : relobj_(relobj), shndx_(shndx), link_(link), size_(size),
1340 addralign_(addralign)
1341 { }
1342
1343 ~Arm_exidx_input_section()
1344 { }
1345
1346 // Accessors: This is a read-only class.
1347
1348 // Return the object containing this EXIDX input section.
1349 Relobj*
1350 relobj() const
1351 { return this->relobj_; }
1352
1353 // Return the section index of this EXIDX input section.
1354 unsigned int
1355 shndx() const
1356 { return this->shndx_; }
1357
1358 // Return the section index of linked text section in the same object.
1359 unsigned int
1360 link() const
1361 { return this->link_; }
1362
1363 // Return size of the EXIDX input section.
1364 uint32_t
1365 size() const
1366 { return this->size_; }
1367
1368 // Return the address alignment of the EXIDX input section.
1369 uint32_t
1370 addralign() const
1371 { return this->addralign_; }
1372
1373 private:
1374 // Object containing this.
1375 Relobj* relobj_;
1376 // Section index of this.
1377 unsigned int shndx_;
1378 // Text section linked to this in the same object.
1379 unsigned int link_;
1380 // Size of this. For ARM, 32 bits are sufficient.
1381 uint32_t size_;
1382 // Address alignment of this. For ARM, 32 bits are sufficient.
1383 uint32_t addralign_;
1384 };
1385
1386 // Arm_relobj class.
1387
1388 template<bool big_endian>
1389 class Arm_relobj : public Sized_relobj<32, big_endian>
1390 {
1391 public:
1392 static const Arm_address invalid_address = static_cast<Arm_address>(-1);
1393
1394 Arm_relobj(const std::string& name, Input_file* input_file, off_t offset,
1395 const typename elfcpp::Ehdr<32, big_endian>& ehdr)
1396 : Sized_relobj<32, big_endian>(name, input_file, offset, ehdr),
1397 stub_tables_(), local_symbol_is_thumb_function_(),
1398 attributes_section_data_(NULL), mapping_symbols_info_(),
1399 section_has_cortex_a8_workaround_(NULL), exidx_section_map_(),
1400 output_local_symbol_count_needs_update_(false)
1401 { }
1402
1403 ~Arm_relobj()
1404 { delete this->attributes_section_data_; }
1405
1406 // Return the stub table of the SHNDX-th section if there is one.
1407 Stub_table<big_endian>*
1408 stub_table(unsigned int shndx) const
1409 {
1410 gold_assert(shndx < this->stub_tables_.size());
1411 return this->stub_tables_[shndx];
1412 }
1413
1414 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1415 void
1416 set_stub_table(unsigned int shndx, Stub_table<big_endian>* stub_table)
1417 {
1418 gold_assert(shndx < this->stub_tables_.size());
1419 this->stub_tables_[shndx] = stub_table;
1420 }
1421
1422 // Whether a local symbol is a THUMB function. R_SYM is the symbol table
1423 // index. This is only valid after do_count_local_symbols is called.
1424 bool
1425 local_symbol_is_thumb_function(unsigned int r_sym) const
1426 {
1427 gold_assert(r_sym < this->local_symbol_is_thumb_function_.size());
1428 return this->local_symbol_is_thumb_function_[r_sym];
1429 }
1430
1431 // Scan all relocation sections for stub generation.
1432 void
1433 scan_sections_for_stubs(Target_arm<big_endian>*, const Symbol_table*,
1434 const Layout*);
1435
1436 // Convert regular input section with index SHNDX to a relaxed section.
1437 void
1438 convert_input_section_to_relaxed_section(unsigned shndx)
1439 {
1440 // The stubs have relocations and we need to process them after writing
1441 // out the stubs. So relocations must now follow section writes.
1442 this->set_section_offset(shndx, -1ULL);
1443 this->set_relocs_must_follow_section_writes();
1444 }
1445
1446 // Downcast a base pointer to an Arm_relobj pointer. This is
1447 // not type-safe but we only use Arm_relobj not the base class.
1448 static Arm_relobj<big_endian>*
1449 as_arm_relobj(Relobj* relobj)
1450 { return static_cast<Arm_relobj<big_endian>*>(relobj); }
1451
1452 // Processor-specific flags in ELF file header. This is valid only after
1453 // reading symbols.
1454 elfcpp::Elf_Word
1455 processor_specific_flags() const
1456 { return this->processor_specific_flags_; }
1457
1458 // Attributes section data. This is the contents of the .ARM.attributes section
1459 // if there is one.
1460 const Attributes_section_data*
1461 attributes_section_data() const
1462 { return this->attributes_section_data_; }
1463
1464 // Mapping symbol location.
1465 typedef std::pair<unsigned int, Arm_address> Mapping_symbol_position;
1466
1467 // Functor for STL container.
1468 struct Mapping_symbol_position_less
1469 {
1470 bool
1471 operator()(const Mapping_symbol_position& p1,
1472 const Mapping_symbol_position& p2) const
1473 {
1474 return (p1.first < p2.first
1475 || (p1.first == p2.first && p1.second < p2.second));
1476 }
1477 };
1478
1479 // We only care about the first character of a mapping symbol, so
1480 // we only store that instead of the whole symbol name.
1481 typedef std::map<Mapping_symbol_position, char,
1482 Mapping_symbol_position_less> Mapping_symbols_info;
1483
1484 // Whether a section contains any Cortex-A8 workaround.
1485 bool
1486 section_has_cortex_a8_workaround(unsigned int shndx) const
1487 {
1488 return (this->section_has_cortex_a8_workaround_ != NULL
1489 && (*this->section_has_cortex_a8_workaround_)[shndx]);
1490 }
1491
1492 // Mark a section that has Cortex-A8 workaround.
1493 void
1494 mark_section_for_cortex_a8_workaround(unsigned int shndx)
1495 {
1496 if (this->section_has_cortex_a8_workaround_ == NULL)
1497 this->section_has_cortex_a8_workaround_ =
1498 new std::vector<bool>(this->shnum(), false);
1499 (*this->section_has_cortex_a8_workaround_)[shndx] = true;
1500 }
1501
1502 // Return the EXIDX section of a text section with index SHNDX or NULL
1503 // if the text section has no associated EXIDX section.
1504 const Arm_exidx_input_section*
1505 exidx_input_section_by_link(unsigned int shndx) const
1506 {
1507 Exidx_section_map::const_iterator p = this->exidx_section_map_.find(shndx);
1508 return ((p != this->exidx_section_map_.end()
1509 && p->second->link() == shndx)
1510 ? p->second
1511 : NULL);
1512 }
1513
1514 // Return the EXIDX section with index SHNDX or NULL if there is none.
1515 const Arm_exidx_input_section*
1516 exidx_input_section_by_shndx(unsigned shndx) const
1517 {
1518 Exidx_section_map::const_iterator p = this->exidx_section_map_.find(shndx);
1519 return ((p != this->exidx_section_map_.end()
1520 && p->second->shndx() == shndx)
1521 ? p->second
1522 : NULL);
1523 }
1524
1525 // Whether output local symbol count needs updating.
1526 bool
1527 output_local_symbol_count_needs_update() const
1528 { return this->output_local_symbol_count_needs_update_; }
1529
1530 // Set output_local_symbol_count_needs_update flag to be true.
1531 void
1532 set_output_local_symbol_count_needs_update()
1533 { this->output_local_symbol_count_needs_update_ = true; }
1534
1535 // Update output local symbol count at the end of relaxation.
1536 void
1537 update_output_local_symbol_count();
1538
1539 protected:
1540 // Post constructor setup.
1541 void
1542 do_setup()
1543 {
1544 // Call parent's setup method.
1545 Sized_relobj<32, big_endian>::do_setup();
1546
1547 // Initialize look-up tables.
1548 Stub_table_list empty_stub_table_list(this->shnum(), NULL);
1549 this->stub_tables_.swap(empty_stub_table_list);
1550 }
1551
1552 // Count the local symbols.
1553 void
1554 do_count_local_symbols(Stringpool_template<char>*,
1555 Stringpool_template<char>*);
1556
1557 void
1558 do_relocate_sections(const Symbol_table* symtab, const Layout* layout,
1559 const unsigned char* pshdrs,
1560 typename Sized_relobj<32, big_endian>::Views* pivews);
1561
1562 // Read the symbol information.
1563 void
1564 do_read_symbols(Read_symbols_data* sd);
1565
1566 // Process relocs for garbage collection.
1567 void
1568 do_gc_process_relocs(Symbol_table*, Layout*, Read_relocs_data*);
1569
1570 private:
1571
1572 // Whether a section needs to be scanned for relocation stubs.
1573 bool
1574 section_needs_reloc_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1575 const Relobj::Output_sections&,
1576 const Symbol_table *, const unsigned char*);
1577
1578 // Whether a section needs to be scanned for the Cortex-A8 erratum.
1579 bool
1580 section_needs_cortex_a8_stub_scanning(const elfcpp::Shdr<32, big_endian>&,
1581 unsigned int, Output_section*,
1582 const Symbol_table *);
1583
1584 // Scan a section for the Cortex-A8 erratum.
1585 void
1586 scan_section_for_cortex_a8_erratum(const elfcpp::Shdr<32, big_endian>&,
1587 unsigned int, Output_section*,
1588 Target_arm<big_endian>*);
1589
1590 // Make a new Arm_exidx_input_section object for EXIDX section with
1591 // index SHNDX and section header SHDR.
1592 void
1593 make_exidx_input_section(unsigned int shndx,
1594 const elfcpp::Shdr<32, big_endian>& shdr);
1595
1596 typedef std::vector<Stub_table<big_endian>*> Stub_table_list;
1597 typedef Unordered_map<unsigned int, const Arm_exidx_input_section*>
1598 Exidx_section_map;
1599
1600 // List of stub tables.
1601 Stub_table_list stub_tables_;
1602 // Bit vector to tell if a local symbol is a thumb function or not.
1603 // This is only valid after do_count_local_symbols is called.
1604 std::vector<bool> local_symbol_is_thumb_function_;
1605 // processor-specific flags in ELF file header.
1606 elfcpp::Elf_Word processor_specific_flags_;
1607 // Object attributes if there is an .ARM.attributes section or NULL.
1608 Attributes_section_data* attributes_section_data_;
1609 // Mapping symbols information.
1610 Mapping_symbols_info mapping_symbols_info_;
1611 // Bitmap to indicate sections with Cortex-A8 workaround or NULL.
1612 std::vector<bool>* section_has_cortex_a8_workaround_;
1613 // Map a text section to its associated .ARM.exidx section, if there is one.
1614 Exidx_section_map exidx_section_map_;
1615 // Whether output local symbol count needs updating.
1616 bool output_local_symbol_count_needs_update_;
1617 };
1618
1619 // Arm_dynobj class.
1620
1621 template<bool big_endian>
1622 class Arm_dynobj : public Sized_dynobj<32, big_endian>
1623 {
1624 public:
1625 Arm_dynobj(const std::string& name, Input_file* input_file, off_t offset,
1626 const elfcpp::Ehdr<32, big_endian>& ehdr)
1627 : Sized_dynobj<32, big_endian>(name, input_file, offset, ehdr),
1628 processor_specific_flags_(0), attributes_section_data_(NULL)
1629 { }
1630
1631 ~Arm_dynobj()
1632 { delete this->attributes_section_data_; }
1633
1634 // Downcast a base pointer to an Arm_dynobj pointer. This is
1635 // not type-safe but we only use Arm_dynobj not the base class.
1636 static Arm_dynobj<big_endian>*
1637 as_arm_dynobj(Dynobj* dynobj)
1638 { return static_cast<Arm_dynobj<big_endian>*>(dynobj); }
1639
1640 // Processor-specific flags in ELF file header. This is valid only after
1641 // reading symbols.
1642 elfcpp::Elf_Word
1643 processor_specific_flags() const
1644 { return this->processor_specific_flags_; }
1645
1646 // Attributes section data.
1647 const Attributes_section_data*
1648 attributes_section_data() const
1649 { return this->attributes_section_data_; }
1650
1651 protected:
1652 // Read the symbol information.
1653 void
1654 do_read_symbols(Read_symbols_data* sd);
1655
1656 private:
1657 // processor-specific flags in ELF file header.
1658 elfcpp::Elf_Word processor_specific_flags_;
1659 // Object attributes if there is an .ARM.attributes section or NULL.
1660 Attributes_section_data* attributes_section_data_;
1661 };
1662
1663 // Functor to read reloc addends during stub generation.
1664
1665 template<int sh_type, bool big_endian>
1666 struct Stub_addend_reader
1667 {
1668 // Return the addend for a relocation of a particular type. Depending
1669 // on whether this is a REL or RELA relocation, read the addend from a
1670 // view or from a Reloc object.
1671 elfcpp::Elf_types<32>::Elf_Swxword
1672 operator()(
1673 unsigned int /* r_type */,
1674 const unsigned char* /* view */,
1675 const typename Reloc_types<sh_type,
1676 32, big_endian>::Reloc& /* reloc */) const;
1677 };
1678
1679 // Specialized Stub_addend_reader for SHT_REL type relocation sections.
1680
1681 template<bool big_endian>
1682 struct Stub_addend_reader<elfcpp::SHT_REL, big_endian>
1683 {
1684 elfcpp::Elf_types<32>::Elf_Swxword
1685 operator()(
1686 unsigned int,
1687 const unsigned char*,
1688 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const;
1689 };
1690
1691 // Specialized Stub_addend_reader for RELA type relocation sections.
1692 // We currently do not handle RELA type relocation sections but it is trivial
1693 // to implement the addend reader. This is provided for completeness and to
1694 // make it easier to add support for RELA relocation sections in the future.
1695
1696 template<bool big_endian>
1697 struct Stub_addend_reader<elfcpp::SHT_RELA, big_endian>
1698 {
1699 elfcpp::Elf_types<32>::Elf_Swxword
1700 operator()(
1701 unsigned int,
1702 const unsigned char*,
1703 const typename Reloc_types<elfcpp::SHT_RELA, 32,
1704 big_endian>::Reloc& reloc) const
1705 { return reloc.get_r_addend(); }
1706 };
1707
1708 // Cortex_a8_reloc class. We keep a record of relocations that may need
1709 // the Cortex-A8 erratum workaround.
1710
1711 class Cortex_a8_reloc
1712 {
1713 public:
1714 Cortex_a8_reloc(Reloc_stub* reloc_stub, unsigned r_type,
1715 Arm_address destination)
1716 : reloc_stub_(reloc_stub), r_type_(r_type), destination_(destination)
1717 { }
1718
1719 ~Cortex_a8_reloc()
1720 { }
1721
1722 // Accessors: This is a read-only class.
1723
1724 // Return the relocation stub associated with this relocation if there is
1725 // one.
1726 const Reloc_stub*
1727 reloc_stub() const
1728 { return this->reloc_stub_; }
1729
1730 // Return the relocation type.
1731 unsigned int
1732 r_type() const
1733 { return this->r_type_; }
1734
1735 // Return the destination address of the relocation. LSB stores the THUMB
1736 // bit.
1737 Arm_address
1738 destination() const
1739 { return this->destination_; }
1740
1741 private:
1742 // Associated relocation stub if there is one, or NULL.
1743 const Reloc_stub* reloc_stub_;
1744 // Relocation type.
1745 unsigned int r_type_;
1746 // Destination address of this relocation. LSB is used to distinguish
1747 // ARM/THUMB mode.
1748 Arm_address destination_;
1749 };
1750
1751 // Utilities for manipulating integers of up to 32 bits.
1752
1753 namespace utils
1754 {
1755 // Sign extend an n-bit unsigned integer stored in a uint32_t into
1756 // an int32_t. NO_BITS must be between 1 and 32.
1757 template<int no_bits>
1758 static inline int32_t
1759 sign_extend(uint32_t bits)
1760 {
1761 gold_assert(no_bits >= 0 && no_bits <= 32);
1762 if (no_bits == 32)
1763 return static_cast<int32_t>(bits);
1764 uint32_t mask = (~((uint32_t) 0)) >> (32 - no_bits);
1765 bits &= mask;
1766 uint32_t top_bit = 1U << (no_bits - 1);
1767 int32_t as_signed = static_cast<int32_t>(bits);
1768 return (bits & top_bit) ? as_signed + (-top_bit * 2) : as_signed;
1769 }
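
// For example, sign_extend<24>(0x00fffffe) masks the value to 24 bits,
// sees that bit 23 is set, and returns 0xfffffe - 0x1000000 = -2, the
// signed interpretation of the 24-bit field.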
1770
1771 // Detects overflow of a NO_BITS integer stored in a uint32_t.
1772 template<int no_bits>
1773 static inline bool
1774 has_overflow(uint32_t bits)
1775 {
1776 gold_assert(no_bits >= 0 && no_bits <= 32);
1777 if (no_bits == 32)
1778 return false;
1779 int32_t max = (1 << (no_bits - 1)) - 1;
1780 int32_t min = -(1 << (no_bits - 1));
1781 int32_t as_signed = static_cast<int32_t>(bits);
1782 return as_signed > max || as_signed < min;
1783 }
1784
1785 // Detects overflow of a NO_BITS integer stored in a uint32_t. The value
1786 // is considered to fit if it is representable in NO_BITS bits as either
1787 // a signed or an unsigned value. For example,
1788 // has_signed_unsigned_overflow<8> returns false as long as -128 <= bits <= 255.
1789 template<int no_bits>
1790 static inline bool
1791 has_signed_unsigned_overflow(uint32_t bits)
1792 {
1793 gold_assert(no_bits >= 2 && no_bits <= 32);
1794 if (no_bits == 32)
1795 return false;
1796 int32_t max = static_cast<int32_t>((1U << no_bits) - 1);
1797 int32_t min = -(1 << (no_bits - 1));
1798 int32_t as_signed = static_cast<int32_t>(bits);
1799 return as_signed > max || as_signed < min;
1800 }
1801
1802 // Select bits from A and B using bits in MASK. For each n in [0..31],
1803 // the n-th bit in the result is chosen from the n-th bits of A and B.
1804 // A zero selects A and a one selects B.
1805 static inline uint32_t
1806 bit_select(uint32_t a, uint32_t b, uint32_t mask)
1807 { return (a & ~mask) | (b & mask); }
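// For example, bit_select(0x0000ffff, 0x12345678, 0xff00ff00) returns
// 0x120056ff: the bytes selected by MASK come from B, the rest from A.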
1808 };
1809
1810 template<bool big_endian>
1811 class Target_arm : public Sized_target<32, big_endian>
1812 {
1813 public:
1814 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
1815 Reloc_section;
1816
1817 // When we are relocating a stub, we pass this as the relocation number.
1818 static const size_t fake_relnum_for_stubs = static_cast<size_t>(-1);
1819
1820 Target_arm()
1821 : Sized_target<32, big_endian>(&arm_info),
1822 got_(NULL), plt_(NULL), got_plt_(NULL), rel_dyn_(NULL),
1823 copy_relocs_(elfcpp::R_ARM_COPY), dynbss_(NULL), stub_tables_(),
1824 stub_factory_(Stub_factory::get_instance()), may_use_blx_(false),
1825 should_force_pic_veneer_(false), arm_input_section_map_(),
1826 attributes_section_data_(NULL), fix_cortex_a8_(false),
1827 cortex_a8_relocs_info_()
1828 { }
1829
1830 // Whether we can use BLX.
1831 bool
1832 may_use_blx() const
1833 { return this->may_use_blx_; }
1834
1835 // Set use-BLX flag.
1836 void
1837 set_may_use_blx(bool value)
1838 { this->may_use_blx_ = value; }
1839
1840 // Whether we force PIC branch veneers.
1841 bool
1842 should_force_pic_veneer() const
1843 { return this->should_force_pic_veneer_; }
1844
1845 // Set PIC veneer flag.
1846 void
1847 set_should_force_pic_veneer(bool value)
1848 { this->should_force_pic_veneer_ = value; }
1849
1850 // Whether we use THUMB-2 instructions.
1851 bool
1852 using_thumb2() const
1853 {
1854 Object_attribute* attr =
1855 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1856 int arch = attr->int_value();
1857 return arch == elfcpp::TAG_CPU_ARCH_V6T2 || arch >= elfcpp::TAG_CPU_ARCH_V7;
1858 }
1859
1860 // Whether we use THUMB/THUMB-2 instructions only.
1861 bool
1862 using_thumb_only() const
1863 {
1864 Object_attribute* attr =
1865 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1866 if (attr->int_value() != elfcpp::TAG_CPU_ARCH_V7
1867 && attr->int_value() != elfcpp::TAG_CPU_ARCH_V7E_M)
1868 return false;
1869 attr = this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
1870 return attr->int_value() == 'M';
1871 }
1872
1873 // Whether we have a NOP instruction. If not, use mov r0, r0 instead.
1874 bool
1875 may_use_arm_nop() const
1876 {
1877 Object_attribute* attr =
1878 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1879 int arch = attr->int_value();
1880 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1881 || arch == elfcpp::TAG_CPU_ARCH_V6K
1882 || arch == elfcpp::TAG_CPU_ARCH_V7
1883 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1884 }
1885
1886 // Whether we have the THUMB-2 NOP.W instruction.
1887 bool
1888 may_use_thumb2_nop() const
1889 {
1890 Object_attribute* attr =
1891 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
1892 int arch = attr->int_value();
1893 return (arch == elfcpp::TAG_CPU_ARCH_V6T2
1894 || arch == elfcpp::TAG_CPU_ARCH_V7
1895 || arch == elfcpp::TAG_CPU_ARCH_V7E_M);
1896 }
1897
1898 // Process the relocations to determine unreferenced sections for
1899 // garbage collection.
1900 void
1901 gc_process_relocs(Symbol_table* symtab,
1902 Layout* layout,
1903 Sized_relobj<32, big_endian>* object,
1904 unsigned int data_shndx,
1905 unsigned int sh_type,
1906 const unsigned char* prelocs,
1907 size_t reloc_count,
1908 Output_section* output_section,
1909 bool needs_special_offset_handling,
1910 size_t local_symbol_count,
1911 const unsigned char* plocal_symbols);
1912
1913 // Scan the relocations to look for symbol adjustments.
1914 void
1915 scan_relocs(Symbol_table* symtab,
1916 Layout* layout,
1917 Sized_relobj<32, big_endian>* object,
1918 unsigned int data_shndx,
1919 unsigned int sh_type,
1920 const unsigned char* prelocs,
1921 size_t reloc_count,
1922 Output_section* output_section,
1923 bool needs_special_offset_handling,
1924 size_t local_symbol_count,
1925 const unsigned char* plocal_symbols);
1926
1927 // Finalize the sections.
1928 void
1929 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
1930
1931 // Return the value to use for a dynamic symbol which requires special
1932 // treatment.
1933 uint64_t
1934 do_dynsym_value(const Symbol*) const;
1935
1936 // Relocate a section.
1937 void
1938 relocate_section(const Relocate_info<32, big_endian>*,
1939 unsigned int sh_type,
1940 const unsigned char* prelocs,
1941 size_t reloc_count,
1942 Output_section* output_section,
1943 bool needs_special_offset_handling,
1944 unsigned char* view,
1945 Arm_address view_address,
1946 section_size_type view_size,
1947 const Reloc_symbol_changes*);
1948
1949 // Scan the relocs during a relocatable link.
1950 void
1951 scan_relocatable_relocs(Symbol_table* symtab,
1952 Layout* layout,
1953 Sized_relobj<32, big_endian>* object,
1954 unsigned int data_shndx,
1955 unsigned int sh_type,
1956 const unsigned char* prelocs,
1957 size_t reloc_count,
1958 Output_section* output_section,
1959 bool needs_special_offset_handling,
1960 size_t local_symbol_count,
1961 const unsigned char* plocal_symbols,
1962 Relocatable_relocs*);
1963
1964 // Relocate a section during a relocatable link.
1965 void
1966 relocate_for_relocatable(const Relocate_info<32, big_endian>*,
1967 unsigned int sh_type,
1968 const unsigned char* prelocs,
1969 size_t reloc_count,
1970 Output_section* output_section,
1971 off_t offset_in_output_section,
1972 const Relocatable_relocs*,
1973 unsigned char* view,
1974 Arm_address view_address,
1975 section_size_type view_size,
1976 unsigned char* reloc_view,
1977 section_size_type reloc_view_size);
1978
1979 // Return whether SYM is defined by the ABI.
1980 bool
1981 do_is_defined_by_abi(Symbol* sym) const
1982 { return strcmp(sym->name(), "__tls_get_addr") == 0; }
1983
1984 // Return the size of the GOT section.
1985 section_size_type
1986 got_size()
1987 {
1988 gold_assert(this->got_ != NULL);
1989 return this->got_->data_size();
1990 }
1991
1992 // Map platform-specific reloc types
1993 static unsigned int
1994 get_real_reloc_type (unsigned int r_type);
1995
1996 //
1997 // Methods to support stub generation.
1998 //
1999
2000 // Return the stub factory
2001 const Stub_factory&
2002 stub_factory() const
2003 { return this->stub_factory_; }
2004
2005 // Make a new Arm_input_section object.
2006 Arm_input_section<big_endian>*
2007 new_arm_input_section(Relobj*, unsigned int);
2008
2009 // Find the Arm_input_section object corresponding to the SHNDX-th input
2010 // section of RELOBJ.
2011 Arm_input_section<big_endian>*
2012 find_arm_input_section(Relobj* relobj, unsigned int shndx) const;
2013
2014 // Make a new Stub_table
2015 Stub_table<big_endian>*
2016 new_stub_table(Arm_input_section<big_endian>*);
2017
2018 // Scan a section for stub generation.
2019 void
2020 scan_section_for_stubs(const Relocate_info<32, big_endian>*, unsigned int,
2021 const unsigned char*, size_t, Output_section*,
2022 bool, const unsigned char*, Arm_address,
2023 section_size_type);
2024
2025 // Relocate a stub.
2026 void
2027 relocate_stub(Stub*, const Relocate_info<32, big_endian>*,
2028 Output_section*, unsigned char*, Arm_address,
2029 section_size_type);
2030
2031 // Get the default ARM target.
2032 static Target_arm<big_endian>*
2033 default_target()
2034 {
2035 gold_assert(parameters->target().machine_code() == elfcpp::EM_ARM
2036 && parameters->target().is_big_endian() == big_endian);
2037 return static_cast<Target_arm<big_endian>*>(
2038 parameters->sized_target<32, big_endian>());
2039 }
2040
2041 // Whether a relocation type uses the LSB to distinguish THUMB addresses.
2042 static bool
2043 reloc_uses_thumb_bit(unsigned int r_type);
2044
2045 // Whether NAME belongs to a mapping symbol.
2046 static bool
2047 is_mapping_symbol_name(const char* name)
2048 {
2049 return (name
2050 && name[0] == '$'
2051 && (name[1] == 'a' || name[1] == 't' || name[1] == 'd')
2052 && (name[2] == '\0' || name[2] == '.'));
2053 }
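// For example, "$a", "$t", "$d" and "$d.1" are mapping symbol names
// (marking ARM code, THUMB code and data respectively), while "$x" or
// "$data" are not.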
2054
2055 // Whether we work around the Cortex-A8 erratum.
2056 bool
2057 fix_cortex_a8() const
2058 { return this->fix_cortex_a8_; }
2059
2060 // How we fix R_ARM_V4BX relocations:
2061 // 0 - do not fix
2062 // 1 - replace with MOV instruction (armv4 target)
2063 // 2 - make interworking veneer (>= armv4t targets only)
2064 General_options::Fix_v4bx
2065 fix_v4bx() const
2066 { return parameters->options().fix_v4bx(); }
2067
2068 // Scan a span of THUMB code section for Cortex-A8 erratum.
2069 void
2070 scan_span_for_cortex_a8_erratum(Arm_relobj<big_endian>*, unsigned int,
2071 section_size_type, section_size_type,
2072 const unsigned char*, Arm_address);
2073
2074 // Apply Cortex-A8 workaround to a branch.
2075 void
2076 apply_cortex_a8_workaround(const Cortex_a8_stub*, Arm_address,
2077 unsigned char*, Arm_address);
2078
2079 protected:
2080 // Make an ELF object.
2081 Object*
2082 do_make_elf_object(const std::string&, Input_file*, off_t,
2083 const elfcpp::Ehdr<32, big_endian>& ehdr);
2084
2085 Object*
2086 do_make_elf_object(const std::string&, Input_file*, off_t,
2087 const elfcpp::Ehdr<32, !big_endian>&)
2088 { gold_unreachable(); }
2089
2090 Object*
2091 do_make_elf_object(const std::string&, Input_file*, off_t,
2092 const elfcpp::Ehdr<64, false>&)
2093 { gold_unreachable(); }
2094
2095 Object*
2096 do_make_elf_object(const std::string&, Input_file*, off_t,
2097 const elfcpp::Ehdr<64, true>&)
2098 { gold_unreachable(); }
2099
2100 // Make an output section.
2101 Output_section*
2102 do_make_output_section(const char* name, elfcpp::Elf_Word type,
2103 elfcpp::Elf_Xword flags)
2104 { return new Arm_output_section<big_endian>(name, type, flags); }
2105
2106 void
2107 do_adjust_elf_header(unsigned char* view, int len) const;
2108
2109 // We only need to generate stubs, and hence perform relaxation, if we are
2110 // not doing a relocatable link.
2111 bool
2112 do_may_relax() const
2113 { return !parameters->options().relocatable(); }
2114
2115 bool
2116 do_relax(int, const Input_objects*, Symbol_table*, Layout*);
2117
2118 // Determine whether an object attribute tag takes an integer, a
2119 // string or both.
2120 int
2121 do_attribute_arg_type(int tag) const;
2122
2123 // Reorder tags during output.
2124 int
2125 do_attributes_order(int num) const;
2126
2127 private:
2128 // The class which scans relocations.
2129 class Scan
2130 {
2131 public:
2132 Scan()
2133 : issued_non_pic_error_(false)
2134 { }
2135
2136 inline void
2137 local(Symbol_table* symtab, Layout* layout, Target_arm* target,
2138 Sized_relobj<32, big_endian>* object,
2139 unsigned int data_shndx,
2140 Output_section* output_section,
2141 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
2142 const elfcpp::Sym<32, big_endian>& lsym);
2143
2144 inline void
2145 global(Symbol_table* symtab, Layout* layout, Target_arm* target,
2146 Sized_relobj<32, big_endian>* object,
2147 unsigned int data_shndx,
2148 Output_section* output_section,
2149 const elfcpp::Rel<32, big_endian>& reloc, unsigned int r_type,
2150 Symbol* gsym);
2151
2152 private:
2153 static void
2154 unsupported_reloc_local(Sized_relobj<32, big_endian>*,
2155 unsigned int r_type);
2156
2157 static void
2158 unsupported_reloc_global(Sized_relobj<32, big_endian>*,
2159 unsigned int r_type, Symbol*);
2160
2161 void
2162 check_non_pic(Relobj*, unsigned int r_type);
2163
2164 // Almost identical to Symbol::needs_plt_entry except that it also
2165 // handles STT_ARM_TFUNC.
2166 static bool
2167 symbol_needs_plt_entry(const Symbol* sym)
2168 {
2169 // An undefined symbol from an executable does not need a PLT entry.
2170 if (sym->is_undefined() && !parameters->options().shared())
2171 return false;
2172
2173 return (!parameters->doing_static_link()
2174 && (sym->type() == elfcpp::STT_FUNC
2175 || sym->type() == elfcpp::STT_ARM_TFUNC)
2176 && (sym->is_from_dynobj()
2177 || sym->is_undefined()
2178 || sym->is_preemptible()));
2179 }
2180
2181 // Whether we have issued an error about a non-PIC compilation.
2182 bool issued_non_pic_error_;
2183 };
2184
2185 // The class which implements relocation.
2186 class Relocate
2187 {
2188 public:
2189 Relocate()
2190 { }
2191
2192 ~Relocate()
2193 { }
2194
2195 // Return whether the static relocation needs to be applied.
2196 inline bool
2197 should_apply_static_reloc(const Sized_symbol<32>* gsym,
2198 int ref_flags,
2199 bool is_32bit,
2200 Output_section* output_section);
2201
2202 // Do a relocation. Return false if the caller should not issue
2203 // any warnings about this relocation.
2204 inline bool
2205 relocate(const Relocate_info<32, big_endian>*, Target_arm*,
2206 Output_section*, size_t relnum,
2207 const elfcpp::Rel<32, big_endian>&,
2208 unsigned int r_type, const Sized_symbol<32>*,
2209 const Symbol_value<32>*,
2210 unsigned char*, Arm_address,
2211 section_size_type);
2212
2213 // Return whether we want to pass flag NON_PIC_REF for this
2214 // reloc. This means the relocation type accesses a symbol not via
2215 // GOT or PLT.
2216 static inline bool
2217 reloc_is_non_pic (unsigned int r_type)
2218 {
2219 switch (r_type)
2220 {
2221 // These relocation types reference GOT or PLT entries explicitly.
2222 case elfcpp::R_ARM_GOT_BREL:
2223 case elfcpp::R_ARM_GOT_ABS:
2224 case elfcpp::R_ARM_GOT_PREL:
2225 case elfcpp::R_ARM_GOT_BREL12:
2226 case elfcpp::R_ARM_PLT32_ABS:
2227 case elfcpp::R_ARM_TLS_GD32:
2228 case elfcpp::R_ARM_TLS_LDM32:
2229 case elfcpp::R_ARM_TLS_IE32:
2230 case elfcpp::R_ARM_TLS_IE12GP:
2231
2232 // These relocation types may use PLT entries.
2233 case elfcpp::R_ARM_CALL:
2234 case elfcpp::R_ARM_THM_CALL:
2235 case elfcpp::R_ARM_JUMP24:
2236 case elfcpp::R_ARM_THM_JUMP24:
2237 case elfcpp::R_ARM_THM_JUMP19:
2238 case elfcpp::R_ARM_PLT32:
2239 case elfcpp::R_ARM_THM_XPC22:
2240 return false;
2241
2242 default:
2243 return true;
2244 }
2245 }
2246
2247 // Return whether we need to calculate the addressing origin of
2248 // the output segment defining the symbol - B(S).
2249 static bool
2250 reloc_needs_sym_origin(unsigned int r_type)
2251 {
2252 switch (r_type)
2253 {
2254 case elfcpp::R_ARM_SBREL32:
2255 case elfcpp::R_ARM_BASE_PREL:
2256 case elfcpp::R_ARM_BASE_ABS:
2257 case elfcpp::R_ARM_LDR_SBREL_11_0_NC:
2258 case elfcpp::R_ARM_ALU_SBREL_19_12_NC:
2259 case elfcpp::R_ARM_ALU_SBREL_27_20_CK:
2260 case elfcpp::R_ARM_SBREL31:
2261 case elfcpp::R_ARM_ALU_SB_G0_NC:
2262 case elfcpp::R_ARM_ALU_SB_G0:
2263 case elfcpp::R_ARM_ALU_SB_G1_NC:
2264 case elfcpp::R_ARM_ALU_SB_G1:
2265 case elfcpp::R_ARM_ALU_SB_G2:
2266 case elfcpp::R_ARM_LDR_SB_G0:
2267 case elfcpp::R_ARM_LDR_SB_G1:
2268 case elfcpp::R_ARM_LDR_SB_G2:
2269 case elfcpp::R_ARM_LDRS_SB_G0:
2270 case elfcpp::R_ARM_LDRS_SB_G1:
2271 case elfcpp::R_ARM_LDRS_SB_G2:
2272 case elfcpp::R_ARM_LDC_SB_G0:
2273 case elfcpp::R_ARM_LDC_SB_G1:
2274 case elfcpp::R_ARM_LDC_SB_G2:
2275 case elfcpp::R_ARM_MOVW_BREL_NC:
2276 case elfcpp::R_ARM_MOVT_BREL:
2277 case elfcpp::R_ARM_MOVW_BREL:
2278 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
2279 case elfcpp::R_ARM_THM_MOVT_BREL:
2280 case elfcpp::R_ARM_THM_MOVW_BREL:
2281 return true;
2282
2283 default:
2284 return false;
2285 }
2286 }
2287 };
2288
2289 // A class which returns the size required for a relocation type,
2290 // used while scanning relocs during a relocatable link.
2291 class Relocatable_size_for_reloc
2292 {
2293 public:
2294 unsigned int
2295 get_size_for_reloc(unsigned int, Relobj*);
2296 };
2297
2298 // Get the GOT section, creating it if necessary.
2299 Output_data_got<32, big_endian>*
2300 got_section(Symbol_table*, Layout*);
2301
2302 // Get the GOT PLT section.
2303 Output_data_space*
2304 got_plt_section() const
2305 {
2306 gold_assert(this->got_plt_ != NULL);
2307 return this->got_plt_;
2308 }
2309
2310 // Create a PLT entry for a global symbol.
2311 void
2312 make_plt_entry(Symbol_table*, Layout*, Symbol*);
2313
2314 // Get the PLT section.
2315 const Output_data_plt_arm<big_endian>*
2316 plt_section() const
2317 {
2318 gold_assert(this->plt_ != NULL);
2319 return this->plt_;
2320 }
2321
2322 // Get the dynamic reloc section, creating it if necessary.
2323 Reloc_section*
2324 rel_dyn_section(Layout*);
2325
2326 // Return true if the symbol may need a COPY relocation.
2327 // References from an executable object to non-function symbols
2328 // defined in a dynamic object may need a COPY relocation.
2329 bool
2330 may_need_copy_reloc(Symbol* gsym)
2331 {
2332 return (gsym->type() != elfcpp::STT_ARM_TFUNC
2333 && gsym->may_need_copy_reloc());
2334 }
2335
2336 // Add a potential copy relocation.
2337 void
2338 copy_reloc(Symbol_table* symtab, Layout* layout,
2339 Sized_relobj<32, big_endian>* object,
2340 unsigned int shndx, Output_section* output_section,
2341 Symbol* sym, const elfcpp::Rel<32, big_endian>& reloc)
2342 {
2343 this->copy_relocs_.copy_reloc(symtab, layout,
2344 symtab->get_sized_symbol<32>(sym),
2345 object, shndx, output_section, reloc,
2346 this->rel_dyn_section(layout));
2347 }
2348
2349 // Whether two EABI versions are compatible.
2350 static bool
2351 are_eabi_versions_compatible(elfcpp::Elf_Word v1, elfcpp::Elf_Word v2);
2352
2353 // Merge processor-specific flags from input object and those in the ELF
2354 // header of the output.
2355 void
2356 merge_processor_specific_flags(const std::string&, elfcpp::Elf_Word);
2357
2358 // Get the secondary compatible architecture.
2359 static int
2360 get_secondary_compatible_arch(const Attributes_section_data*);
2361
2362 // Set the secondary compatible architecture.
2363 static void
2364 set_secondary_compatible_arch(Attributes_section_data*, int);
2365
2366 static int
2367 tag_cpu_arch_combine(const char*, int, int*, int, int);
2368
2369 // Helper to print AEABI enum tag value.
2370 static std::string
2371 aeabi_enum_name(unsigned int);
2372
2373 // Return string value for TAG_CPU_name.
2374 static std::string
2375 tag_cpu_name_value(unsigned int);
2376
2377 // Merge object attributes from input object and those in the output.
2378 void
2379 merge_object_attributes(const char*, const Attributes_section_data*);
2380
2381 // Helper to get an AEABI object attribute
2382 Object_attribute*
2383 get_aeabi_object_attribute(int tag) const
2384 {
2385 Attributes_section_data* pasd = this->attributes_section_data_;
2386 gold_assert(pasd != NULL);
2387 Object_attribute* attr =
2388 pasd->get_attribute(Object_attribute::OBJ_ATTR_PROC, tag);
2389 gold_assert(attr != NULL);
2390 return attr;
2391 }
2392
2393 //
2394 // Methods to support stub generation.
2395 //
2396
2397 // Group input sections for stub generation.
2398 void
2399 group_sections(Layout*, section_size_type, bool);
2400
2401 // Scan a relocation for stub generation.
2402 void
2403 scan_reloc_for_stub(const Relocate_info<32, big_endian>*, unsigned int,
2404 const Sized_symbol<32>*, unsigned int,
2405 const Symbol_value<32>*,
2406 elfcpp::Elf_types<32>::Elf_Swxword, Arm_address);
2407
2408 // Scan a relocation section for stubs.
2409 template<int sh_type>
2410 void
2411 scan_reloc_section_for_stubs(
2412 const Relocate_info<32, big_endian>* relinfo,
2413 const unsigned char* prelocs,
2414 size_t reloc_count,
2415 Output_section* output_section,
2416 bool needs_special_offset_handling,
2417 const unsigned char* view,
2418 elfcpp::Elf_types<32>::Elf_Addr view_address,
2419 section_size_type);
2420
2421 // Fix .ARM.exidx section coverage.
2422 void
2423 fix_exidx_coverage(Layout*, Arm_output_section<big_endian>*, Symbol_table*);
2424
2425 // Functors for STL set.
2426 struct output_section_address_less_than
2427 {
2428 bool
2429 operator()(const Output_section* s1, const Output_section* s2) const
2430 { return s1->address() < s2->address(); }
2431 };
2432
2433 // Information about this specific target which we pass to the
2434 // general Target structure.
2435 static const Target::Target_info arm_info;
2436
2437 // The types of GOT entries needed for this platform.
2438 enum Got_type
2439 {
2440 GOT_TYPE_STANDARD = 0 // GOT entry for a regular symbol
2441 };
2442
2443 typedef typename std::vector<Stub_table<big_endian>*> Stub_table_list;
2444
2445 // Map input section to Arm_input_section.
2446 typedef Unordered_map<Section_id,
2447 Arm_input_section<big_endian>*,
2448 Section_id_hash>
2449 Arm_input_section_map;
2450
2451 // Map output addresses to relocs for Cortex-A8 erratum.
2452 typedef Unordered_map<Arm_address, const Cortex_a8_reloc*>
2453 Cortex_a8_relocs_info;
2454
2455 // The GOT section.
2456 Output_data_got<32, big_endian>* got_;
2457 // The PLT section.
2458 Output_data_plt_arm<big_endian>* plt_;
2459 // The GOT PLT section.
2460 Output_data_space* got_plt_;
2461 // The dynamic reloc section.
2462 Reloc_section* rel_dyn_;
2463 // Relocs saved to avoid a COPY reloc.
2464 Copy_relocs<elfcpp::SHT_REL, 32, big_endian> copy_relocs_;
2465 // Space for variables copied with a COPY reloc.
2466 Output_data_space* dynbss_;
2467 // Vector of Stub_tables created.
2468 Stub_table_list stub_tables_;
2469 // Stub factory.
2470 const Stub_factory &stub_factory_;
2471 // Whether we can use BLX.
2472 bool may_use_blx_;
2473 // Whether we force PIC branch veneers.
2474 bool should_force_pic_veneer_;
2475 // Map for locating Arm_input_sections.
2476 Arm_input_section_map arm_input_section_map_;
2477 // Attributes section data in output.
2478 Attributes_section_data* attributes_section_data_;
2479 // Whether we want to fix code for Cortex-A8 erratum.
2480 bool fix_cortex_a8_;
2481 // Map addresses to relocs for Cortex-A8 erratum.
2482 Cortex_a8_relocs_info cortex_a8_relocs_info_;
2483 };
2484
2485 template<bool big_endian>
2486 const Target::Target_info Target_arm<big_endian>::arm_info =
2487 {
2488 32, // size
2489 big_endian, // is_big_endian
2490 elfcpp::EM_ARM, // machine_code
2491 false, // has_make_symbol
2492 false, // has_resolve
2493 false, // has_code_fill
2494 true, // is_default_stack_executable
2495 '\0', // wrap_char
2496 "/usr/lib/libc.so.1", // dynamic_linker
2497 0x8000, // default_text_segment_address
2498 0x1000, // abi_pagesize (overridable by -z max-page-size)
2499 0x1000, // common_pagesize (overridable by -z common-page-size)
2500 elfcpp::SHN_UNDEF, // small_common_shndx
2501 elfcpp::SHN_UNDEF, // large_common_shndx
2502 0, // small_common_section_flags
2503 0, // large_common_section_flags
2504 ".ARM.attributes", // attributes_section
2505 "aeabi" // attributes_vendor
2506 };
2507
2508 // Arm relocate functions class
2509 //
2510
2511 template<bool big_endian>
2512 class Arm_relocate_functions : public Relocate_functions<32, big_endian>
2513 {
2514 public:
2515 typedef enum
2516 {
2517 STATUS_OKAY, // No error during relocation.
2518 STATUS_OVERFLOW, // Relocation overflow.
2519 STATUS_BAD_RELOC // Relocation cannot be applied.
2520 } Status;
2521
2522 private:
2523 typedef Relocate_functions<32, big_endian> Base;
2524 typedef Arm_relocate_functions<big_endian> This;
2525
2526 // Encoding of imm16 argument for movt and movw ARM instructions
2527 // from ARM ARM:
2528 //
2529 // imm16 := imm4 | imm12
2530 //
2531 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2532 // +-------+---------------+-------+-------+-----------------------+
2533 // | | |imm4 | |imm12 |
2534 // +-------+---------------+-------+-------+-----------------------+
2535
2536 // Extract the relocation addend from VAL based on the ARM
2537 // instruction encoding described above.
2538 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2539 extract_arm_movw_movt_addend(
2540 typename elfcpp::Swap<32, big_endian>::Valtype val)
2541 {
2542 // According to the ELF ABI for the ARM Architecture, the immediate
2543 // field is sign-extended to form the addend.
2544 return utils::sign_extend<16>(((val >> 4) & 0xf000) | (val & 0xfff));
2545 }
2546
2547 // Insert X into VAL based on the ARM instruction encoding described
2548 // above.
2549 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2550 insert_val_arm_movw_movt(
2551 typename elfcpp::Swap<32, big_endian>::Valtype val,
2552 typename elfcpp::Swap<32, big_endian>::Valtype x)
2553 {
2554 val &= 0xfff0f000;
2555 val |= x & 0x0fff;
2556 val |= (x & 0xf000) << 4;
2557 return val;
2558 }
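// For example, inserting X = 0x1234 places imm4 = 0x1 in bits 19:16 and
// imm12 = 0x234 in bits 11:0 of the instruction word;
// extract_arm_movw_movt_addend() on the result recovers 0x1234.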
2559
2560 // Encoding of imm16 argument for movt and movw Thumb2 instructions
2561 // from ARM ARM:
2562 //
2563 // imm16 := imm4 | i | imm3 | imm8
2564 //
2565 // f e d c b a 9 8 7 6 5 4 3 2 1 0 f e d c b a 9 8 7 6 5 4 3 2 1 0
2566 // +---------+-+-----------+-------++-+-----+-------+---------------+
2567 // | |i| |imm4 || |imm3 | |imm8 |
2568 // +---------+-+-----------+-------++-+-----+-------+---------------+
2569
2570 // Extract the relocation addend from VAL based on the Thumb2
2571 // instruction encoding described above.
2572 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2573 extract_thumb_movw_movt_addend(
2574 typename elfcpp::Swap<32, big_endian>::Valtype val)
2575 {
2576 // According to the ELF ABI for the ARM Architecture, the immediate
2577 // field is sign-extended to form the addend.
2578 return utils::sign_extend<16>(((val >> 4) & 0xf000)
2579 | ((val >> 15) & 0x0800)
2580 | ((val >> 4) & 0x0700)
2581 | (val & 0x00ff));
2582 }
2583
2584 // Insert X into VAL based on the Thumb2 instruction encoding
2585 // described above.
2586 static inline typename elfcpp::Swap<32, big_endian>::Valtype
2587 insert_val_thumb_movw_movt(
2588 typename elfcpp::Swap<32, big_endian>::Valtype val,
2589 typename elfcpp::Swap<32, big_endian>::Valtype x)
2590 {
2591 val &= 0xfbf08f00;
2592 val |= (x & 0xf000) << 4;
2593 val |= (x & 0x0800) << 15;
2594 val |= (x & 0x0700) << 4;
2595 val |= (x & 0x00ff);
2596 return val;
2597 }
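// For example, inserting X = 0x1234 sets imm4 = 0x1, i = 0, imm3 = 0x2 and
// imm8 = 0x34 in the Thumb2 instruction pair;
// extract_thumb_movw_movt_addend() on the result recovers 0x1234.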
2598
2599 // Calculate the smallest constant Kn for the specified residual.
2600 // (see AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2601 static uint32_t
2602 calc_grp_kn(typename elfcpp::Swap<32, big_endian>::Valtype residual)
2603 {
2604 int32_t msb;
2605
2606 if (residual == 0)
2607 return 0;
2608 // Determine the most significant bit in the residual and
2609 // align the resulting value to a 2-bit boundary.
2610 for (msb = 30; (msb >= 0) && !(residual & (3 << msb)); msb -= 2)
2611 ;
2612 // The desired shift is now (msb - 6), or zero, whichever
2613 // is the greater.
2614 return (((msb - 6) < 0) ? 0 : (msb - 6));
2615 }
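// For example, calc_grp_kn(0x1234) finds the top set bit pair at bit 12
// and returns a shift of 6, while calc_grp_kn(0xff) returns 0.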
2616
2617 // Calculate the final residual for the specified group index.
2618 // If the passed group index is less than zero, the method will return
2619 // the value of the specified residual without any change.
2620 // (see AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2621 static typename elfcpp::Swap<32, big_endian>::Valtype
2622 calc_grp_residual(typename elfcpp::Swap<32, big_endian>::Valtype residual,
2623 const int group)
2624 {
2625 for (int n = 0; n <= group; n++)
2626 {
2627 // Calculate which part of the value to mask.
2628 uint32_t shift = calc_grp_kn(residual);
2629 // Calculate the residual for the next time around.
2630 residual &= ~(residual & (0xff << shift));
2631 }
2632
2633 return residual;
2634 }
2635
2636 // Calculate the value of Gn for the specified group index.
2637 // We return it in the form of an encoded constant-and-rotation.
2638 // (see AAELF 4.6.1.4 Static ARM relocations, Group Relocations, p.32)
2639 static typename elfcpp::Swap<32, big_endian>::Valtype
2640 calc_grp_gn(typename elfcpp::Swap<32, big_endian>::Valtype residual,
2641 const int group)
2642 {
2643 typename elfcpp::Swap<32, big_endian>::Valtype gn = 0;
2644 uint32_t shift = 0;
2645
2646 for (int n = 0; n <= group; n++)
2647 {
2648 // Calculate which part of the value to mask.
2649 shift = calc_grp_kn(residual);
2650 // Calculate Gn in 32-bit as well as encoded constant-and-rotation form.
2651 gn = residual & (0xff << shift);
2652 // Calculate the residual for the next time around.
2653 residual &= ~gn;
2654 }
2655 // Return Gn in the form of an encoded constant-and-rotation.
2656 return ((gn >> shift) | ((gn <= 0xff ? 0 : (32 - shift) / 2) << 8));
2657 }
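// For example, splitting the value 0x1234 into groups gives G0 = 0x1200
// (shift 6) and G1 = 0x34, after which the residual is zero, so a pair of
// group relocations (G0 followed by G1) can encode the value exactly.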
2658
2659 public:
2660 // Handle ARM long branches.
2661 static typename This::Status
2662 arm_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2663 unsigned char *, const Sized_symbol<32>*,
2664 const Arm_relobj<big_endian>*, unsigned int,
2665 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2666
2667 // Handle THUMB long branches.
2668 static typename This::Status
2669 thumb_branch_common(unsigned int, const Relocate_info<32, big_endian>*,
2670 unsigned char *, const Sized_symbol<32>*,
2671 const Arm_relobj<big_endian>*, unsigned int,
2672 const Symbol_value<32>*, Arm_address, Arm_address, bool);
2673
2674
2675 // Return the branch offset of a 32-bit THUMB branch.
2676 static inline int32_t
2677 thumb32_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2678 {
2679 // We use the Thumb-2 encoding (backwards compatible with Thumb-1)
2680 // involving the J1 and J2 bits.
2681 uint32_t s = (upper_insn & (1U << 10)) >> 10;
2682 uint32_t upper = upper_insn & 0x3ffU;
2683 uint32_t lower = lower_insn & 0x7ffU;
2684 uint32_t j1 = (lower_insn & (1U << 13)) >> 13;
2685 uint32_t j2 = (lower_insn & (1U << 11)) >> 11;
2686 uint32_t i1 = j1 ^ s ? 0 : 1;
2687 uint32_t i2 = j2 ^ s ? 0 : 1;
2688
2689 return utils::sign_extend<25>((s << 24) | (i1 << 23) | (i2 << 22)
2690 | (upper << 12) | (lower << 1));
2691 }
2692
2693 // Insert OFFSET to a 32-bit THUMB branch and return the upper instruction.
2694 // UPPER_INSN is the original upper instruction of the branch. Caller is
2695 // responsible for overflow checking and BLX offset adjustment.
2696 static inline uint16_t
2697 thumb32_branch_upper(uint16_t upper_insn, int32_t offset)
2698 {
2699 uint32_t s = offset < 0 ? 1 : 0;
2700 uint32_t bits = static_cast<uint32_t>(offset);
2701 return (upper_insn & ~0x7ffU) | ((bits >> 12) & 0x3ffU) | (s << 10);
2702 }
2703
2704 // Insert OFFSET to a 32-bit THUMB branch and return the lower instruction.
2705 // LOWER_INSN is the original lower instruction of the branch. Caller is
2706 // responsible for overflow checking and BLX offset adjustment.
2707 static inline uint16_t
2708 thumb32_branch_lower(uint16_t lower_insn, int32_t offset)
2709 {
2710 uint32_t s = offset < 0 ? 1 : 0;
2711 uint32_t bits = static_cast<uint32_t>(offset);
2712 return ((lower_insn & ~0x2fffU)
2713 | ((((bits >> 23) & 1) ^ !s) << 13)
2714 | ((((bits >> 22) & 1) ^ !s) << 11)
2715 | ((bits >> 1) & 0x7ffU));
2716 }
2717
2718 // Return the branch offset of a 32-bit THUMB conditional branch.
2719 static inline int32_t
2720 thumb32_cond_branch_offset(uint16_t upper_insn, uint16_t lower_insn)
2721 {
2722 uint32_t s = (upper_insn & 0x0400U) >> 10;
2723 uint32_t j1 = (lower_insn & 0x2000U) >> 13;
2724 uint32_t j2 = (lower_insn & 0x0800U) >> 11;
2725 uint32_t lower = (lower_insn & 0x07ffU);
2726 uint32_t upper = (s << 8) | (j2 << 7) | (j1 << 6) | (upper_insn & 0x003fU);
2727
2728 return utils::sign_extend<21>((upper << 12) | (lower << 1));
2729 }
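// The offset is thus a 21-bit signed value assembled as
// S:J2:J1:imm6:imm11:'0', giving a conditional-branch range of
// -2^20 .. 2^20 - 2 bytes.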
2730
2731 // Insert OFFSET to a 32-bit THUMB conditional branch and return the upper
2732 // instruction. UPPER_INSN is the original upper instruction of the branch.
2733 // Caller is responsible for overflow checking.
2734 static inline uint16_t
2735 thumb32_cond_branch_upper(uint16_t upper_insn, int32_t offset)
2736 {
2737 uint32_t s = offset < 0 ? 1 : 0;
2738 uint32_t bits = static_cast<uint32_t>(offset);
2739 return (upper_insn & 0xfbc0U) | (s << 10) | ((bits & 0x0003f000U) >> 12);
2740 }
2741
2742 // Insert OFFSET to a 32-bit THUMB conditional branch and return the lower
2743 // instruction. LOWER_INSN is the original lower instruction of the branch.
2744 // Caller is responsible for overflow checking.
2745 static inline uint16_t
2746 thumb32_cond_branch_lower(uint16_t lower_insn, int32_t offset)
2747 {
2748 uint32_t bits = static_cast<uint32_t>(offset);
2749 uint32_t j2 = (bits & 0x00080000U) >> 19;
2750 uint32_t j1 = (bits & 0x00040000U) >> 18;
2751 uint32_t lo = (bits & 0x00000ffeU) >> 1;
2752
2753 return (lower_insn & 0xd000U) | (j1 << 13) | (j2 << 11) | lo;
2754 }
2755
2756 // R_ARM_ABS8: S + A
2757 static inline typename This::Status
2758 abs8(unsigned char *view,
2759 const Sized_relobj<32, big_endian>* object,
2760 const Symbol_value<32>* psymval)
2761 {
2762 typedef typename elfcpp::Swap<8, big_endian>::Valtype Valtype;
2763 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2764 Valtype* wv = reinterpret_cast<Valtype*>(view);
2765 Valtype val = elfcpp::Swap<8, big_endian>::readval(wv);
2766 Reltype addend = utils::sign_extend<8>(val);
2767 Reltype x = psymval->value(object, addend);
2768 val = utils::bit_select(val, x, 0xffU);
2769 elfcpp::Swap<8, big_endian>::writeval(wv, val);
2770 return (utils::has_signed_unsigned_overflow<8>(x)
2771 ? This::STATUS_OVERFLOW
2772 : This::STATUS_OKAY);
2773 }
2774
2775 // R_ARM_THM_ABS5: S + A
2776 static inline typename This::Status
2777 thm_abs5(unsigned char *view,
2778 const Sized_relobj<32, big_endian>* object,
2779 const Symbol_value<32>* psymval)
2780 {
2781 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2782 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2783 Valtype* wv = reinterpret_cast<Valtype*>(view);
2784 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2785 Reltype addend = (val & 0x7e0U) >> 6;
2786 Reltype x = psymval->value(object, addend);
2787 val = utils::bit_select(val, x << 6, 0x7e0U);
2788 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2789 return (utils::has_overflow<5>(x)
2790 ? This::STATUS_OVERFLOW
2791 : This::STATUS_OKAY);
2792 }
2793
2794 // R_ARM_ABS12: S + A
2795 static inline typename This::Status
2796 abs12(unsigned char *view,
2797 const Sized_relobj<32, big_endian>* object,
2798 const Symbol_value<32>* psymval)
2799 {
2800 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2801 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2802 Valtype* wv = reinterpret_cast<Valtype*>(view);
2803 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2804 Reltype addend = val & 0x0fffU;
2805 Reltype x = psymval->value(object, addend);
2806 val = utils::bit_select(val, x, 0x0fffU);
2807 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2808 return (utils::has_overflow<12>(x)
2809 ? This::STATUS_OVERFLOW
2810 : This::STATUS_OKAY);
2811 }
2812
2813 // R_ARM_ABS16: S + A
2814 static inline typename This::Status
2815 abs16(unsigned char *view,
2816 const Sized_relobj<32, big_endian>* object,
2817 const Symbol_value<32>* psymval)
2818 {
2819 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2820 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
2821 Valtype* wv = reinterpret_cast<Valtype*>(view);
2822 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2823 Reltype addend = utils::sign_extend<16>(val);
2824 Reltype x = psymval->value(object, addend);
2825 val = utils::bit_select(val, x, 0xffffU);
2826 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2827 return (utils::has_signed_unsigned_overflow<16>(x)
2828 ? This::STATUS_OVERFLOW
2829 : This::STATUS_OKAY);
2830 }
2831
2832 // R_ARM_ABS32: (S + A) | T
2833 static inline typename This::Status
2834 abs32(unsigned char *view,
2835 const Sized_relobj<32, big_endian>* object,
2836 const Symbol_value<32>* psymval,
2837 Arm_address thumb_bit)
2838 {
2839 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2840 Valtype* wv = reinterpret_cast<Valtype*>(view);
2841 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2842 Valtype x = psymval->value(object, addend) | thumb_bit;
2843 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2844 return This::STATUS_OKAY;
2845 }
2846
2847 // R_ARM_REL32: (S + A) | T - P
2848 static inline typename This::Status
2849 rel32(unsigned char *view,
2850 const Sized_relobj<32, big_endian>* object,
2851 const Symbol_value<32>* psymval,
2852 Arm_address address,
2853 Arm_address thumb_bit)
2854 {
2855 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2856 Valtype* wv = reinterpret_cast<Valtype*>(view);
2857 Valtype addend = elfcpp::Swap<32, big_endian>::readval(wv);
2858 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2859 elfcpp::Swap<32, big_endian>::writeval(wv, x);
2860 return This::STATUS_OKAY;
2861 }
2862
2863 // R_ARM_THM_JUMP19: ((S + A) | T) - P
2864 static typename This::Status
2865 thm_jump19(unsigned char *view, const Arm_relobj<big_endian>* object,
2866 const Symbol_value<32>* psymval, Arm_address address,
2867 Arm_address thumb_bit);
2868
2869 // R_ARM_THM_JUMP6: S + A - P
2870 static inline typename This::Status
2871 thm_jump6(unsigned char *view,
2872 const Sized_relobj<32, big_endian>* object,
2873 const Symbol_value<32>* psymval,
2874 Arm_address address)
2875 {
2876 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2877 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2878 Valtype* wv = reinterpret_cast<Valtype*>(view);
2879 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2880 // bit[9]:bit[7:3]:'0' (mask: 0x02f8)
2881 Reltype addend = (((val & 0x0200) >> 3) | ((val & 0x00f8) >> 2));
2882 Reltype x = (psymval->value(object, addend) - address);
2883 val = (val & 0xfd07) | ((x & 0x0040) << 3) | ((x & 0x003e) << 2);
2884 elfcpp::Swap<16, big_endian>::writeval(wv, val);
2885 // CBZ only does forward jumps.
2886 return ((x > 0x007e)
2887 ? This::STATUS_OVERFLOW
2888 : This::STATUS_OKAY);
2889 }
2890
2891 // R_ARM_THM_JUMP8: S + A - P
2892 static inline typename This::Status
2893 thm_jump8(unsigned char *view,
2894 const Sized_relobj<32, big_endian>* object,
2895 const Symbol_value<32>* psymval,
2896 Arm_address address)
2897 {
2898 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2899 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2900 Valtype* wv = reinterpret_cast<Valtype*>(view);
2901 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2902 Reltype addend = utils::sign_extend<8>((val & 0x00ff) << 1);
2903 Reltype x = (psymval->value(object, addend) - address);
2904 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xff00) | ((x & 0x01fe) >> 1));
2905 return (utils::has_overflow<8>(x)
2906 ? This::STATUS_OVERFLOW
2907 : This::STATUS_OKAY);
2908 }
2909
2910 // R_ARM_THM_JUMP11: S + A - P
2911 static inline typename This::Status
2912 thm_jump11(unsigned char *view,
2913 const Sized_relobj<32, big_endian>* object,
2914 const Symbol_value<32>* psymval,
2915 Arm_address address)
2916 {
2917 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
2918 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
2919 Valtype* wv = reinterpret_cast<Valtype*>(view);
2920 Valtype val = elfcpp::Swap<16, big_endian>::readval(wv);
2921 Reltype addend = utils::sign_extend<11>((val & 0x07ff) << 1);
2922 Reltype x = (psymval->value(object, addend) - address);
2923 elfcpp::Swap<16, big_endian>::writeval(wv, (val & 0xf800) | ((x & 0x0ffe) >> 1));
2924 return (utils::has_overflow<11>(x)
2925 ? This::STATUS_OVERFLOW
2926 : This::STATUS_OKAY);
2927 }
2928
2929 // R_ARM_BASE_PREL: B(S) + A - P
2930 static inline typename This::Status
2931 base_prel(unsigned char* view,
2932 Arm_address origin,
2933 Arm_address address)
2934 {
2935 Base::rel32(view, origin - address);
2936 return STATUS_OKAY;
2937 }
2938
2939 // R_ARM_BASE_ABS: B(S) + A
2940 static inline typename This::Status
2941 base_abs(unsigned char* view,
2942 Arm_address origin)
2943 {
2944 Base::rel32(view, origin);
2945 return STATUS_OKAY;
2946 }
2947
2948 // R_ARM_GOT_BREL: GOT(S) + A - GOT_ORG
2949 static inline typename This::Status
2950 got_brel(unsigned char* view,
2951 typename elfcpp::Swap<32, big_endian>::Valtype got_offset)
2952 {
2953 Base::rel32(view, got_offset);
2954 return This::STATUS_OKAY;
2955 }
2956
2957 // R_ARM_GOT_PREL: GOT(S) + A - P
2958 static inline typename This::Status
2959 got_prel(unsigned char *view,
2960 Arm_address got_entry,
2961 Arm_address address)
2962 {
2963 Base::rel32(view, got_entry - address);
2964 return This::STATUS_OKAY;
2965 }
2966
2967 // R_ARM_PREL31: ((S + A) | T) - P
2968 static inline typename This::Status
2969 prel31(unsigned char *view,
2970 const Sized_relobj<32, big_endian>* object,
2971 const Symbol_value<32>* psymval,
2972 Arm_address address,
2973 Arm_address thumb_bit)
2974 {
2975 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2976 Valtype* wv = reinterpret_cast<Valtype*>(view);
2977 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2978 Valtype addend = utils::sign_extend<31>(val);
2979 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
2980 val = utils::bit_select(val, x, 0x7fffffffU);
2981 elfcpp::Swap<32, big_endian>::writeval(wv, val);
2982 return (utils::has_overflow<31>(x) ?
2983 This::STATUS_OVERFLOW : This::STATUS_OKAY);
2984 }
2985
2986 // R_ARM_MOVW_ABS_NC: (S + A) | T
2987 static inline typename This::Status
2988 movw_abs_nc(unsigned char *view,
2989 const Sized_relobj<32, big_endian>* object,
2990 const Symbol_value<32>* psymval,
2991 Arm_address thumb_bit)
2992 {
2993 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
2994 Valtype* wv = reinterpret_cast<Valtype*>(view);
2995 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
2996 Valtype addend = This::extract_arm_movw_movt_addend(val);
2997 Valtype x = psymval->value(object, addend) | thumb_bit;
2998 val = This::insert_val_arm_movw_movt(val, x);
2999 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3000 return This::STATUS_OKAY;
3001 }
3002
3003 // R_ARM_MOVT_ABS: S + A
3004 static inline typename This::Status
3005 movt_abs(unsigned char *view,
3006 const Sized_relobj<32, big_endian>* object,
3007 const Symbol_value<32>* psymval)
3008 {
3009 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3010 Valtype* wv = reinterpret_cast<Valtype*>(view);
3011 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3012 Valtype addend = This::extract_arm_movw_movt_addend(val);
3013 Valtype x = psymval->value(object, addend) >> 16;
3014 val = This::insert_val_arm_movw_movt(val, x);
3015 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3016 return This::STATUS_OKAY;
3017 }
3018
3019 // R_ARM_THM_MOVW_ABS_NC: (S + A) | T
3020 static inline typename This::Status
3021 thm_movw_abs_nc(unsigned char *view,
3022 const Sized_relobj<32, big_endian>* object,
3023 const Symbol_value<32>* psymval,
3024 Arm_address thumb_bit)
3025 {
3026 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3027 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3028 Valtype* wv = reinterpret_cast<Valtype*>(view);
3029 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3030 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
3031 Reltype addend = extract_thumb_movw_movt_addend(val);
3032 Reltype x = psymval->value(object, addend) | thumb_bit;
3033 val = This::insert_val_thumb_movw_movt(val, x);
3034 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3035 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3036 return This::STATUS_OKAY;
3037 }
3038
3039 // R_ARM_THM_MOVT_ABS: S + A
3040 static inline typename This::Status
3041 thm_movt_abs(unsigned char *view,
3042 const Sized_relobj<32, big_endian>* object,
3043 const Symbol_value<32>* psymval)
3044 {
3045 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3046 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3047 Valtype* wv = reinterpret_cast<Valtype*>(view);
3048 Reltype val = ((elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3049 | elfcpp::Swap<16, big_endian>::readval(wv + 1));
3050 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3051 Reltype x = psymval->value(object, addend) >> 16;
3052 val = This::insert_val_thumb_movw_movt(val, x);
3053 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3054 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3055 return This::STATUS_OKAY;
3056 }
3057
3058 // R_ARM_MOVW_PREL_NC: (S + A) | T - P
3059 // R_ARM_MOVW_BREL_NC: ((S + A) | T) - B(S)
3060 static inline typename This::Status
3061 movw_rel_nc(unsigned char* view,
3062 const Sized_relobj<32, big_endian>* object,
3063 const Symbol_value<32>* psymval,
3064 Arm_address address,
3065 Arm_address thumb_bit)
3066 {
3067 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3068 Valtype* wv = reinterpret_cast<Valtype*>(view);
3069 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3070 Valtype addend = This::extract_arm_movw_movt_addend(val);
3071 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
3072 val = This::insert_val_arm_movw_movt(val, x);
3073 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3074 return This::STATUS_OKAY;
3075 }
3076
3077 // R_ARM_MOVW_BREL: ((S + A) | T) - B(S)
3078 static inline typename This::Status
3079 movw_rel(unsigned char* view,
3080 const Sized_relobj<32, big_endian>* object,
3081 const Symbol_value<32>* psymval,
3082 Arm_address address,
3083 Arm_address thumb_bit)
3084 {
3085 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3086 Valtype* wv = reinterpret_cast<Valtype*>(view);
3087 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3088 Valtype addend = This::extract_arm_movw_movt_addend(val);
3089 Valtype x = (psymval->value(object, addend) | thumb_bit) - address;
3090 val = This::insert_val_arm_movw_movt(val, x);
3091 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3092 return ((x >= 0x10000) ?
3093 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3094 }
3095
3096 // R_ARM_MOVT_PREL: S + A - P
3097 // R_ARM_MOVT_BREL: S + A - B(S)
3098 static inline typename This::Status
3099 movt_rel(unsigned char* view,
3100 const Sized_relobj<32, big_endian>* object,
3101 const Symbol_value<32>* psymval,
3102 Arm_address address)
3103 {
3104 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3105 Valtype* wv = reinterpret_cast<Valtype*>(view);
3106 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3107 Valtype addend = This::extract_arm_movw_movt_addend(val);
3108 Valtype x = (psymval->value(object, addend) - address) >> 16;
3109 val = This::insert_val_arm_movw_movt(val, x);
3110 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3111 return This::STATUS_OKAY;
3112 }
3113
3114 // R_ARM_THM_MOVW_PREL_NC: (S + A) | T - P
3115 // R_ARM_THM_MOVW_BREL_NC: ((S + A) | T) - B(S)
3116 static inline typename This::Status
3117 thm_movw_rel_nc(unsigned char *view,
3118 const Sized_relobj<32, big_endian>* object,
3119 const Symbol_value<32>* psymval,
3120 Arm_address address,
3121 Arm_address thumb_bit)
3122 {
3123 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3124 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3125 Valtype* wv = reinterpret_cast<Valtype*>(view);
3126 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3127 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3128 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3129 Reltype x = (psymval->value(object, addend) | thumb_bit) - address;
3130 val = This::insert_val_thumb_movw_movt(val, x);
3131 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3132 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3133 return This::STATUS_OKAY;
3134 }
3135
3136 // R_ARM_THM_MOVW_BREL: ((S + A) | T) - B(S)
3137 static inline typename This::Status
3138 thm_movw_rel(unsigned char *view,
3139 const Sized_relobj<32, big_endian>* object,
3140 const Symbol_value<32>* psymval,
3141 Arm_address address,
3142 Arm_address thumb_bit)
3143 {
3144 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3145 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3146 Valtype* wv = reinterpret_cast<Valtype*>(view);
3147 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3148 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3149 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3150 Reltype x = (psymval->value(object, addend) | thumb_bit) - address;
3151 val = This::insert_val_thumb_movw_movt(val, x);
3152 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3153 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3154 return ((x >= 0x10000) ?
3155 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3156 }
3157
3158 // R_ARM_THM_MOVT_PREL: S + A - P
3159 // R_ARM_THM_MOVT_BREL: S + A - B(S)
3160 static inline typename This::Status
3161 thm_movt_rel(unsigned char* view,
3162 const Sized_relobj<32, big_endian>* object,
3163 const Symbol_value<32>* psymval,
3164 Arm_address address)
3165 {
3166 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3167 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3168 Valtype* wv = reinterpret_cast<Valtype*>(view);
3169 Reltype val = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3170 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3171 Reltype addend = This::extract_thumb_movw_movt_addend(val);
3172 Reltype x = (psymval->value(object, addend) - address) >> 16;
3173 val = This::insert_val_thumb_movw_movt(val, x);
3174 elfcpp::Swap<16, big_endian>::writeval(wv, val >> 16);
3175 elfcpp::Swap<16, big_endian>::writeval(wv + 1, val & 0xffff);
3176 return This::STATUS_OKAY;
3177 }
3178
3179 // R_ARM_THM_ALU_PREL_11_0: ((S + A) | T) - Pa (Thumb32)
3180 static inline typename This::Status
3181 thm_alu11(unsigned char* view,
3182 const Sized_relobj<32, big_endian>* object,
3183 const Symbol_value<32>* psymval,
3184 Arm_address address,
3185 Arm_address thumb_bit)
3186 {
3187 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3188 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3189 Valtype* wv = reinterpret_cast<Valtype*>(view);
3190 Reltype insn = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3191 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3192
3193 // f e d c b|a|9|8 7 6 5|4|3 2 1 0||f|e d c|b a 9 8|7 6 5 4 3 2 1 0
3194 // -----------------------------------------------------------------------
3195 // ADD{S} 1 1 1 1 0|i|0|1 0 0 0|S|1 1 0 1||0|imm3 |Rd |imm8
3196 // ADDW 1 1 1 1 0|i|1|0 0 0 0|0|1 1 0 1||0|imm3 |Rd |imm8
3197 // ADR[+] 1 1 1 1 0|i|1|0 0 0 0|0|1 1 1 1||0|imm3 |Rd |imm8
3198 // SUB{S} 1 1 1 1 0|i|0|1 1 0 1|S|1 1 0 1||0|imm3 |Rd |imm8
3199 // SUBW 1 1 1 1 0|i|1|0 1 0 1|0|1 1 0 1||0|imm3 |Rd |imm8
3200 // ADR[-] 1 1 1 1 0|i|1|0 1 0 1|0|1 1 1 1||0|imm3 |Rd |imm8
3201
3202 // Determine a sign for the addend.
3203 const int sign = ((insn & 0xf8ef0000) == 0xf0ad0000
3204 || (insn & 0xf8ef0000) == 0xf0af0000) ? -1 : 1;
3205 // Thumb2 addend encoding:
3206 // imm12 := i | imm3 | imm8
3207 int32_t addend = (insn & 0xff)
3208 | ((insn & 0x00007000) >> 4)
3209 | ((insn & 0x04000000) >> 15);
3210 // Apply the sign to the addend.
3211 addend *= sign;
3212
3213 int32_t x = (psymval->value(object, addend) | thumb_bit)
3214 - (address & 0xfffffffc);
3215 Reltype val = abs(x);
3216 // Mask out the value and a distinct part of the ADD/SUB opcode
3217 // (bits 7:5 of opword).
3218 insn = (insn & 0xfb0f8f00)
3219 | (val & 0xff)
3220 | ((val & 0x700) << 4)
3221 | ((val & 0x800) << 15);
3222 // Set the opcode according to whether the value to go in the
3223 // place is negative.
3224 if (x < 0)
3225 insn |= 0x00a00000;
3226
3227 elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
3228 elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
3229 return ((val > 0xfff) ?
3230 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3231 }
3232
3233 // R_ARM_THM_PC8: S + A - Pa (Thumb)
3234 static inline typename This::Status
3235 thm_pc8(unsigned char* view,
3236 const Sized_relobj<32, big_endian>* object,
3237 const Symbol_value<32>* psymval,
3238 Arm_address address)
3239 {
3240 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3241 typedef typename elfcpp::Swap<16, big_endian>::Valtype Reltype;
3242 Valtype* wv = reinterpret_cast<Valtype*>(view);
3243 Valtype insn = elfcpp::Swap<16, big_endian>::readval(wv);
3244 Reltype addend = ((insn & 0x00ff) << 2);
3245 int32_t x = (psymval->value(object, addend) - (address & 0xfffffffc));
3246 Reltype val = abs(x);
3247 insn = (insn & 0xff00) | ((val & 0x03fc) >> 2);
3248
3249 elfcpp::Swap<16, big_endian>::writeval(wv, insn);
3250 return ((val > 0x03fc)
3251 ? This::STATUS_OVERFLOW
3252 : This::STATUS_OKAY);
3253 }
3254
3255 // R_ARM_THM_PC12: S + A - Pa (Thumb32)
3256 static inline typename This::Status
3257 thm_pc12(unsigned char* view,
3258 const Sized_relobj<32, big_endian>* object,
3259 const Symbol_value<32>* psymval,
3260 Arm_address address)
3261 {
3262 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3263 typedef typename elfcpp::Swap<32, big_endian>::Valtype Reltype;
3264 Valtype* wv = reinterpret_cast<Valtype*>(view);
3265 Reltype insn = (elfcpp::Swap<16, big_endian>::readval(wv) << 16)
3266 | elfcpp::Swap<16, big_endian>::readval(wv + 1);
3267 // Determine a sign for the addend (positive if the U bit is 1).
3268 const int sign = (insn & 0x00800000) ? 1 : -1;
3269 int32_t addend = (insn & 0xfff);
3270 // Apply the sign to the addend.
3271 addend *= sign;
3272
3273 int32_t x = (psymval->value(object, addend) - (address & 0xfffffffc));
3274 Reltype val = abs(x);
3275 // Mask out and apply the value and the U bit.
3276 insn = (insn & 0xff7ff000) | (val & 0xfff);
3277 // Set the U bit according to whether the value to go in the
3278 // place is positive.
3279 if (x >= 0)
3280 insn |= 0x00800000;
3281
3282 elfcpp::Swap<16, big_endian>::writeval(wv, insn >> 16);
3283 elfcpp::Swap<16, big_endian>::writeval(wv + 1, insn & 0xffff);
3284 return ((val > 0xfff) ?
3285 This::STATUS_OVERFLOW : This::STATUS_OKAY);
3286 }
3287
3288 // R_ARM_V4BX
3289 static inline typename This::Status
3290 v4bx(const Relocate_info<32, big_endian>* relinfo,
3291 unsigned char *view,
3292 const Arm_relobj<big_endian>* object,
3293 const Arm_address address,
3294 const bool is_interworking)
3295 {
3296
3297 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3298 Valtype* wv = reinterpret_cast<Valtype*>(view);
3299 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3300
3301 // Ensure that we have a BX instruction.
3302 gold_assert((val & 0x0ffffff0) == 0x012fff10);
3303 const uint32_t reg = (val & 0xf);
3304 if (is_interworking && reg != 0xf)
3305 {
3306 Stub_table<big_endian>* stub_table =
3307 object->stub_table(relinfo->data_shndx);
3308 gold_assert(stub_table != NULL);
3309
3310 Arm_v4bx_stub* stub = stub_table->find_arm_v4bx_stub(reg);
3311 gold_assert(stub != NULL);
3312
3313 int32_t veneer_address =
3314 stub_table->address() + stub->offset() - 8 - address;
3315 gold_assert((veneer_address <= ARM_MAX_FWD_BRANCH_OFFSET)
3316 && (veneer_address >= ARM_MAX_BWD_BRANCH_OFFSET));
3317 // Replace with a branch to veneer (B <addr>)
3318 val = (val & 0xf0000000) | 0x0a000000
3319 | ((veneer_address >> 2) & 0x00ffffff);
3320 }
3321 else
3322 {
3323 // Preserve Rm (lowest four bits) and the condition code
3324 // (highest four bits). Other bits encode MOV PC,Rm.
3325 val = (val & 0xf000000f) | 0x01a0f000;
3326 }
3327 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3328 return This::STATUS_OKAY;
3329 }
3330
3331 // R_ARM_ALU_PC_G0_NC: ((S + A) | T) - P
3332 // R_ARM_ALU_PC_G0: ((S + A) | T) - P
3333 // R_ARM_ALU_PC_G1_NC: ((S + A) | T) - P
3334 // R_ARM_ALU_PC_G1: ((S + A) | T) - P
3335 // R_ARM_ALU_PC_G2: ((S + A) | T) - P
3336 // R_ARM_ALU_SB_G0_NC: ((S + A) | T) - B(S)
3337 // R_ARM_ALU_SB_G0: ((S + A) | T) - B(S)
3338 // R_ARM_ALU_SB_G1_NC: ((S + A) | T) - B(S)
3339 // R_ARM_ALU_SB_G1: ((S + A) | T) - B(S)
3340 // R_ARM_ALU_SB_G2: ((S + A) | T) - B(S)
3341 static inline typename This::Status
3342 arm_grp_alu(unsigned char* view,
3343 const Sized_relobj<32, big_endian>* object,
3344 const Symbol_value<32>* psymval,
3345 const int group,
3346 Arm_address address,
3347 Arm_address thumb_bit,
3348 bool check_overflow)
3349 {
3350 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3351 Valtype* wv = reinterpret_cast<Valtype*>(view);
3352 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3353
3354 // ALU group relocations are allowed only for the ADD/SUB instructions.
3355 // (0x00800000 - ADD, 0x00400000 - SUB)
3356 const Valtype opcode = insn & 0x01e00000;
3357 if (opcode != 0x00800000 && opcode != 0x00400000)
3358 return This::STATUS_BAD_RELOC;
3359
3360 // Determine a sign for the addend.
3361 const int sign = (opcode == 0x00800000) ? 1 : -1;
3362 // shifter = rotate_imm * 2
3363 const uint32_t shifter = (insn & 0xf00) >> 7;
3364 // Initial addend value.
3365 int32_t addend = insn & 0xff;
3366 // Rotate addend right by shifter.
3367 addend = (addend >> shifter) | (addend << (32 - shifter));
3368 // Apply the sign to the addend.
3369 addend *= sign;
3370
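// A group relocation encodes successive chunks of the (absolute) offset,
// each expressible as a rotated 8-bit ARM immediate: calc_grp_gn() returns
// the encoded chunk for this group and calc_grp_residual() whatever is
// left over for later groups.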
3371 int32_t x = ((psymval->value(object, addend) | thumb_bit) - address);
3372 Valtype gn = Arm_relocate_functions::calc_grp_gn(abs(x), group);
3373 // Check for overflow if required
3374 if (check_overflow
3375 && (Arm_relocate_functions::calc_grp_residual(abs(x), group) != 0))
3376 return This::STATUS_OVERFLOW;
3377
3378 // Mask out the value and the ADD/SUB part of the opcode; take care
3379 // not to destroy the S bit.
3380 insn &= 0xff1ff000;
3381 // Set the opcode according to whether the value to go in the
3382 // place is negative.
3383 insn |= ((x < 0) ? 0x00400000 : 0x00800000);
3384 // Encode the offset (encoded Gn).
3385 insn |= gn;
3386
3387 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3388 return This::STATUS_OKAY;
3389 }
3390
3391 // R_ARM_LDR_PC_G0: S + A - P
3392 // R_ARM_LDR_PC_G1: S + A - P
3393 // R_ARM_LDR_PC_G2: S + A - P
3394 // R_ARM_LDR_SB_G0: S + A - B(S)
3395 // R_ARM_LDR_SB_G1: S + A - B(S)
3396 // R_ARM_LDR_SB_G2: S + A - B(S)
3397 static inline typename This::Status
3398 arm_grp_ldr(unsigned char* view,
3399 const Sized_relobj<32, big_endian>* object,
3400 const Symbol_value<32>* psymval,
3401 const int group,
3402 Arm_address address)
3403 {
3404 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3405 Valtype* wv = reinterpret_cast<Valtype*>(view);
3406 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3407
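// LDR/STR (immediate) encodes a 12-bit unsigned offset plus a U (add or
// subtract) bit, so the residual for this group must fit in 12 bits.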
3408 const int sign = (insn & 0x00800000) ? 1 : -1;
3409 int32_t addend = (insn & 0xfff) * sign;
3410 int32_t x = (psymval->value(object, addend) - address);
3411 // Calculate the relevant G(n-1) value to obtain this stage's residual.
3412 Valtype residual =
3413 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3414 if (residual >= 0x1000)
3415 return This::STATUS_OVERFLOW;
3416
3417 // Mask out the value and U bit.
3418 insn &= 0xff7ff000;
3419 // Set the U bit for non-negative values.
3420 if (x >= 0)
3421 insn |= 0x00800000;
3422 insn |= residual;
3423
3424 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3425 return This::STATUS_OKAY;
3426 }
3427
3428 // R_ARM_LDRS_PC_G0: S + A - P
3429 // R_ARM_LDRS_PC_G1: S + A - P
3430 // R_ARM_LDRS_PC_G2: S + A - P
3431 // R_ARM_LDRS_SB_G0: S + A - B(S)
3432 // R_ARM_LDRS_SB_G1: S + A - B(S)
3433 // R_ARM_LDRS_SB_G2: S + A - B(S)
3434 static inline typename This::Status
3435 arm_grp_ldrs(unsigned char* view,
3436 const Sized_relobj<32, big_endian>* object,
3437 const Symbol_value<32>* psymval,
3438 const int group,
3439 Arm_address address)
3440 {
3441 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3442 Valtype* wv = reinterpret_cast<Valtype*>(view);
3443 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3444
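// The halfword/signed-byte load/store forms split their 8-bit offset
// across bits 11-8 and 3-0, so the residual must fit in 8 bits.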
3445 const int sign = (insn & 0x00800000) ? 1 : -1;
3446 int32_t addend = (((insn & 0xf00) >> 4) + (insn & 0xf)) * sign;
3447 int32_t x = (psymval->value(object, addend) - address);
3448 // Calculate the relevant G(n-1) value to obtain this stage's residual.
3449 Valtype residual =
3450 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3451 if (residual >= 0x100)
3452 return This::STATUS_OVERFLOW;
3453
3454 // Mask out the value and U bit.
3455 insn &= 0xff7ff0f0;
3456 // Set the U bit for non-negative values.
3457 if (x >= 0)
3458 insn |= 0x00800000;
3459 insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
3460
3461 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3462 return This::STATUS_OKAY;
3463 }
3464
3465 // R_ARM_LDC_PC_G0: S + A - P
3466 // R_ARM_LDC_PC_G1: S + A - P
3467 // R_ARM_LDC_PC_G2: S + A - P
3468 // R_ARM_LDC_SB_G0: S + A - B(S)
3469 // R_ARM_LDC_SB_G1: S + A - B(S)
3470 // R_ARM_LDC_SB_G2: S + A - B(S)
3471 static inline typename This::Status
3472 arm_grp_ldc(unsigned char* view,
3473 const Sized_relobj<32, big_endian>* object,
3474 const Symbol_value<32>* psymval,
3475 const int group,
3476 Arm_address address)
3477 {
3478 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3479 Valtype* wv = reinterpret_cast<Valtype*>(view);
3480 Valtype insn = elfcpp::Swap<32, big_endian>::readval(wv);
3481
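// LDC/STC encode an 8-bit offset that is scaled by 4, so the residual
// must be word-aligned and smaller than 0x400.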
3482 const int sign = (insn & 0x00800000) ? 1 : -1;
3483 int32_t addend = ((insn & 0xff) << 2) * sign;
3484 int32_t x = (psymval->value(object, addend) - address);
3485 // Calculate the relevant G(n-1) value to obtain this stage's residual.
3486 Valtype residual =
3487 Arm_relocate_functions::calc_grp_residual(abs(x), group - 1);
3488 if ((residual & 0x3) != 0 || residual >= 0x400)
3489 return This::STATUS_OVERFLOW;
3490
3491 // Mask out the value and U bit.
3492 insn &= 0xff7fff00;
3493 // Set the U bit for non-negative values.
3494 if (x >= 0)
3495 insn |= 0x00800000;
3496 insn |= (residual >> 2);
3497
3498 elfcpp::Swap<32, big_endian>::writeval(wv, insn);
3499 return This::STATUS_OKAY;
3500 }
3501 };
3502
3503 // Relocate ARM long branches. This handles relocation types
3504 // R_ARM_CALL, R_ARM_JUMP24, R_ARM_PLT32 and R_ARM_XPC25.
3505 // If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is weakly
3506 // undefined and we do not use a PLT in this relocation. In such a case,
3507 // the branch is converted into a NOP.
3508
3509 template<bool big_endian>
3510 typename Arm_relocate_functions<big_endian>::Status
3511 Arm_relocate_functions<big_endian>::arm_branch_common(
3512 unsigned int r_type,
3513 const Relocate_info<32, big_endian>* relinfo,
3514 unsigned char *view,
3515 const Sized_symbol<32>* gsym,
3516 const Arm_relobj<big_endian>* object,
3517 unsigned int r_sym,
3518 const Symbol_value<32>* psymval,
3519 Arm_address address,
3520 Arm_address thumb_bit,
3521 bool is_weakly_undefined_without_plt)
3522 {
3523 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
3524 Valtype* wv = reinterpret_cast<Valtype*>(view);
3525 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
3526
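// Classify the instruction: bits 31-28 hold the condition code (0xf
// selects the unconditional space used by BLX) and bits 27-24 distinguish
// B (0xa) from BL (0xb).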
3527 bool insn_is_b = (((val >> 28) & 0xf) <= 0xe)
3528 && ((val & 0x0f000000UL) == 0x0a000000UL);
3529 bool insn_is_uncond_bl = (val & 0xff000000UL) == 0xeb000000UL;
3530 bool insn_is_cond_bl = (((val >> 28) & 0xf) < 0xe)
3531 && ((val & 0x0f000000UL) == 0x0b000000UL);
3532 bool insn_is_blx = (val & 0xfe000000UL) == 0xfa000000UL;
3533 bool insn_is_any_branch = (val & 0x0e000000UL) == 0x0a000000UL;
3534
3535 // Check that the instruction is valid.
3536 if (r_type == elfcpp::R_ARM_CALL)
3537 {
3538 if (!insn_is_uncond_bl && !insn_is_blx)
3539 return This::STATUS_BAD_RELOC;
3540 }
3541 else if (r_type == elfcpp::R_ARM_JUMP24)
3542 {
3543 if (!insn_is_b && !insn_is_cond_bl)
3544 return This::STATUS_BAD_RELOC;
3545 }
3546 else if (r_type == elfcpp::R_ARM_PLT32)
3547 {
3548 if (!insn_is_any_branch)
3549 return This::STATUS_BAD_RELOC;
3550 }
3551 else if (r_type == elfcpp::R_ARM_XPC25)
3552 {
3553 // FIXME: AAELF document IHI0044C does not say much about it other
3554 // than it being obsolete.
3555 if (!insn_is_any_branch)
3556 return This::STATUS_BAD_RELOC;
3557 }
3558 else
3559 gold_unreachable();
3560
3561 // A branch to an undefined weak symbol is turned into a jump to
3562 // the next instruction unless a PLT entry will be created.
3563 // Do the same for local undefined symbols.
3564 // The jump to the next instruction is optimized as a NOP depending
3565 // on the architecture.
3566 const Target_arm<big_endian>* arm_target =
3567 Target_arm<big_endian>::default_target();
3568 if (is_weakly_undefined_without_plt)
3569 {
3570 Valtype cond = val & 0xf0000000U;
3571 if (arm_target->may_use_arm_nop())
3572 val = cond | 0x0320f000;
3573 else
3574 val = cond | 0x01a00000; // Using pre-UAL nop: mov r0, r0.
3575 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3576 return This::STATUS_OKAY;
3577 }
3578
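// The low 24 bits of the instruction hold a signed word offset; shift
// left by two and sign-extend from bit 26 to get the byte addend.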
3579 Valtype addend = utils::sign_extend<26>(val << 2);
3580 Valtype branch_target = psymval->value(object, addend);
3581 int32_t branch_offset = branch_target - address;
3582
3583 // We need a stub if the branch offset is too large or if we need
3584 // to switch mode.
3585 bool may_use_blx = arm_target->may_use_blx();
3586 Reloc_stub* stub = NULL;
3587 if ((branch_offset > ARM_MAX_FWD_BRANCH_OFFSET)
3588 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
3589 || ((thumb_bit != 0) && !(may_use_blx && r_type == elfcpp::R_ARM_CALL)))
3590 {
3591 Stub_type stub_type =
3592 Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
3593 (thumb_bit != 0));
3594 if (stub_type != arm_stub_none)
3595 {
3596 Stub_table<big_endian>* stub_table =
3597 object->stub_table(relinfo->data_shndx);
3598 gold_assert(stub_table != NULL);
3599
3600 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
3601 stub = stub_table->find_reloc_stub(stub_key);
3602 gold_assert(stub != NULL);
3603 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
3604 branch_target = stub_table->address() + stub->offset() + addend;
3605 branch_offset = branch_target - address;
3606 gold_assert((branch_offset <= ARM_MAX_FWD_BRANCH_OFFSET)
3607 && (branch_offset >= ARM_MAX_BWD_BRANCH_OFFSET));
3608 }
3609 }
3610
3611 // At this point, if we still need to switch mode, the instruction
3612 // must either be a BLX or a BL that can be converted to a BLX.
3613 if (thumb_bit != 0)
3614 {
3615 // Turn BL to BLX.
3616 gold_assert(may_use_blx && r_type == elfcpp::R_ARM_CALL);
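// Bit 24 (H) of the BLX encoding carries bit 1 of the halfword-aligned
// target offset.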
3617 val = (val & 0xffffff) | 0xfa000000 | ((branch_offset & 2) << 23);
3618 }
3619
3620 val = utils::bit_select(val, (branch_offset >> 2), 0xffffffUL);
3621 elfcpp::Swap<32, big_endian>::writeval(wv, val);
3622 return (utils::has_overflow<26>(branch_offset)
3623 ? This::STATUS_OVERFLOW : This::STATUS_OKAY);
3624 }
3625
3626 // Relocate THUMB long branches. This handles relocation types
3627 // R_ARM_THM_CALL, R_ARM_THM_JUMP24 and R_ARM_THM_XPC22.
3628 // If IS_WEAKLY_UNDEFINED_WITHOUT_PLT is true, the target symbol is weakly
3629 // undefined and we do not use a PLT in this relocation. In such a case,
3630 // the branch is converted into a NOP.
3631
3632 template<bool big_endian>
3633 typename Arm_relocate_functions<big_endian>::Status
3634 Arm_relocate_functions<big_endian>::thumb_branch_common(
3635 unsigned int r_type,
3636 const Relocate_info<32, big_endian>* relinfo,
3637 unsigned char *view,
3638 const Sized_symbol<32>* gsym,
3639 const Arm_relobj<big_endian>* object,
3640 unsigned int r_sym,
3641 const Symbol_value<32>* psymval,
3642 Arm_address address,
3643 Arm_address thumb_bit,
3644 bool is_weakly_undefined_without_plt)
3645 {
3646 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3647 Valtype* wv = reinterpret_cast<Valtype*>(view);
3648 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
3649 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
3650
3651 // FIXME: These tests are too loose and do not take the THUMB/THUMB-2 difference
3652 // into account.
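// In the 32-bit Thumb BL/BLX encodings, bit 12 of the second halfword is
// 1 for BL and 0 for BLX.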
3653 bool is_bl_insn = (lower_insn & 0x1000U) == 0x1000U;
3654 bool is_blx_insn = (lower_insn & 0x1000U) == 0x0000U;
3655
3656 // Check that the instruction is valid.
3657 if (r_type == elfcpp::R_ARM_THM_CALL)
3658 {
3659 if (!is_bl_insn && !is_blx_insn)
3660 return This::STATUS_BAD_RELOC;
3661 }
3662 else if (r_type == elfcpp::R_ARM_THM_JUMP24)
3663 {
3664 // This cannot be a BLX.
3665 if (!is_bl_insn)
3666 return This::STATUS_BAD_RELOC;
3667 }
3668 else if (r_type == elfcpp::R_ARM_THM_XPC22)
3669 {
3670 // Check for Thumb to Thumb call.
3671 if (!is_blx_insn)
3672 return This::STATUS_BAD_RELOC;
3673 if (thumb_bit != 0)
3674 {
3675 gold_warning(_("%s: Thumb BLX instruction targets "
3676 "thumb function '%s'."),
3677 object->name().c_str(),
3678 (gsym ? gsym->name() : "(local)"));
3679 // Convert BLX to BL.
3680 lower_insn |= 0x1000U;
3681 }
3682 }
3683 else
3684 gold_unreachable();
3685
3686 // A branch to an undefined weak symbol is turned into a jump to
3687 // the next instruction unless a PLT entry will be created.
3688 // The jump to the next instruction is optimized as a NOP.W for
3689 // Thumb-2 enabled architectures.
3690 const Target_arm<big_endian>* arm_target =
3691 Target_arm<big_endian>::default_target();
3692 if (is_weakly_undefined_without_plt)
3693 {
3694 if (arm_target->may_use_thumb2_nop())
3695 {
3696 elfcpp::Swap<16, big_endian>::writeval(wv, 0xf3af);
3697 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0x8000);
3698 }
3699 else
3700 {
3701 elfcpp::Swap<16, big_endian>::writeval(wv, 0xe000);
3702 elfcpp::Swap<16, big_endian>::writeval(wv + 1, 0xbf00);
3703 }
3704 return This::STATUS_OKAY;
3705 }
3706
3707 int32_t addend = This::thumb32_branch_offset(upper_insn, lower_insn);
3708 Arm_address branch_target = psymval->value(object, addend);
3709 int32_t branch_offset = branch_target - address;
3710
3711 // We need a stub if the branch offset is too large or if we need
3712 // to switch mode.
3713 bool may_use_blx = arm_target->may_use_blx();
3714 bool thumb2 = arm_target->using_thumb2();
3715 if ((!thumb2
3716 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3717 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3718 || (thumb2
3719 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3720 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3721 || ((thumb_bit == 0)
3722 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
3723 || r_type == elfcpp::R_ARM_THM_JUMP24)))
3724 {
3725 Stub_type stub_type =
3726 Reloc_stub::stub_type_for_reloc(r_type, address, branch_target,
3727 (thumb_bit != 0));
3728 if (stub_type != arm_stub_none)
3729 {
3730 Stub_table<big_endian>* stub_table =
3731 object->stub_table(relinfo->data_shndx);
3732 gold_assert(stub_table != NULL);
3733
3734 Reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
3735 Reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
3736 gold_assert(stub != NULL);
3737 thumb_bit = stub->stub_template()->entry_in_thumb_mode() ? 1 : 0;
3738 branch_target = stub_table->address() + stub->offset() + addend;
3739 branch_offset = branch_target - address;
3740 }
3741 }
3742
3743 // At this point, if we still need to switch mode, the instruction
3744 // must either be a BLX or a BL that can be converted to a BLX.
3745 if (thumb_bit == 0)
3746 {
3747 gold_assert(may_use_blx
3748 && (r_type == elfcpp::R_ARM_THM_CALL
3749 || r_type == elfcpp::R_ARM_THM_XPC22));
3750 // Make sure this is a BLX.
3751 lower_insn &= ~0x1000U;
3752 }
3753 else
3754 {
3755 // Make sure this is a BL.
3756 lower_insn |= 0x1000U;
3757 }
3758
3759 if ((lower_insn & 0x5000U) == 0x4000U)
3760 // For a BLX instruction, make sure that the relocation is rounded up
3761 // to a word boundary. This follows the semantics of the instruction
3762 // which specifies that bit 1 of the target address will come from bit
3763 // 1 of the base address.
3764 branch_offset = (branch_offset + 2) & ~3;
3765
3766 // Put BRANCH_OFFSET back into the insn. Assumes two's complement.
3767 // We use the Thumb-2 encoding, which is safe even if dealing with
3768 // a Thumb-1 instruction by virtue of our overflow check above.
3769 upper_insn = This::thumb32_branch_upper(upper_insn, branch_offset);
3770 lower_insn = This::thumb32_branch_lower(lower_insn, branch_offset);
3771
3772 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
3773 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
3774
3775 return ((thumb2
3776 ? utils::has_overflow<25>(branch_offset)
3777 : utils::has_overflow<23>(branch_offset))
3778 ? This::STATUS_OVERFLOW
3779 : This::STATUS_OKAY);
3780 }
3781
3782 // Relocate THUMB-2 long conditional branches.
3783 // This handles relocation type R_ARM_THM_JUMP19.
3786
3787 template<bool big_endian>
3788 typename Arm_relocate_functions<big_endian>::Status
3789 Arm_relocate_functions<big_endian>::thm_jump19(
3790 unsigned char *view,
3791 const Arm_relobj<big_endian>* object,
3792 const Symbol_value<32>* psymval,
3793 Arm_address address,
3794 Arm_address thumb_bit)
3795 {
3796 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
3797 Valtype* wv = reinterpret_cast<Valtype*>(view);
3798 uint32_t upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
3799 uint32_t lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
3800 int32_t addend = This::thumb32_cond_branch_offset(upper_insn, lower_insn);
3801
3802 Arm_address branch_target = psymval->value(object, addend);
3803 int32_t branch_offset = branch_target - address;
3804
3805 // ??? Should handle interworking? GCC might someday try to
3806 // use this for tail calls.
3807 // FIXME: We do not support Thumb entry to the PLT yet.
3808 if (thumb_bit == 0)
3809 {
3810 gold_error(_("conditional branch to PLT in THUMB-2 not supported yet."));
3811 return This::STATUS_BAD_RELOC;
3812 }
3813
3814 // Put RELOCATION back into the insn.
3815 upper_insn = This::thumb32_cond_branch_upper(upper_insn, branch_offset);
3816 lower_insn = This::thumb32_cond_branch_lower(lower_insn, branch_offset);
3817
3818 // Put the relocated value back in the object file:
3819 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
3820 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
3821
3822 return (utils::has_overflow<21>(branch_offset)
3823 ? This::STATUS_OVERFLOW
3824 : This::STATUS_OKAY);
3825 }
3826
3827 // Get the GOT section, creating it if necessary.
3828
3829 template<bool big_endian>
3830 Output_data_got<32, big_endian>*
3831 Target_arm<big_endian>::got_section(Symbol_table* symtab, Layout* layout)
3832 {
3833 if (this->got_ == NULL)
3834 {
3835 gold_assert(symtab != NULL && layout != NULL);
3836
3837 this->got_ = new Output_data_got<32, big_endian>();
3838
3839 Output_section* os;
3840 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3841 (elfcpp::SHF_ALLOC
3842 | elfcpp::SHF_WRITE),
3843 this->got_, false, true, true,
3844 false);
3845
3846 // The old GNU linker creates a .got.plt section. We just
3847 // create another set of data in the .got section. Note that we
3848 // always create a PLT if we create a GOT, although the PLT
3849 // might be empty.
3850 this->got_plt_ = new Output_data_space(4, "** GOT PLT");
3851 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3852 (elfcpp::SHF_ALLOC
3853 | elfcpp::SHF_WRITE),
3854 this->got_plt_, false, false,
3855 false, true);
3856
3857 // The first three entries are reserved.
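// (Conventionally the first entry holds the address of the dynamic
// section and the next two are filled in by the dynamic linker at run
// time.)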
3858 this->got_plt_->set_current_data_size(3 * 4);
3859
3860 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT.
3861 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3862 Symbol_table::PREDEFINED,
3863 this->got_plt_,
3864 0, 0, elfcpp::STT_OBJECT,
3865 elfcpp::STB_LOCAL,
3866 elfcpp::STV_HIDDEN, 0,
3867 false, false);
3868 }
3869 return this->got_;
3870 }
3871
3872 // Get the dynamic reloc section, creating it if necessary.
3873
3874 template<bool big_endian>
3875 typename Target_arm<big_endian>::Reloc_section*
3876 Target_arm<big_endian>::rel_dyn_section(Layout* layout)
3877 {
3878 if (this->rel_dyn_ == NULL)
3879 {
3880 gold_assert(layout != NULL);
3881 this->rel_dyn_ = new Reloc_section(parameters->options().combreloc());
3882 layout->add_output_section_data(".rel.dyn", elfcpp::SHT_REL,
3883 elfcpp::SHF_ALLOC, this->rel_dyn_, true,
3884 false, false, false);
3885 }
3886 return this->rel_dyn_;
3887 }
3888
3889 // Insn_template methods.
3890
3891 // Return byte size of an instruction template.
3892
3893 size_t
3894 Insn_template::size() const
3895 {
3896 switch (this->type())
3897 {
3898 case THUMB16_TYPE:
3899 case THUMB16_SPECIAL_TYPE:
3900 return 2;
3901 case ARM_TYPE:
3902 case THUMB32_TYPE:
3903 case DATA_TYPE:
3904 return 4;
3905 default:
3906 gold_unreachable();
3907 }
3908 }
3909
3910 // Return alignment of an instruction template.
3911
3912 unsigned
3913 Insn_template::alignment() const
3914 {
3915 switch (this->type())
3916 {
3917 case THUMB16_TYPE:
3918 case THUMB16_SPECIAL_TYPE:
3919 case THUMB32_TYPE:
3920 return 2;
3921 case ARM_TYPE:
3922 case DATA_TYPE:
3923 return 4;
3924 default:
3925 gold_unreachable();
3926 }
3927 }
3928
3929 // Stub_template methods.
3930
3931 Stub_template::Stub_template(
3932 Stub_type type, const Insn_template* insns,
3933 size_t insn_count)
3934 : type_(type), insns_(insns), insn_count_(insn_count), alignment_(1),
3935 entry_in_thumb_mode_(false), relocs_()
3936 {
3937 off_t offset = 0;
3938
3939 // Compute byte size and alignment of stub template.
3940 for (size_t i = 0; i < insn_count; i++)
3941 {
3942 unsigned insn_alignment = insns[i].alignment();
3943 size_t insn_size = insns[i].size();
3944 gold_assert((offset & (insn_alignment - 1)) == 0);
3945 this->alignment_ = std::max(this->alignment_, insn_alignment);
3946 switch (insns[i].type())
3947 {
3948 case Insn_template::THUMB16_TYPE:
3949 case Insn_template::THUMB16_SPECIAL_TYPE:
3950 if (i == 0)
3951 this->entry_in_thumb_mode_ = true;
3952 break;
3953
3954 case Insn_template::THUMB32_TYPE:
3955 if (insns[i].r_type() != elfcpp::R_ARM_NONE)
3956 this->relocs_.push_back(Reloc(i, offset));
3957 if (i == 0)
3958 this->entry_in_thumb_mode_ = true;
3959 break;
3960
3961 case Insn_template::ARM_TYPE:
3962 // Handle cases where the target is encoded within the
3963 // instruction.
3964 if (insns[i].r_type() == elfcpp::R_ARM_JUMP24)
3965 this->relocs_.push_back(Reloc(i, offset));
3966 break;
3967
3968 case Insn_template::DATA_TYPE:
3969 // Entry point cannot be data.
3970 gold_assert(i != 0);
3971 this->relocs_.push_back(Reloc(i, offset));
3972 break;
3973
3974 default:
3975 gold_unreachable();
3976 }
3977 offset += insn_size;
3978 }
3979 this->size_ = offset;
3980 }
3981
3982 // Stub methods.
3983
3984 // Template to implement do_write for a specific target endianity.
3985
3986 template<bool big_endian>
3987 void inline
3988 Stub::do_fixed_endian_write(unsigned char* view, section_size_type view_size)
3989 {
3990 const Stub_template* stub_template = this->stub_template();
3991 const Insn_template* insns = stub_template->insns();
3992
3993 // FIXME: We do not handle BE8 encoding yet.
3994 unsigned char* pov = view;
3995 for (size_t i = 0; i < stub_template->insn_count(); i++)
3996 {
3997 switch (insns[i].type())
3998 {
3999 case Insn_template::THUMB16_TYPE:
4000 elfcpp::Swap<16, big_endian>::writeval(pov, insns[i].data() & 0xffff);
4001 break;
4002 case Insn_template::THUMB16_SPECIAL_TYPE:
4003 elfcpp::Swap<16, big_endian>::writeval(
4004 pov,
4005 this->thumb16_special(i));
4006 break;
4007 case Insn_template::THUMB32_TYPE:
4008 {
4009 uint32_t hi = (insns[i].data() >> 16) & 0xffff;
4010 uint32_t lo = insns[i].data() & 0xffff;
4011 elfcpp::Swap<16, big_endian>::writeval(pov, hi);
4012 elfcpp::Swap<16, big_endian>::writeval(pov + 2, lo);
4013 }
4014 break;
4015 case Insn_template::ARM_TYPE:
4016 case Insn_template::DATA_TYPE:
4017 elfcpp::Swap<32, big_endian>::writeval(pov, insns[i].data());
4018 break;
4019 default:
4020 gold_unreachable();
4021 }
4022 pov += insns[i].size();
4023 }
4024 gold_assert(static_cast<section_size_type>(pov - view) == view_size);
4025 }
4026
4027 // Reloc_stub::Key methods.
4028
4029 // Dump a Key as a string for debugging.
4030
4031 std::string
4032 Reloc_stub::Key::name() const
4033 {
4034 if (this->r_sym_ == invalid_index)
4035 {
4036 // Global symbol key name
4037 // <stub-type>:<symbol name>:<addend>.
4038 const std::string sym_name = this->u_.symbol->name();
4039 // We need to print two numbers and two colons, so just add 100 bytes
4040 // to the symbol name size.
4041 size_t len = sym_name.size() + 100;
4042 char* buffer = new char[len];
4043 int c = snprintf(buffer, len, "%d:%s:%x", this->stub_type_,
4044 sym_name.c_str(), this->addend_);
4045 gold_assert(c > 0 && c < static_cast<int>(len));
4046 std::string ret(buffer);
4047 delete[] buffer;
return ret;
4048 }
4049 else
4050 {
4051 // Local symbol key name
4052 // <stub-type>:<object>:<r_sym>:<addend>.
4053 const size_t len = 200;
4054 char buffer[len];
4055 int c = snprintf(buffer, len, "%d:%p:%u:%x", this->stub_type_,
4056 this->u_.relobj, this->r_sym_, this->addend_);
4057 gold_assert(c > 0 && c < static_cast<int>(len));
4058 return std::string(buffer);
4059 }
4060 }
4061
4062 // Reloc_stub methods.
4063
4064 // Determine the type of stub needed, if any, for a relocation of R_TYPE at
4065 // LOCATION to DESTINATION.
4066 // This code is based on the arm_type_of_stub function in
4067 // bfd/elf32-arm.c. We have changed the interface a little to keep the Stub
4068 // class simple.
4069
4070 Stub_type
4071 Reloc_stub::stub_type_for_reloc(
4072 unsigned int r_type,
4073 Arm_address location,
4074 Arm_address destination,
4075 bool target_is_thumb)
4076 {
4077 Stub_type stub_type = arm_stub_none;
4078
4079 // This is a bit ugly but we want to avoid using a templated class for
4080 // big and little endianities.
4081 bool may_use_blx;
4082 bool should_force_pic_veneer;
4083 bool thumb2;
4084 bool thumb_only;
4085 if (parameters->target().is_big_endian())
4086 {
4087 const Target_arm<true>* big_endian_target =
4088 Target_arm<true>::default_target();
4089 may_use_blx = big_endian_target->may_use_blx();
4090 should_force_pic_veneer = big_endian_target->should_force_pic_veneer();
4091 thumb2 = big_endian_target->using_thumb2();
4092 thumb_only = big_endian_target->using_thumb_only();
4093 }
4094 else
4095 {
4096 const Target_arm<false>* little_endian_target =
4097 Target_arm<false>::default_target();
4098 may_use_blx = little_endian_target->may_use_blx();
4099 should_force_pic_veneer = little_endian_target->should_force_pic_veneer();
4100 thumb2 = little_endian_target->using_thumb2();
4101 thumb_only = little_endian_target->using_thumb_only();
4102 }
4103
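// Compute the branch distance in 64 bits so that the subtraction cannot
// overflow for widely separated addresses.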
4104 int64_t branch_offset = (int64_t)destination - location;
4105
4106 if (r_type == elfcpp::R_ARM_THM_CALL || r_type == elfcpp::R_ARM_THM_JUMP24)
4107 {
4108 // Handle cases where:
4109 // - this call goes too far (different Thumb/Thumb2 max
4110 // distance)
4111 // - it's a Thumb->Arm call and blx is not available, or it's a
4112 // Thumb->Arm branch (not bl). A stub is needed in this case.
4113 if ((!thumb2
4114 && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4115 || (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4116 || (thumb2
4117 && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4118 || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4119 || ((!target_is_thumb)
4120 && (((r_type == elfcpp::R_ARM_THM_CALL) && !may_use_blx)
4121 || (r_type == elfcpp::R_ARM_THM_JUMP24))))
4122 {
4123 if (target_is_thumb)
4124 {
4125 // Thumb to thumb.
4126 if (!thumb_only)
4127 {
4128 stub_type = (parameters->options().shared()
4129 || should_force_pic_veneer)
4130 // PIC stubs.
4131 ? ((may_use_blx
4132 && (r_type == elfcpp::R_ARM_THM_CALL))
4133 // V5T and above. Stub starts with ARM code, so
4134 // we must be able to switch mode before
4135 // reaching it, which is only possible for 'bl'
4136 // (ie R_ARM_THM_CALL relocation).
4137 ? arm_stub_long_branch_any_thumb_pic
4138 // On V4T, use Thumb code only.
4139 : arm_stub_long_branch_v4t_thumb_thumb_pic)
4140
4141 // non-PIC stubs.
4142 : ((may_use_blx
4143 && (r_type == elfcpp::R_ARM_THM_CALL))
4144 ? arm_stub_long_branch_any_any // V5T and above.
4145 : arm_stub_long_branch_v4t_thumb_thumb); // V4T.
4146 }
4147 else
4148 {
4149 stub_type = (parameters->options().shared()
4150 || should_force_pic_veneer)
4151 ? arm_stub_long_branch_thumb_only_pic // PIC stub.
4152 : arm_stub_long_branch_thumb_only; // non-PIC stub.
4153 }
4154 }
4155 else
4156 {
4157 // Thumb to arm.
4158
4159 // FIXME: We should check that the input section is from an
4160 // object that has interwork enabled.
4161
4162 stub_type = (parameters->options().shared()
4163 || should_force_pic_veneer)
4164 // PIC stubs.
4165 ? ((may_use_blx
4166 && (r_type == elfcpp::R_ARM_THM_CALL))
4167 ? arm_stub_long_branch_any_arm_pic // V5T and above.
4168 : arm_stub_long_branch_v4t_thumb_arm_pic) // V4T.
4169
4170 // non-PIC stubs.
4171 : ((may_use_blx
4172 && (r_type == elfcpp::R_ARM_THM_CALL))
4173 ? arm_stub_long_branch_any_any // V5T and above.
4174 : arm_stub_long_branch_v4t_thumb_arm); // V4T.
4175
4176 // Handle v4t short branches.
4177 if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4178 && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4179 && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4180 stub_type = arm_stub_short_branch_v4t_thumb_arm;
4181 }
4182 }
4183 }
4184 else if (r_type == elfcpp::R_ARM_CALL
4185 || r_type == elfcpp::R_ARM_JUMP24
4186 || r_type == elfcpp::R_ARM_PLT32)
4187 {
4188 if (target_is_thumb)
4189 {
4190 // Arm to thumb.
4191
4192 // FIXME: We should check that the input section is from an
4193 // object that has interwork enabled.
4194
4195 // We have an extra 2-byte reach because of
4196 // the mode change (bit 24 (H) of BLX encoding).
4197 if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4198 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4199 || ((r_type == elfcpp::R_ARM_CALL) && !may_use_blx)
4200 || (r_type == elfcpp::R_ARM_JUMP24)
4201 || (r_type == elfcpp::R_ARM_PLT32))
4202 {
4203 stub_type = (parameters->options().shared()
4204 || should_force_pic_veneer)
4205 // PIC stubs.
4206 ? (may_use_blx
4207 ? arm_stub_long_branch_any_thumb_pic // V5T and above.
4208 : arm_stub_long_branch_v4t_arm_thumb_pic) // V4T stub.
4209
4210 // non-PIC stubs.
4211 : (may_use_blx
4212 ? arm_stub_long_branch_any_any // V5T and above.
4213 : arm_stub_long_branch_v4t_arm_thumb); // V4T.
4214 }
4215 }
4216 else
4217 {
4218 // Arm to arm.
4219 if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4220 || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4221 {
4222 stub_type = (parameters->options().shared()
4223 || should_force_pic_veneer)
4224 ? arm_stub_long_branch_any_arm_pic // PIC stubs.
4225 : arm_stub_long_branch_any_any; // non-PIC.
4226 }
4227 }
4228 }
4229
4230 return stub_type;
4231 }
4232
4233 // Cortex_a8_stub methods.
4234
4235 // Return the instruction for a THUMB16_SPECIAL_TYPE instruction template.
4236 // I is the position of the instruction template in the stub template.
4237
4238 uint16_t
4239 Cortex_a8_stub::do_thumb16_special(size_t i)
4240 {
4241 // The only use of this is to copy condition code from a conditional
4242 // branch being worked around to the corresponding conditional branch in
4243 // to the stub.
4244 gold_assert(this->stub_template()->type() == arm_stub_a8_veneer_b_cond
4245 && i == 0);
4246 uint16_t data = this->stub_template()->insns()[i].data();
4247 gold_assert((data & 0xff00U) == 0xd000U);
4248 data |= ((this->original_insn_ >> 22) & 0xf) << 8;
4249 return data;
4250 }
4251
4252 // Stub_factory methods.
4253
4254 Stub_factory::Stub_factory()
4255 {
4256 // The instruction template sequences are declared as static
4257 // objects and initialized the first time the constructor runs.
4258
4259 // Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
4260 // to reach the stub if necessary.
4261 static const Insn_template elf32_arm_stub_long_branch_any_any[] =
4262 {
4263 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
4264 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4265 // dcd R_ARM_ABS32(X)
4266 };
4267
4268 // V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
4269 // available.
4270 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb[] =
4271 {
4272 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4273 Insn_template::arm_insn(0xe12fff1c), // bx ip
4274 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4275 // dcd R_ARM_ABS32(X)
4276 };
4277
4278 // Thumb -> Thumb long branch stub. Used on M-profile architectures.
4279 static const Insn_template elf32_arm_stub_long_branch_thumb_only[] =
4280 {
4281 Insn_template::thumb16_insn(0xb401), // push {r0}
4282 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
4283 Insn_template::thumb16_insn(0x4684), // mov ip, r0
4284 Insn_template::thumb16_insn(0xbc01), // pop {r0}
4285 Insn_template::thumb16_insn(0x4760), // bx ip
4286 Insn_template::thumb16_insn(0xbf00), // nop
4287 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4288 // dcd R_ARM_ABS32(X)
4289 };
4290
4291 // V4T Thumb -> Thumb long branch stub. Using the stack is not
4292 // allowed.
4293 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
4294 {
4295 Insn_template::thumb16_insn(0x4778), // bx pc
4296 Insn_template::thumb16_insn(0x46c0), // nop
4297 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4298 Insn_template::arm_insn(0xe12fff1c), // bx ip
4299 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4300 // dcd R_ARM_ABS32(X)
4301 };
4302
4303 // V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
4304 // available.
4305 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm[] =
4306 {
4307 Insn_template::thumb16_insn(0x4778), // bx pc
4308 Insn_template::thumb16_insn(0x46c0), // nop
4309 Insn_template::arm_insn(0xe51ff004), // ldr pc, [pc, #-4]
4310 Insn_template::data_word(0, elfcpp::R_ARM_ABS32, 0),
4311 // dcd R_ARM_ABS32(X)
4312 };
4313
4314 // V4T Thumb -> ARM short branch stub. Shorter variant of the above
4315 // one, when the destination is close enough.
4316 static const Insn_template elf32_arm_stub_short_branch_v4t_thumb_arm[] =
4317 {
4318 Insn_template::thumb16_insn(0x4778), // bx pc
4319 Insn_template::thumb16_insn(0x46c0), // nop
4320 Insn_template::arm_rel_insn(0xea000000, -8), // b (X-8)
4321 };
4322
4323 // ARM/Thumb -> ARM long branch stub, PIC. On V5T and above, use
4324 // blx to reach the stub if necessary.
4325 static const Insn_template elf32_arm_stub_long_branch_any_arm_pic[] =
4326 {
4327 Insn_template::arm_insn(0xe59fc000), // ldr r12, [pc]
4328 Insn_template::arm_insn(0xe08ff00c), // add pc, pc, ip
4329 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
4330 // dcd R_ARM_REL32(X-4)
4331 };
4332
4333 // ARM/Thumb -> Thumb long branch stub, PIC. On V5T and above, use
4334 // blx to reach the stub if necessary. We can not add into pc;
4335 // it is not guaranteed to mode switch (different in ARMv6 and
4336 // ARMv7).
4337 static const Insn_template elf32_arm_stub_long_branch_any_thumb_pic[] =
4338 {
4339 Insn_template::arm_insn(0xe59fc004), // ldr r12, [pc, #4]
4340 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4341 Insn_template::arm_insn(0xe12fff1c), // bx ip
4342 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4343 // dcd R_ARM_REL32(X)
4344 };
4345
4346 // V4T ARM -> Thumb long branch stub, PIC.
4347 static const Insn_template elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
4348 {
4349 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
4350 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4351 Insn_template::arm_insn(0xe12fff1c), // bx ip
4352 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4353 // dcd R_ARM_REL32(X)
4354 };
4355
4356 // V4T Thumb -> ARM long branch stub, PIC.
4357 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
4358 {
4359 Insn_template::thumb16_insn(0x4778), // bx pc
4360 Insn_template::thumb16_insn(0x46c0), // nop
4361 Insn_template::arm_insn(0xe59fc000), // ldr ip, [pc, #0]
4362 Insn_template::arm_insn(0xe08cf00f), // add pc, ip, pc
4363 Insn_template::data_word(0, elfcpp::R_ARM_REL32, -4),
4364 // dcd R_ARM_REL32(X-4)
4365 };
4366
4367 // Thumb -> Thumb long branch stub, PIC. Used on M-profile
4368 // architectures.
4369 static const Insn_template elf32_arm_stub_long_branch_thumb_only_pic[] =
4370 {
4371 Insn_template::thumb16_insn(0xb401), // push {r0}
4372 Insn_template::thumb16_insn(0x4802), // ldr r0, [pc, #8]
4373 Insn_template::thumb16_insn(0x46fc), // mov ip, pc
4374 Insn_template::thumb16_insn(0x4484), // add ip, r0
4375 Insn_template::thumb16_insn(0xbc01), // pop {r0}
4376 Insn_template::thumb16_insn(0x4760), // bx ip
4377 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 4),
4378 // dcd R_ARM_REL32(X)
4379 };
4380
4381 // V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
4382 // allowed.
4383 static const Insn_template elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
4384 {
4385 Insn_template::thumb16_insn(0x4778), // bx pc
4386 Insn_template::thumb16_insn(0x46c0), // nop
4387 Insn_template::arm_insn(0xe59fc004), // ldr ip, [pc, #4]
4388 Insn_template::arm_insn(0xe08fc00c), // add ip, pc, ip
4389 Insn_template::arm_insn(0xe12fff1c), // bx ip
4390 Insn_template::data_word(0, elfcpp::R_ARM_REL32, 0),
4391 // dcd R_ARM_REL32(X)
4392 };
4393
4394 // Cortex-A8 erratum-workaround stubs.
4395
4396 // Stub used for conditional branches (which may be beyond +/-1MB away,
4397 // so we can't use a conditional branch to reach this stub).
4398
4399 // original code:
4400 //
4401 // b<cond> X
4402 // after:
4403 //
4404 static const Insn_template elf32_arm_stub_a8_veneer_b_cond[] =
4405 {
4406 Insn_template::thumb16_bcond_insn(0xd001), // b<cond>.n true
4407 Insn_template::thumb32_b_insn(0xf000b800, -4), // b.w after
4408 Insn_template::thumb32_b_insn(0xf000b800, -4) // true:
4409 // b.w X
4410 };
4411
4412 // Stub used for b.w and bl.w instructions.
4413
4414 static const Insn_template elf32_arm_stub_a8_veneer_b[] =
4415 {
4416 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
4417 };
4418
4419 static const Insn_template elf32_arm_stub_a8_veneer_bl[] =
4420 {
4421 Insn_template::thumb32_b_insn(0xf000b800, -4) // b.w dest
4422 };
4423
4424 // Stub used for Thumb-2 blx.w instructions. We modified the original blx.w
4425 // instruction (which switches to ARM mode) to point to this stub. Jump to
4426 // the real destination using an ARM-mode branch.
4427 static const Insn_template elf32_arm_stub_a8_veneer_blx[] =
4428 {
4429 Insn_template::arm_rel_insn(0xea000000, -8) // b dest
4430 };
4431
4432 // Stub used to provide interworking for the R_ARM_V4BX relocation
4433 // (bx r[n] instruction).
4434 static const Insn_template elf32_arm_stub_v4_veneer_bx[] =
4435 {
4436 Insn_template::arm_insn(0xe3100001), // tst r<n>, #1
4437 Insn_template::arm_insn(0x01a0f000), // moveq pc, r<n>
4438 Insn_template::arm_insn(0xe12fff10) // bx r<n>
4439 };
4440
4441 // Fill in the stub template look-up table. Stub templates are constructed
4442 // per instance of Stub_factory for fast look-up without locking
4443 // in a thread-enabled environment.
4444
4445 this->stub_templates_[arm_stub_none] =
4446 new Stub_template(arm_stub_none, NULL, 0);
4447
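// DEF_STUB(x) builds the Stub_template for arm_stub_##x from the
// elf32_arm_stub_##x instruction array defined above; DEF_STUBS expands
// to one DEF_STUB invocation per stub type.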
4448 #define DEF_STUB(x) \
4449 do \
4450 { \
4451 size_t array_size \
4452 = sizeof(elf32_arm_stub_##x) / sizeof(elf32_arm_stub_##x[0]); \
4453 Stub_type type = arm_stub_##x; \
4454 this->stub_templates_[type] = \
4455 new Stub_template(type, elf32_arm_stub_##x, array_size); \
4456 } \
4457 while (0);
4458
4459 DEF_STUBS
4460 #undef DEF_STUB
4461 }
4462
4463 // Stub_table methods.
4464
4465 // Remove all Cortex-A8 stubs.
4466
4467 template<bool big_endian>
4468 void
4469 Stub_table<big_endian>::remove_all_cortex_a8_stubs()
4470 {
4471 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
4472 p != this->cortex_a8_stubs_.end();
4473 ++p)
4474 delete p->second;
4475 this->cortex_a8_stubs_.clear();
4476 }
4477
4478 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
4479
4480 template<bool big_endian>
4481 void
4482 Stub_table<big_endian>::relocate_stub(
4483 Stub* stub,
4484 const Relocate_info<32, big_endian>* relinfo,
4485 Target_arm<big_endian>* arm_target,
4486 Output_section* output_section,
4487 unsigned char* view,
4488 Arm_address address,
4489 section_size_type view_size)
4490 {
4491 const Stub_template* stub_template = stub->stub_template();
4492 if (stub_template->reloc_count() != 0)
4493 {
4494 // Adjust view to cover the stub only.
4495 section_size_type offset = stub->offset();
4496 section_size_type stub_size = stub_template->size();
4497 gold_assert(offset + stub_size <= view_size);
4498
4499 arm_target->relocate_stub(stub, relinfo, output_section, view + offset,
4500 address + offset, stub_size);
4501 }
4502 }
4503
4504 // Relocate all stubs in this stub table.
4505
4506 template<bool big_endian>
4507 void
4508 Stub_table<big_endian>::relocate_stubs(
4509 const Relocate_info<32, big_endian>* relinfo,
4510 Target_arm<big_endian>* arm_target,
4511 Output_section* output_section,
4512 unsigned char* view,
4513 Arm_address address,
4514 section_size_type view_size)
4515 {
4516 // If we are passed a view bigger than the stub table's, we need to
4517 // adjust the view.
4518 gold_assert(address == this->address()
4519 && (view_size
4520 == static_cast<section_size_type>(this->data_size())));
4521
4522 // Relocate all relocation stubs.
4523 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4524 p != this->reloc_stubs_.end();
4525 ++p)
4526 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
4527 address, view_size);
4528
4529 // Relocate all Cortex-A8 stubs.
4530 for (Cortex_a8_stub_list::iterator p = this->cortex_a8_stubs_.begin();
4531 p != this->cortex_a8_stubs_.end();
4532 ++p)
4533 this->relocate_stub(p->second, relinfo, arm_target, output_section, view,
4534 address, view_size);
4535
4536 // Relocate all ARM V4BX stubs.
4537 for (Arm_v4bx_stub_list::iterator p = this->arm_v4bx_stubs_.begin();
4538 p != this->arm_v4bx_stubs_.end();
4539 ++p)
4540 {
4541 if (*p != NULL)
4542 this->relocate_stub(*p, relinfo, arm_target, output_section, view,
4543 address, view_size);
4544 }
4545 }
4546
4547 // Write out the stubs to file.
4548
4549 template<bool big_endian>
4550 void
4551 Stub_table<big_endian>::do_write(Output_file* of)
4552 {
4553 off_t offset = this->offset();
4554 const section_size_type oview_size =
4555 convert_to_section_size_type(this->data_size());
4556 unsigned char* const oview = of->get_output_view(offset, oview_size);
4557
4558 // Write relocation stubs.
4559 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4560 p != this->reloc_stubs_.end();
4561 ++p)
4562 {
4563 Reloc_stub* stub = p->second;
4564 Arm_address address = this->address() + stub->offset();
4565 gold_assert(address
4566 == align_address(address,
4567 stub->stub_template()->alignment()));
4568 stub->write(oview + stub->offset(), stub->stub_template()->size(),
4569 big_endian);
4570 }
4571
4572 // Write Cortex-A8 stubs.
4573 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4574 p != this->cortex_a8_stubs_.end();
4575 ++p)
4576 {
4577 Cortex_a8_stub* stub = p->second;
4578 Arm_address address = this->address() + stub->offset();
4579 gold_assert(address
4580 == align_address(address,
4581 stub->stub_template()->alignment()));
4582 stub->write(oview + stub->offset(), stub->stub_template()->size(),
4583 big_endian);
4584 }
4585
4586 // Write ARM V4BX relocation stubs.
4587 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4588 p != this->arm_v4bx_stubs_.end();
4589 ++p)
4590 {
4591 if (*p == NULL)
4592 continue;
4593
4594 Arm_address address = this->address() + (*p)->offset();
4595 gold_assert(address
4596 == align_address(address,
4597 (*p)->stub_template()->alignment()));
4598 (*p)->write(oview + (*p)->offset(), (*p)->stub_template()->size(),
4599 big_endian);
4600 }
4601
4602 of->write_output_view(this->offset(), oview_size, oview);
4603 }
4604
4605 // Update the data size and address alignment of the stub table at the end
4606 // of a relaxation pass. Return true if either the data size or the
4607 // alignment changed in this relaxation pass.
4608
4609 template<bool big_endian>
4610 bool
4611 Stub_table<big_endian>::update_data_size_and_addralign()
4612 {
4613 off_t size = 0;
4614 unsigned addralign = 1;
4615
4616 // Go over all stubs in table to compute data size and address alignment.
4617
4618 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4619 p != this->reloc_stubs_.end();
4620 ++p)
4621 {
4622 const Stub_template* stub_template = p->second->stub_template();
4623 addralign = std::max(addralign, stub_template->alignment());
4624 size = (align_address(size, stub_template->alignment())
4625 + stub_template->size());
4626 }
4627
4628 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4629 p != this->cortex_a8_stubs_.end();
4630 ++p)
4631 {
4632 const Stub_template* stub_template = p->second->stub_template();
4633 addralign = std::max(addralign, stub_template->alignment());
4634 size = (align_address(size, stub_template->alignment())
4635 + stub_template->size());
4636 }
4637
4638 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4639 p != this->arm_v4bx_stubs_.end();
4640 ++p)
4641 {
4642 if (*p == NULL)
4643 continue;
4644
4645 const Stub_template* stub_template = (*p)->stub_template();
4646 addralign = std::max(addralign, stub_template->alignment());
4647 size = (align_address(size, stub_template->alignment())
4648 + stub_template->size());
4649 }
4650
4651 // Check if either data size or alignment changed in this pass.
4652 // Update prev_data_size_ and prev_addralign_. These will be used
4653 // as the current data size and address alignment for the next pass.
4654 bool changed = size != this->prev_data_size_;
4655 this->prev_data_size_ = size;
4656
4657 if (addralign != this->prev_addralign_)
4658 changed = true;
4659 this->prev_addralign_ = addralign;
4660
4661 return changed;
4662 }
4663
4664 // Finalize the stubs. This sets the offsets of the stubs within the stub
4665 // table. It also marks all input sections needing Cortex-A8 workaround.
4666
4667 template<bool big_endian>
4668 void
4669 Stub_table<big_endian>::finalize_stubs()
4670 {
4671 off_t off = 0;
4672 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
4673 p != this->reloc_stubs_.end();
4674 ++p)
4675 {
4676 Reloc_stub* stub = p->second;
4677 const Stub_template* stub_template = stub->stub_template();
4678 uint64_t stub_addralign = stub_template->alignment();
4679 off = align_address(off, stub_addralign);
4680 stub->set_offset(off);
4681 off += stub_template->size();
4682 }
4683
4684 for (Cortex_a8_stub_list::const_iterator p = this->cortex_a8_stubs_.begin();
4685 p != this->cortex_a8_stubs_.end();
4686 ++p)
4687 {
4688 Cortex_a8_stub* stub = p->second;
4689 const Stub_template* stub_template = stub->stub_template();
4690 uint64_t stub_addralign = stub_template->alignment();
4691 off = align_address(off, stub_addralign);
4692 stub->set_offset(off);
4693 off += stub_template->size();
4694
4695 // Mark input section so that we can determine later if a code section
4696 // needs the Cortex-A8 workaround quickly.
4697 Arm_relobj<big_endian>* arm_relobj =
4698 Arm_relobj<big_endian>::as_arm_relobj(stub->relobj());
4699 arm_relobj->mark_section_for_cortex_a8_workaround(stub->shndx());
4700 }
4701
4702 for (Arm_v4bx_stub_list::const_iterator p = this->arm_v4bx_stubs_.begin();
4703 p != this->arm_v4bx_stubs_.end();
4704 ++p)
4705 {
4706 if (*p == NULL)
4707 continue;
4708
4709 const Stub_template* stub_template = (*p)->stub_template();
4710 uint64_t stub_addralign = stub_template->alignment();
4711 off = align_address(off, stub_addralign);
4712 (*p)->set_offset(off);
4713 off += stub_template->size();
4714 }
4715
4716 gold_assert(off <= this->prev_data_size_);
4717 }
4718
4719 // Apply Cortex-A8 workaround to an address range between VIEW_ADDRESS
4720 // and VIEW_ADDRESS + VIEW_SIZE - 1. VIEW points to the mapped address
4721 // of the address range seen by the linker.
4722
4723 template<bool big_endian>
4724 void
4725 Stub_table<big_endian>::apply_cortex_a8_workaround_to_address_range(
4726 Target_arm<big_endian>* arm_target,
4727 unsigned char* view,
4728 Arm_address view_address,
4729 section_size_type view_size)
4730 {
4731 // Cortex-A8 stubs are sorted by addresses of branches being fixed up.
4732 for (Cortex_a8_stub_list::const_iterator p =
4733 this->cortex_a8_stubs_.lower_bound(view_address);
4734 ((p != this->cortex_a8_stubs_.end())
4735 && (p->first < (view_address + view_size)));
4736 ++p)
4737 {
4738 // We do not store the THUMB bit in the LSB of either the branch address
4739 // or the stub offset. There is no need to strip the LSB.
4740 Arm_address branch_address = p->first;
4741 const Cortex_a8_stub* stub = p->second;
4742 Arm_address stub_address = this->address() + stub->offset();
4743
4744 // Offset of the branch instruction relative to this view.
4745 section_size_type offset =
4746 convert_to_section_size_type(branch_address - view_address);
4747 gold_assert((offset + 4) <= view_size);
4748
4749 arm_target->apply_cortex_a8_workaround(stub, stub_address,
4750 view + offset, branch_address);
4751 }
4752 }
4753
4754 // Arm_input_section methods.
4755
4756 // Initialize an Arm_input_section.
4757
4758 template<bool big_endian>
4759 void
4760 Arm_input_section<big_endian>::init()
4761 {
4762 Relobj* relobj = this->relobj();
4763 unsigned int shndx = this->shndx();
4764
4765 // Cache these to speed up size and alignment queries. It is too slow
4766 // to call section_addralign and section_size every time.
4767 this->original_addralign_ = relobj->section_addralign(shndx);
4768 this->original_size_ = relobj->section_size(shndx);
4769
4770 // We want to make this look like the original input section after
4771 // output sections are finalized.
4772 Output_section* os = relobj->output_section(shndx);
4773 off_t offset = relobj->output_section_offset(shndx);
4774 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
4775 this->set_address(os->address() + offset);
4776 this->set_file_offset(os->offset() + offset);
4777
4778 this->set_current_data_size(this->original_size_);
4779 this->finalize_data_size();
4780 }
4781
4782 template<bool big_endian>
4783 void
4784 Arm_input_section<big_endian>::do_write(Output_file* of)
4785 {
4786 // We have to write out the original section content.
4787 section_size_type section_size;
4788 const unsigned char* section_contents =
4789 this->relobj()->section_contents(this->shndx(), &section_size, false);
4790 of->write(this->offset(), section_contents, section_size);
4791
4792 // If this owns a stub table and it is not empty, write it.
4793 if (this->is_stub_table_owner() && !this->stub_table_->empty())
4794 this->stub_table_->write(of);
4795 }
4796
4797 // Finalize data size.
4798
4799 template<bool big_endian>
4800 void
4801 Arm_input_section<big_endian>::set_final_data_size()
4802 {
4803 // If this owns a stub table, finalize its data size as well.
4804 if (this->is_stub_table_owner())
4805 {
4806 uint64_t address = this->address();
4807
4808 // The stub table comes after the original section contents.
4809 address += this->original_size_;
4810 address = align_address(address, this->stub_table_->addralign());
4811 off_t offset = this->offset() + (address - this->address());
4812 this->stub_table_->set_address_and_file_offset(address, offset);
4813 address += this->stub_table_->data_size();
4814 gold_assert(address == this->address() + this->current_data_size());
4815 }
4816
4817 this->set_data_size(this->current_data_size());
4818 }
4819
4820 // Reset address and file offset.
4821
4822 template<bool big_endian>
4823 void
4824 Arm_input_section<big_endian>::do_reset_address_and_file_offset()
4825 {
4826 // Size of the original input section contents.
4827 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
4828
4829 // If this is a stub table owner, account for the stub table size.
4830 if (this->is_stub_table_owner())
4831 {
4832 Stub_table<big_endian>* stub_table = this->stub_table_;
4833
4834 // Reset the stub table's address and file offset. The
4835 // current data size of this child will be updated after that.
4836 stub_table_->reset_address_and_file_offset();
4837 off = align_address(off, stub_table_->addralign());
4838 off += stub_table->current_data_size();
4839 }
4840
4841 this->set_current_data_size(off);
4842 }
4843
4844 // Arm_exidx_cantunwind methods.
4845
4846 // Write this to Output file OF for a fixed endianity.
4847
4848 template<bool big_endian>
4849 void
4850 Arm_exidx_cantunwind::do_fixed_endian_write(Output_file* of)
4851 {
4852 off_t offset = this->offset();
4853 const section_size_type oview_size = 8;
4854 unsigned char* const oview = of->get_output_view(offset, oview_size);
4855
4856 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
4857 Valtype* wv = reinterpret_cast<Valtype*>(oview);
4858
4859 Output_section* os = this->relobj_->output_section(this->shndx_);
4860 gold_assert(os != NULL);
4861
4862 Arm_relobj<big_endian>* arm_relobj =
4863 Arm_relobj<big_endian>::as_arm_relobj(this->relobj_);
4864 Arm_address output_offset =
4865 arm_relobj->get_output_section_offset(this->shndx_);
4866 Arm_address section_start;
4867 if (output_offset != Arm_relobj<big_endian>::invalid_address)
4868 section_start = os->address() + output_offset;
4869 else
4870 {
4871 // Currently this only happens for a relaxed section.
4872 const Output_relaxed_input_section* poris =
4873 os->find_relaxed_input_section(this->relobj_, this->shndx_);
4874 gold_assert(poris != NULL);
4875 section_start = poris->address();
4876 }
4877
4878 // We always append this to the end of an EXIDX section.
4879 Arm_address output_address =
4880 section_start + this->relobj_->section_size(this->shndx_);
4881
4882 // Write out the entry. The first word points either to the beginning
4883 // of, or just past the end of, a text section. The second word is the special
4884 // EXIDX_CANTUNWIND value.
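// The first word is a PREL31 value: a place-relative offset stored in
// bits 0-30, with bit 31 left clear.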
4885 uint32_t prel31_offset = output_address - this->address();
4886 if (utils::has_overflow<31>(prel31_offset))
4887 gold_error(_("PREL31 overflow in EXIDX_CANTUNWIND entry"));
4888 elfcpp::Swap<32, big_endian>::writeval(wv, prel31_offset & 0x7fffffffU);
4889 elfcpp::Swap<32, big_endian>::writeval(wv + 1, elfcpp::EXIDX_CANTUNWIND);
4890
4891 of->write_output_view(this->offset(), oview_size, oview);
4892 }
4893
4894 // Arm_exidx_merged_section methods.
4895
4896 // Constructor for Arm_exidx_merged_section.
4897 // EXIDX_INPUT_SECTION points to the unmodified EXIDX input section.
4898 // SECTION_OFFSET_MAP points to a section offset map describing how
4899 // parts of the input section are mapped to output. DELETED_BYTES is
4900 // the number of bytes deleted from the EXIDX input section.
4901
4902 Arm_exidx_merged_section::Arm_exidx_merged_section(
4903 const Arm_exidx_input_section& exidx_input_section,
4904 const Arm_exidx_section_offset_map& section_offset_map,
4905 uint32_t deleted_bytes)
4906 : Output_relaxed_input_section(exidx_input_section.relobj(),
4907 exidx_input_section.shndx(),
4908 exidx_input_section.addralign()),
4909 exidx_input_section_(exidx_input_section),
4910 section_offset_map_(section_offset_map)
4911 {
4912 // Fix size here so that we do not need to implement set_final_data_size.
4913 this->set_data_size(exidx_input_section.size() - deleted_bytes);
4914 this->fix_data_size();
4915 }
4916
4917 // Given an input OBJECT, an input section index SHNDX within that
4918 // object, and an OFFSET relative to the start of that input
4919 // section, return whether or not the corresponding offset within
4920 // the output section is known. If this function returns true, it
4921 // sets *POUTPUT to the output offset. The value -1 indicates that
4922 // this input offset is being discarded.
4923
4924 bool
4925 Arm_exidx_merged_section::do_output_offset(
4926 const Relobj* relobj,
4927 unsigned int shndx,
4928 section_offset_type offset,
4929 section_offset_type* poutput) const
4930 {
4931 // We only handle offsets for the original EXIDX input section.
4932 if (relobj != this->exidx_input_section_.relobj()
4933 || shndx != this->exidx_input_section_.shndx())
4934 return false;
4935
4936 section_offset_type section_size =
4937 convert_types<section_offset_type>(this->exidx_input_section_.size());
4938 if (offset < 0 || offset >= section_size)
4939 // Input offset is out of valid range.
4940 *poutput = -1;
4941 else
4942 {
4943 // We need to look up the section offset map to determine the output
4944 // offset. Find the reference point in the map whose offset is the first
4945 // one bigger than or equal to this offset.
4946 Arm_exidx_section_offset_map::const_iterator p =
4947 this->section_offset_map_.lower_bound(offset);
4948
4949 // The section offset maps are built such that this should not happen if
4950 // the input offset is in the valid range.
4951 gold_assert(p != this->section_offset_map_.end());
4952
4953 // We need to check if this is dropped.
4954 section_offset_type ref = p->first;
4955 section_offset_type mapped_ref = p->second;
4956
4957 if (mapped_ref != Arm_exidx_input_section::invalid_offset)
4958 // Offset is present in output.
4959 *poutput = mapped_ref + (offset - ref);
4960 else
4961 // Offset is discarded owing to EXIDX entry merging.
4962 *poutput = -1;
4963 }
4964
4965 return true;
4966 }
4967
4968 // Write this to output file OF.
4969
4970 void
4971 Arm_exidx_merged_section::do_write(Output_file* of)
4972 {
4973 // If we retain or discard the whole EXIDX input section, we would
4974 // not be here.
4975 gold_assert(this->data_size() != this->exidx_input_section_.size()
4976 && this->data_size() != 0);
4977
4978 off_t offset = this->offset();
4979 const section_size_type oview_size = this->data_size();
4980 unsigned char* const oview = of->get_output_view(offset, oview_size);
4981
4982 Output_section* os = this->relobj()->output_section(this->shndx());
4983 gold_assert(os != NULL);
4984
4985 // Get contents of EXIDX input section.
4986 section_size_type section_size;
4987 const unsigned char* section_contents =
4988 this->relobj()->section_contents(this->shndx(), &section_size, false);
4989 gold_assert(section_size == this->exidx_input_section_.size());
4990
4991 // Go over spans of input offsets and write only those that are not
4992 // discarded.
4993 section_offset_type in_start = 0;
4994 section_offset_type out_start = 0;
4995 for(Arm_exidx_section_offset_map::const_iterator p =
4996 this->section_offset_map_.begin();
4997 p != this->section_offset_map_.end();
4998 ++p)
4999 {
5000 section_offset_type in_end = p->first;
5001 gold_assert(in_end >= in_start);
5002 section_offset_type out_end = p->second;
5003 size_t in_chunk_size = convert_types<size_t>(in_end - in_start + 1);
5004 if (out_end != -1)
5005 {
5006 size_t out_chunk_size =
5007 convert_types<size_t>(out_end - out_start + 1);
5008 gold_assert(out_chunk_size == in_chunk_size);
5009 memcpy(oview + out_start, section_contents + in_start,
5010 out_chunk_size);
5011 out_start += out_chunk_size;
5012 }
5013 in_start += in_chunk_size;
5014 }
5015
5016 gold_assert(convert_to_section_size_type(out_start) == oview_size);
5017 of->write_output_view(this->offset(), oview_size, oview);
5018 }
5019
5020 // Arm_exidx_fixup methods.
5021
5022 // Append an EXIDX_CANTUNWIND in the current output section if the last entry
5023 // is not an EXIDX_CANTUNWIND entry already. The new EXIDX_CANTUNWIND entry
5024 // points to the end of the last seen EXIDX section.
5025
5026 void
5027 Arm_exidx_fixup::add_exidx_cantunwind_as_needed()
5028 {
5029 if (this->last_unwind_type_ != UT_EXIDX_CANTUNWIND
5030 && this->last_input_section_ != NULL)
5031 {
5032 Relobj* relobj = this->last_input_section_->relobj();
5033 unsigned int text_shndx = this->last_input_section_->link();
5034 Arm_exidx_cantunwind* cantunwind =
5035 new Arm_exidx_cantunwind(relobj, text_shndx);
5036 this->exidx_output_section_->add_output_section_data(cantunwind);
5037 this->last_unwind_type_ = UT_EXIDX_CANTUNWIND;
5038 }
5039 }
5040
5041 // Process an EXIDX section entry in input. Return whether this entry
5042 // can be deleted in the output. SECOND_WORD is the second word of the
5043 // EXIDX entry.
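// An entry is merged (deleted) when it would unwind identically to the
// entry immediately before it: for example, two consecutive
// EXIDX_CANTUNWIND entries, or two consecutive entries carrying the same
// inlined (bit-31-set) unwind data.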
5044
5045 bool
5046 Arm_exidx_fixup::process_exidx_entry(uint32_t second_word)
5047 {
5048 bool delete_entry;
5049 if (second_word == elfcpp::EXIDX_CANTUNWIND)
5050 {
5051 // Merge if previous entry is also an EXIDX_CANTUNWIND.
5052 delete_entry = this->last_unwind_type_ == UT_EXIDX_CANTUNWIND;
5053 this->last_unwind_type_ = UT_EXIDX_CANTUNWIND;
5054 }
5055 else if ((second_word & 0x80000000) != 0)
5056 {
5057 // Inlined unwinding data. Merge if equal to previous.
5058 delete_entry = (this->last_unwind_type_ == UT_INLINED_ENTRY
5059 && this->last_inlined_entry_ == second_word);
5060 this->last_unwind_type_ = UT_INLINED_ENTRY;
5061 this->last_inlined_entry_ = second_word;
5062 }
5063 else
5064 {
5065 // Normal table entry. In theory we could merge these too,
5066 // but duplicate entries are likely to be much less common.
5067 delete_entry = false;
5068 this->last_unwind_type_ = UT_NORMAL_ENTRY;
5069 }
5070 return delete_entry;
5071 }
5072
5073 // Update the current section offset map during EXIDX section fix-up.
5074 // If there is no map, create one. INPUT_OFFSET is the offset of a
5075 // reference point, DELETED_BYTES is the number of bytes deleted in the
5076 // section so far. If DELETE_ENTRY is true, the reference point and
5077 // all offsets after the previous reference point are discarded.
5078
5079 void
5080 Arm_exidx_fixup::update_offset_map(
5081 section_offset_type input_offset,
5082 section_size_type deleted_bytes,
5083 bool delete_entry)
5084 {
5085 if (this->section_offset_map_ == NULL)
5086 this->section_offset_map_ = new Arm_exidx_section_offset_map();
5087 section_offset_type output_offset = (delete_entry
5088 ? -1
5089 : input_offset - deleted_bytes);
5090 (*this->section_offset_map_)[input_offset] = output_offset;
5091 }
5092
5093 // Process EXIDX_INPUT_SECTION for EXIDX entry merging. Return the number of
5094 // bytes deleted. If some entries are merged, also store a pointer to a newly
5095 // created Arm_exidx_section_offset_map object in *PSECTION_OFFSET_MAP. The
5096 // caller owns the map and is responsible for releasing it after use.
5097
5098 template<bool big_endian>
5099 uint32_t
5100 Arm_exidx_fixup::process_exidx_section(
5101 const Arm_exidx_input_section* exidx_input_section,
5102 Arm_exidx_section_offset_map** psection_offset_map)
5103 {
5104 Relobj* relobj = exidx_input_section->relobj();
5105 unsigned int shndx = exidx_input_section->shndx();
5106 section_size_type section_size;
5107 const unsigned char* section_contents =
5108 relobj->section_contents(shndx, &section_size, false);
5109
5110 if ((section_size % 8) != 0)
5111 {
5112 // Something is wrong with this section. Better not touch it.
5113 gold_error(_("uneven .ARM.exidx section size in %s section %u"),
5114 relobj->name().c_str(), shndx);
5115 this->last_input_section_ = exidx_input_section;
5116 this->last_unwind_type_ = UT_NONE;
5117 return 0;
5118 }
5119
5120 uint32_t deleted_bytes = 0;
5121 bool prev_delete_entry = false;
5122 gold_assert(this->section_offset_map_ == NULL);
5123
5124 for (section_size_type i = 0; i < section_size; i += 8)
5125 {
5126 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5127 const Valtype* wv =
5128 reinterpret_cast<const Valtype*>(section_contents + i + 4);
5129 uint32_t second_word = elfcpp::Swap<32, big_endian>::readval(wv);
5130
5131 bool delete_entry = this->process_exidx_entry(second_word);
5132
5133 // Entry deletion causes changes in output offsets. We use a std::map
5134 // to record these. An entry (x, y) means that input offset x
5135 // is mapped to output offset y. If y is invalid_offset, then x is
5136 // dropped in the output. Because of the way std::map::lower_bound
5137 // works, we record the last offset in a region with respect to
5138 // keeping or dropping. If there is no entry (x0, y0) for an input
5139 // offset x0, its output offset y0 is determined by the output offset
5140 // y1 of the smallest input offset x1 > x0 that has an entry (x1, y1)
5141 // in the map. If y1 is not -1, then y0 = y1 + x0 - x1. Otherwise,
5142 // y0 is also -1.
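// For example, suppose an input section has three 8-byte entries at
// offsets [0,7], [8,15] and [16,23], and only the middle entry is
// deleted. The map built here would be { 7 -> 7, 15 -> -1, 23 -> 15 }:
// a lookup of input offset 18 finds the entry (23, 15) and yields
// 15 + (18 - 23) = 10, while a lookup of input offset 10 finds (15, -1)
// and reports the offset as discarded.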
5143 if (delete_entry != prev_delete_entry && i != 0)
5144 this->update_offset_map(i - 1, deleted_bytes, prev_delete_entry);
5145
5146 // Update total deleted bytes for this entry.
5147 if (delete_entry)
5148 deleted_bytes += 8;
5149
5150 prev_delete_entry = delete_entry;
5151 }
5152
5153 // If the section offset map is not NULL, make an entry for the end of
5154 // the section.
5155 if (this->section_offset_map_ != NULL)
5156 this->update_offset_map(section_size - 1, deleted_bytes, prev_delete_entry);
5157
5158 *psection_offset_map = this->section_offset_map_;
5159 this->section_offset_map_ = NULL;
5160 this->last_input_section_ = exidx_input_section;
5161
5162 return deleted_bytes;
5163 }
5164
5165 // Arm_output_section methods.
5166
5167 // Create a stub group for input sections from BEGIN to END. OWNER
5168 // points to the input section to be the owner of a new stub table.
5169
5170 template<bool big_endian>
5171 void
5172 Arm_output_section<big_endian>::create_stub_group(
5173 Input_section_list::const_iterator begin,
5174 Input_section_list::const_iterator end,
5175 Input_section_list::const_iterator owner,
5176 Target_arm<big_endian>* target,
5177 std::vector<Output_relaxed_input_section*>* new_relaxed_sections)
5178 {
5179 // We use a different kind of relaxed section in an EXIDX section.
5180 // The static casting from Output_relaxed_input_section to
5181 // Arm_input_section is invalid in an EXIDX section. We are okay
5182 // because we should not be calling this for an EXIDX section.
5183 gold_assert(this->type() != elfcpp::SHT_ARM_EXIDX);
5184
5185 // Currently we convert ordinary input sections into relaxed sections only
5186 // at this point, but we may want to support creating relaxed input sections
5187 // very early. So we check here to see if owner is already a relaxed
5188 // section.
5189
5190 Arm_input_section<big_endian>* arm_input_section;
5191 if (owner->is_relaxed_input_section())
5192 {
5193 arm_input_section =
5194 Arm_input_section<big_endian>::as_arm_input_section(
5195 owner->relaxed_input_section());
5196 }
5197 else
5198 {
5199 gold_assert(owner->is_input_section());
5200 // Create a new relaxed input section.
5201 arm_input_section =
5202 target->new_arm_input_section(owner->relobj(), owner->shndx());
5203 new_relaxed_sections->push_back(arm_input_section);
5204 }
5205
5206 // Create a stub table.
5207 Stub_table<big_endian>* stub_table =
5208 target->new_stub_table(arm_input_section);
5209
5210 arm_input_section->set_stub_table(stub_table);
5211
5212 Input_section_list::const_iterator p = begin;
5213 Input_section_list::const_iterator prev_p;
5214
5215 // Look for input sections or relaxed input sections in [begin ... end].
5216 do
5217 {
5218 if (p->is_input_section() || p->is_relaxed_input_section())
5219 {
5220 // The stub table information for input sections lives
5221 // in their objects.
5222 Arm_relobj<big_endian>* arm_relobj =
5223 Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
5224 arm_relobj->set_stub_table(p->shndx(), stub_table);
5225 }
5226 prev_p = p++;
5227 }
5228 while (prev_p != end);
5229 }
5230
5231 // Group input sections for stub generation. GROUP_SIZE is roughly the size
5232 // limit of a stub group. We grow a stub group by adding input sections until
5233 // the size is just below GROUP_SIZE. The last input section will be converted
5234 // into a stub table. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input
5235 // sections after the stub table, effectively doubling the group size.
5236 //
5237 // This is similar to the group_sections() function in elf32-arm.c but is
5238 // implemented differently.
5239
5240 template<bool big_endian>
5241 void
5242 Arm_output_section<big_endian>::group_sections(
5243 section_size_type group_size,
5244 bool stubs_always_after_branch,
5245 Target_arm<big_endian>* target)
5246 {
5247 // We only care about sections containing code.
5248 if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
5249 return;
5250
5251 // States for grouping.
5252 typedef enum
5253 {
5254 // No group is being built.
5255 NO_GROUP,
5256 // A group is being built but the stub table is not found yet.
5257 // We keep growing the stub group until its size is just under GROUP_SIZE.
5258 // The last input section in the group will be used as the stub table.
5259 FINDING_STUB_SECTION,
5260 // A group is being built and we have already found a stub table.
5261 // We enter this state to grow a stub group by adding input sections
5262 // after the stub table. This effectively doubles the group size.
5263 HAS_STUB_SECTION
5264 } State;
5265
5266 // Any newly created relaxed sections are stored here.
5267 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
5268
5269 State state = NO_GROUP;
5270 section_size_type off = 0;
5271 section_size_type group_begin_offset = 0;
5272 section_size_type group_end_offset = 0;
5273 section_size_type stub_table_end_offset = 0;
5274 Input_section_list::const_iterator group_begin =
5275 this->input_sections().end();
5276 Input_section_list::const_iterator stub_table =
5277 this->input_sections().end();
5278 Input_section_list::const_iterator group_end = this->input_sections().end();
5279 for (Input_section_list::const_iterator p = this->input_sections().begin();
5280 p != this->input_sections().end();
5281 ++p)
5282 {
5283 section_size_type section_begin_offset =
5284 align_address(off, p->addralign());
5285 section_size_type section_end_offset =
5286 section_begin_offset + p->data_size();
5287
5288 // Check to see if we should group the previously seen sections.
5289 switch (state)
5290 {
5291 case NO_GROUP:
5292 break;
5293
5294 case FINDING_STUB_SECTION:
5295 // Adding this section makes the group larger than GROUP_SIZE.
5296 if (section_end_offset - group_begin_offset >= group_size)
5297 {
5298 if (stubs_always_after_branch)
5299 {
5300 gold_assert(group_end != this->input_sections().end());
5301 this->create_stub_group(group_begin, group_end, group_end,
5302 target, &new_relaxed_sections);
5303 state = NO_GROUP;
5304 }
5305 else
5306 {
5307 // But wait, there's more! Input sections up to
5308 // stub_group_size bytes after the stub table can be
5309 // handled by it too.
5310 state = HAS_STUB_SECTION;
5311 stub_table = group_end;
5312 stub_table_end_offset = group_end_offset;
5313 }
5314 }
5315 break;
5316
5317 case HAS_STUB_SECTION:
5318 // Adding this section makes the post stub-section group larger
5319 // than GROUP_SIZE.
5320 if (section_end_offset - stub_table_end_offset >= group_size)
5321 {
5322 gold_assert(group_end != this->input_sections().end());
5323 this->create_stub_group(group_begin, group_end, stub_table,
5324 target, &new_relaxed_sections);
5325 state = NO_GROUP;
5326 }
5327 break;
5328
5329 default:
5330 gold_unreachable();
5331 }
5332
5333 // If we see an input section and currently there is no group, start
5334 // a new one. Skip any empty sections.
5335 if ((p->is_input_section() || p->is_relaxed_input_section())
5336 && (p->relobj()->section_size(p->shndx()) != 0))
5337 {
5338 if (state == NO_GROUP)
5339 {
5340 state = FINDING_STUB_SECTION;
5341 group_begin = p;
5342 group_begin_offset = section_begin_offset;
5343 }
5344
5345 // Keep track of the last input section seen.
5346 group_end = p;
5347 group_end_offset = section_end_offset;
5348 }
5349
5350 off = section_end_offset;
5351 }
5352
5353 // Create a stub group for any ungrouped sections.
5354 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
5355 {
5356 gold_assert(group_end != this->input_sections().end());
5357 this->create_stub_group(group_begin, group_end,
5358 (state == FINDING_STUB_SECTION
5359 ? group_end
5360 : stub_table),
5361 target, &new_relaxed_sections);
5362 }
5363
5364 // Convert input sections into relaxed input sections in a batch.
5365 if (!new_relaxed_sections.empty())
5366 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
5367
5368 // Update the section offsets
5369 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
5370 {
5371 Arm_relobj<big_endian>* arm_relobj =
5372 Arm_relobj<big_endian>::as_arm_relobj(
5373 new_relaxed_sections[i]->relobj());
5374 unsigned int shndx = new_relaxed_sections[i]->shndx();
5375 // Tell Arm_relobj that this input section is converted.
5376 arm_relobj->convert_input_section_to_relaxed_section(shndx);
5377 }
5378 }
5379
5380 // Append non-empty text sections in this to LIST in ascending
5381 // order of their position in this.
5382
5383 template<bool big_endian>
5384 void
5385 Arm_output_section<big_endian>::append_text_sections_to_list(
5386 Text_section_list* list)
5387 {
5388 // We only care about text sections.
5389 if ((this->flags() & elfcpp::SHF_EXECINSTR) == 0)
5390 return;
5391
5392 gold_assert((this->flags() & elfcpp::SHF_ALLOC) != 0);
5393
5394 for (Input_section_list::const_iterator p = this->input_sections().begin();
5395 p != this->input_sections().end();
5396 ++p)
5397 {
5398 // We only care about plain or relaxed input sections. We also
5399 // ignore any merged sections.
5400 if ((p->is_input_section() || p->is_relaxed_input_section())
5401 && p->data_size() != 0)
5402 list->push_back(Text_section_list::value_type(p->relobj(),
5403 p->shndx()));
5404 }
5405 }
5406
5407 template<bool big_endian>
5408 void
5409 Arm_output_section<big_endian>::fix_exidx_coverage(
5410 const Text_section_list& sorted_text_sections,
5411 Symbol_table* symtab)
5412 {
5413 // We should only do this for the EXIDX output section.
5414 gold_assert(this->type() == elfcpp::SHT_ARM_EXIDX);
5415
5416 // We don't want the relaxation loop to undo these changes, so we discard
5417 // the current saved states and take new ones after the fix-up.
5418 this->discard_states();
5419
5420 // Remove all input sections.
5421 uint64_t address = this->address();
5422 typedef std::list<Simple_input_section> Simple_input_section_list;
5423 Simple_input_section_list input_sections;
5424 this->reset_address_and_file_offset();
5425 this->get_input_sections(address, std::string(""), &input_sections);
5426
5427 if (!this->input_sections().empty())
5428 gold_error(_("Found non-EXIDX input sections in EXIDX output section"));
5429
5430 // Go through all the known input sections and record them.
5431 typedef Unordered_set<Section_id, Section_id_hash> Section_id_set;
5432 Section_id_set known_input_sections;
5433 for (Simple_input_section_list::const_iterator p = input_sections.begin();
5434 p != input_sections.end();
5435 ++p)
5436 {
5437 // This should never happen. At this point, we should only see
5438 // plain EXIDX input sections.
5439 gold_assert(!p->is_relaxed_input_section());
5440 known_input_sections.insert(Section_id(p->relobj(), p->shndx()));
5441 }
5442
5443 Arm_exidx_fixup exidx_fixup(this);
5444
5445 // Go over the sorted text sections.
5446 Section_id_set processed_input_sections;
5447 for (Text_section_list::const_iterator p = sorted_text_sections.begin();
5448 p != sorted_text_sections.end();
5449 ++p)
5450 {
5451 Relobj* relobj = p->first;
5452 unsigned int shndx = p->second;
5453
5454 Arm_relobj<big_endian>* arm_relobj =
5455 Arm_relobj<big_endian>::as_arm_relobj(relobj);
5456 const Arm_exidx_input_section* exidx_input_section =
5457 arm_relobj->exidx_input_section_by_link(shndx);
5458
5459 // If this text section has no EXIDX section, force an EXIDX_CANTUNWIND
5460 // entry pointing to the end of the last seen EXIDX section.
5461 if (exidx_input_section == NULL)
5462 {
5463 exidx_fixup.add_exidx_cantunwind_as_needed();
5464 continue;
5465 }
5466
5467 Relobj* exidx_relobj = exidx_input_section->relobj();
5468 unsigned int exidx_shndx = exidx_input_section->shndx();
5469 Section_id sid(exidx_relobj, exidx_shndx);
5470 if (known_input_sections.find(sid) == known_input_sections.end())
5471 {
5472 // This is odd. We have not seen this EXIDX input section before.
5473 // We cannot do fix-up.
5474 gold_error(_("EXIDX section %u of %s is not in EXIDX output section"),
5475 exidx_shndx, exidx_relobj->name().c_str());
5476 exidx_fixup.add_exidx_cantunwind_as_needed();
5477 continue;
5478 }
5479
5480 // Fix up coverage and append input section to output data list.
5481 Arm_exidx_section_offset_map* section_offset_map = NULL;
5482 uint32_t deleted_bytes =
5483 exidx_fixup.process_exidx_section<big_endian>(exidx_input_section,
5484 &section_offset_map);
5485
5486 if (deleted_bytes == exidx_input_section->size())
5487 {
5488 // The whole EXIDX section got merged. Remove it from output.
5489 gold_assert(section_offset_map == NULL);
5490 exidx_relobj->set_output_section(exidx_shndx, NULL);
5491
5492 // All local symbols defined in this input section will be dropped.
5493 // We need to adjust output local symbol count.
5494 arm_relobj->set_output_local_symbol_count_needs_update();
5495 }
5496 else if (deleted_bytes > 0)
5497 {
5498 // Some entries are merged. We need to convert this EXIDX input
5499 // section into a relaxed section.
5500 gold_assert(section_offset_map != NULL);
5501 Arm_exidx_merged_section* merged_section =
5502 new Arm_exidx_merged_section(*exidx_input_section,
5503 *section_offset_map, deleted_bytes);
5504 this->add_relaxed_input_section(merged_section);
5505 arm_relobj->convert_input_section_to_relaxed_section(exidx_shndx);
5506
5507 // All local symbols defined in discarded portions of this input
5508 // section will be dropped. We need to adjust output local symbol
5509 // count.
5510 arm_relobj->set_output_local_symbol_count_needs_update();
5511 }
5512 else
5513 {
5514 // Just add back the EXIDX input section.
5515 gold_assert(section_offset_map == NULL);
5516 Output_section::Simple_input_section sis(exidx_relobj, exidx_shndx);
5517 this->add_simple_input_section(sis, exidx_input_section->size(),
5518 exidx_input_section->addralign());
5519 }
5520
5521 processed_input_sections.insert(Section_id(exidx_relobj, exidx_shndx));
5522 }
5523
5524 // Insert an EXIDX_CANTUNWIND entry at the end of output if necessary.
5525 exidx_fixup.add_exidx_cantunwind_as_needed();
5526
5527 // Remove any known EXIDX input sections that are not processed.
5528 for (Simple_input_section_list::const_iterator p = input_sections.begin();
5529 p != input_sections.end();
5530 ++p)
5531 {
5532 if (processed_input_sections.find(Section_id(p->relobj(), p->shndx()))
5533 == processed_input_sections.end())
5534 {
5535 // We only discard a known EXIDX section because its linked
5536 // text section has been folded by ICF.
5537 Arm_relobj<big_endian>* arm_relobj =
5538 Arm_relobj<big_endian>::as_arm_relobj(p->relobj());
5539 const Arm_exidx_input_section* exidx_input_section =
5540 arm_relobj->exidx_input_section_by_shndx(p->shndx());
5541 gold_assert(exidx_input_section != NULL);
5542 unsigned int text_shndx = exidx_input_section->link();
5543 gold_assert(symtab->is_section_folded(p->relobj(), text_shndx));
5544
5545 // Remove this from link.
5546 p->relobj()->set_output_section(p->shndx(), NULL);
5547 }
5548 }
5549
5550 // Make changes permanent.
5551 this->save_states();
5552 this->set_section_offsets_need_adjustment();
5553 }
5554
5555 // Arm_relobj methods.
5556
5557 // Determine if we want to scan the SHNDX-th section for relocation stubs.
5558 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
5559
5560 template<bool big_endian>
5561 bool
5562 Arm_relobj<big_endian>::section_needs_reloc_stub_scanning(
5563 const elfcpp::Shdr<32, big_endian>& shdr,
5564 const Relobj::Output_sections& out_sections,
5565 const Symbol_table *symtab,
5566 const unsigned char* pshdrs)
5567 {
5568 unsigned int sh_type = shdr.get_sh_type();
5569 if (sh_type != elfcpp::SHT_REL && sh_type != elfcpp::SHT_RELA)
5570 return false;
5571
5572 // Ignore empty section.
5573 off_t sh_size = shdr.get_sh_size();
5574 if (sh_size == 0)
5575 return false;
5576
5577 // Ignore reloc section with bad info. This error will be
5578 // reported in the final link.
5579 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
5580 if (index >= this->shnum())
5581 return false;
5582
5583 // Skip this relocation section if it is against a section which we
5584 // discarded, or if that section has been folded into another
5585 // section due to ICF.
5586 if (out_sections[index] == NULL || symtab->is_section_folded(this, index))
5587 return false;
5588
5589 // Check the section to which relocations are applied. Ignore relocations
5590 // to unallocated sections or EXIDX sections.
5591 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
5592 const elfcpp::Shdr<32, big_endian> data_shdr(pshdrs + index * shdr_size);
5593 if ((data_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
5594 || data_shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
5595 return false;
5596
5597 // Ignore reloc section with unexpected symbol table. The
5598 // error will be reported in the final link.
5599 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
5600 return false;
5601
5602 unsigned int reloc_size;
5603 if (sh_type == elfcpp::SHT_REL)
5604 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
5605 else
5606 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
5607
5608 // Ignore reloc section with unexpected entsize or uneven size.
5609 // The error will be reported in the final link.
5610 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
5611 return false;
5612
5613 return true;
5614 }
5615
5616 // Determine if we want to scan the SHNDX-th section for non-relocation stubs.
5617 // This is a helper for Arm_relobj::scan_sections_for_stubs() below.
5618
5619 template<bool big_endian>
5620 bool
5621 Arm_relobj<big_endian>::section_needs_cortex_a8_stub_scanning(
5622 const elfcpp::Shdr<32, big_endian>& shdr,
5623 unsigned int shndx,
5624 Output_section* os,
5625 const Symbol_table* symtab)
5626 {
5627 // We only scan non-empty code sections.
5628 if ((shdr.get_sh_flags() & elfcpp::SHF_EXECINSTR) == 0
5629 || shdr.get_sh_size() == 0)
5630 return false;
5631
5632 // Ignore discarded or ICF'ed sections.
5633 if (os == NULL || symtab->is_section_folded(this, shndx))
5634 return false;
5635
5636 // Find output address of section.
5637 Arm_address address = os->output_address(this, shndx, 0);
5638
5639 // If the section does not cross any 4K-boundaries, it does not need to
5640 // be scanned.
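// For example, a 0x100-byte section placed at output address 0x8f80 ends
// at 0x907f and thus crosses the 0x9000 boundary, so it would be scanned.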
5641 if ((address & ~0xfffU) == ((address + shdr.get_sh_size() - 1) & ~0xfffU))
5642 return false;
5643
5644 return true;
5645 }
5646
5647 // Scan a section for Cortex-A8 workaround.
5648
5649 template<bool big_endian>
5650 void
5651 Arm_relobj<big_endian>::scan_section_for_cortex_a8_erratum(
5652 const elfcpp::Shdr<32, big_endian>& shdr,
5653 unsigned int shndx,
5654 Output_section* os,
5655 Target_arm<big_endian>* arm_target)
5656 {
5657 Arm_address output_address = os->output_address(this, shndx, 0);
5658
5659 // Get the section contents.
5660 section_size_type input_view_size = 0;
5661 const unsigned char* input_view =
5662 this->section_contents(shndx, &input_view_size, false);
5663
5664 // We need to go through the mapping symbols to determine what to
5665 // scan. There are two reasons. First, we should look at THUMB code and
5666 // THUMB code only. Second, we only want to look at code near 4K-page
5667 // boundaries, to speed up the scanning.
5668
5669 // Look for the first mapping symbol in this section. It should be
5670 // at (shndx, 0).
5671 Mapping_symbol_position section_start(shndx, 0);
5672 typename Mapping_symbols_info::const_iterator p =
5673 this->mapping_symbols_info_.lower_bound(section_start);
5674
5675 if (p == this->mapping_symbols_info_.end()
5676 || p->first != section_start)
5677 {
5678 gold_warning(_("Cortex-A8 erratum scanning failed because there "
5679 "are no mapping symbols for section %u of %s"),
5680 shndx, this->name().c_str());
5681 return;
5682 }
5683
5684 while (p != this->mapping_symbols_info_.end()
5685 && p->first.first == shndx)
5686 {
5687 typename Mapping_symbols_info::const_iterator next =
5688 this->mapping_symbols_info_.upper_bound(p->first);
5689
5690 // Only scan part of a section with THUMB code.
5691 if (p->second == 't')
5692 {
5693 // Determine the end of this range.
5694 section_size_type span_start =
5695 convert_to_section_size_type(p->first.second);
5696 section_size_type span_end;
5697 if (next != this->mapping_symbols_info_.end()
5698 && next->first.first == shndx)
5699 span_end = convert_to_section_size_type(next->first.second);
5700 else
5701 span_end = convert_to_section_size_type(shdr.get_sh_size());
5702
5703 if (((span_start + output_address) & ~0xfffUL)
5704 != ((span_end + output_address - 1) & ~0xfffUL))
5705 {
5706 arm_target->scan_span_for_cortex_a8_erratum(this, shndx,
5707 span_start, span_end,
5708 input_view,
5709 output_address);
5710 }
5711 }
5712
5713 p = next;
5714 }
5715 }
5716
5717 // Scan relocations for stub generation.
5718
5719 template<bool big_endian>
5720 void
5721 Arm_relobj<big_endian>::scan_sections_for_stubs(
5722 Target_arm<big_endian>* arm_target,
5723 const Symbol_table* symtab,
5724 const Layout* layout)
5725 {
5726 unsigned int shnum = this->shnum();
5727 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
5728
5729 // Read the section headers.
5730 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
5731 shnum * shdr_size,
5732 true, true);
5733
5734 // To speed up processing, we set up hash tables for fast lookup of
5735 // input offsets to output addresses.
5736 this->initialize_input_to_output_maps();
5737
5738 const Relobj::Output_sections& out_sections(this->output_sections());
5739
5740 Relocate_info<32, big_endian> relinfo;
5741 relinfo.symtab = symtab;
5742 relinfo.layout = layout;
5743 relinfo.object = this;
5744
5745 // Do relocation stubs scanning.
5746 const unsigned char* p = pshdrs + shdr_size;
5747 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
5748 {
5749 const elfcpp::Shdr<32, big_endian> shdr(p);
5750 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
5751 pshdrs))
5752 {
5753 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
5754 Arm_address output_offset = this->get_output_section_offset(index);
5755 Arm_address output_address;
5756 if (output_offset != invalid_address)
5757 output_address = out_sections[index]->address() + output_offset;
5758 else
5759 {
5760 // Currently this only happens for a relaxed section.
5761 const Output_relaxed_input_section* poris =
5762 out_sections[index]->find_relaxed_input_section(this, index);
5763 gold_assert(poris != NULL);
5764 output_address = poris->address();
5765 }
5766
5767 // Get the relocations.
5768 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
5769 shdr.get_sh_size(),
5770 true, false);
5771
5772 // Get the section contents. This does not work for the case in which
5773 // we modify the contents of an input section; we would need to pass
5774 // the output view under such circumstances.
5775 section_size_type input_view_size = 0;
5776 const unsigned char* input_view =
5777 this->section_contents(index, &input_view_size, false);
5778
5779 relinfo.reloc_shndx = i;
5780 relinfo.data_shndx = index;
5781 unsigned int sh_type = shdr.get_sh_type();
5782 unsigned int reloc_size;
5783 if (sh_type == elfcpp::SHT_REL)
5784 reloc_size = elfcpp::Elf_sizes<32>::rel_size;
5785 else
5786 reloc_size = elfcpp::Elf_sizes<32>::rela_size;
5787
5788 Output_section* os = out_sections[index];
5789 arm_target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
5790 shdr.get_sh_size() / reloc_size,
5791 os,
5792 output_offset == invalid_address,
5793 input_view, output_address,
5794 input_view_size);
5795 }
5796 }
5797
5798 // Do Cortex-A8 erratum stubs scanning. This has to be done for a section
5799 // after its relocation section, if there is one, is processed for
5800 // relocation stubs. Merging this loop with the one above would have been
5801 // complicated since we would have had to make sure that relocation stub
5802 // scanning is done first.
5803 if (arm_target->fix_cortex_a8())
5804 {
5805 const unsigned char* p = pshdrs + shdr_size;
5806 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
5807 {
5808 const elfcpp::Shdr<32, big_endian> shdr(p);
5809 if (this->section_needs_cortex_a8_stub_scanning(shdr, i,
5810 out_sections[i],
5811 symtab))
5812 this->scan_section_for_cortex_a8_erratum(shdr, i, out_sections[i],
5813 arm_target);
5814 }
5815 }
5816
5817 // After we've done the relocations, we release the hash tables,
5818 // since we no longer need them.
5819 this->free_input_to_output_maps();
5820 }
5821
5822 // Count the local symbols. The ARM backend needs to know if a symbol
5823 // is a THUMB function or not. For global symbols, it is easy because
5824 // the Symbol object keeps the ELF symbol type. For local symbols it is
5825 // harder because we cannot access this information. So we override
5826 // do_count_local_symbols in the parent and scan local symbols to mark
5827 // THUMB functions. This is not the most efficient way but I do not want to
5828 // slow down other ports by calling a per-symbol target hook inside
5829 // Sized_relobj<size, big_endian>::do_count_local_symbols.
5830
5831 template<bool big_endian>
5832 void
5833 Arm_relobj<big_endian>::do_count_local_symbols(
5834 Stringpool_template<char>* pool,
5835 Stringpool_template<char>* dynpool)
5836 {
5837 // We need to fix-up the values of any local symbols whose type are
5838 // STT_ARM_TFUNC.
5839
5840 // Ask parent to count the local symbols.
5841 Sized_relobj<32, big_endian>::do_count_local_symbols(pool, dynpool);
5842 const unsigned int loccount = this->local_symbol_count();
5843 if (loccount == 0)
5844 return;
5845
5846 // Initialize the THUMB function bit-vector.
5847 std::vector<bool> empty_vector(loccount, false);
5848 this->local_symbol_is_thumb_function_.swap(empty_vector);
5849
5850 // Read the symbol table section header.
5851 const unsigned int symtab_shndx = this->symtab_shndx();
5852 elfcpp::Shdr<32, big_endian>
5853 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
5854 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
5855
5856 // Read the local symbols.
5857 const int sym_size = elfcpp::Elf_sizes<32>::sym_size;
5858 gold_assert(loccount == symtabshdr.get_sh_info());
5859 off_t locsize = loccount * sym_size;
5860 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
5861 locsize, true, true);
5862
5863 // For mapping symbol processing, we need to read the symbol names.
5864 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
5865 if (strtab_shndx >= this->shnum())
5866 {
5867 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
5868 return;
5869 }
5870
5871 elfcpp::Shdr<32, big_endian>
5872 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
5873 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
5874 {
5875 this->error(_("symbol table name section has wrong type: %u"),
5876 static_cast<unsigned int>(strtabshdr.get_sh_type()));
5877 return;
5878 }
5879 const char* pnames =
5880 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
5881 strtabshdr.get_sh_size(),
5882 false, false));
5883
5884 // Loop over the local symbols and mark any local symbols pointing
5885 // to THUMB functions.
5886
5887 // Skip the first dummy symbol.
5888 psyms += sym_size;
5889 typename Sized_relobj<32, big_endian>::Local_values* plocal_values =
5890 this->local_values();
5891 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
5892 {
5893 elfcpp::Sym<32, big_endian> sym(psyms);
5894 elfcpp::STT st_type = sym.get_st_type();
5895 Symbol_value<32>& lv((*plocal_values)[i]);
5896 Arm_address input_value = lv.input_value();
5897
5898 // Check to see if this is a mapping symbol.
5899 const char* sym_name = pnames + sym.get_st_name();
5900 if (Target_arm<big_endian>::is_mapping_symbol_name(sym_name))
5901 {
5902 unsigned int input_shndx = sym.get_st_shndx();
5903
5904 // Strip off the LSB in case this is a THUMB symbol.
5905 Mapping_symbol_position msp(input_shndx, input_value & ~1U);
5906 this->mapping_symbols_info_[msp] = sym_name[1];
5907 }
5908
5909 if (st_type == elfcpp::STT_ARM_TFUNC
5910 || (st_type == elfcpp::STT_FUNC && ((input_value & 1) != 0)))
5911 {
5912 // This is a THUMB function. Mark this and canonicalize the
5913 // symbol value by setting LSB.
5914 this->local_symbol_is_thumb_function_[i] = true;
5915 if ((input_value & 1) == 0)
5916 lv.set_input_value(input_value | 1);
5917 }
5918 }
5919 }
5920
5921 // Relocate sections.
5922 template<bool big_endian>
5923 void
5924 Arm_relobj<big_endian>::do_relocate_sections(
5925 const Symbol_table* symtab,
5926 const Layout* layout,
5927 const unsigned char* pshdrs,
5928 typename Sized_relobj<32, big_endian>::Views* pviews)
5929 {
5930 // Call parent to relocate sections.
5931 Sized_relobj<32, big_endian>::do_relocate_sections(symtab, layout, pshdrs,
5932 pviews);
5933
5934 // We do not generate stubs if doing a relocatable link.
5935 if (parameters->options().relocatable())
5936 return;
5937
5938 // Relocate stub tables.
5939 unsigned int shnum = this->shnum();
5940
5941 Target_arm<big_endian>* arm_target =
5942 Target_arm<big_endian>::default_target();
5943
5944 Relocate_info<32, big_endian> relinfo;
5945 relinfo.symtab = symtab;
5946 relinfo.layout = layout;
5947 relinfo.object = this;
5948
5949 for (unsigned int i = 1; i < shnum; ++i)
5950 {
5951 Arm_input_section<big_endian>* arm_input_section =
5952 arm_target->find_arm_input_section(this, i);
5953
5954 if (arm_input_section != NULL
5955 && arm_input_section->is_stub_table_owner()
5956 && !arm_input_section->stub_table()->empty())
5957 {
5958 // We cannot discard a section if it owns a stub table.
5959 Output_section* os = this->output_section(i);
5960 gold_assert(os != NULL);
5961
5962 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
5963 relinfo.reloc_shdr = NULL;
5964 relinfo.data_shndx = i;
5965 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<32>::shdr_size;
5966
5967 gold_assert((*pviews)[i].view != NULL);
5968
5969 // We are passed the output section view. Adjust it to cover the
5970 // stub table only.
5971 Stub_table<big_endian>* stub_table = arm_input_section->stub_table();
5972 gold_assert((stub_table->address() >= (*pviews)[i].address)
5973 && ((stub_table->address() + stub_table->data_size())
5974 <= (*pviews)[i].address + (*pviews)[i].view_size));
5975
5976 off_t offset = stub_table->address() - (*pviews)[i].address;
5977 unsigned char* view = (*pviews)[i].view + offset;
5978 Arm_address address = stub_table->address();
5979 section_size_type view_size = stub_table->data_size();
5980
5981 stub_table->relocate_stubs(&relinfo, arm_target, os, view, address,
5982 view_size);
5983 }
5984
5985 // Apply Cortex A8 workaround if applicable.
5986 if (this->section_has_cortex_a8_workaround(i))
5987 {
5988 unsigned char* view = (*pviews)[i].view;
5989 Arm_address view_address = (*pviews)[i].address;
5990 section_size_type view_size = (*pviews)[i].view_size;
5991 Stub_table<big_endian>* stub_table = this->stub_tables_[i];
5992
5993 // Adjust view to cover section.
5994 Output_section* os = this->output_section(i);
5995 gold_assert(os != NULL);
5996 Arm_address section_address = os->output_address(this, i, 0);
5997 uint64_t section_size = this->section_size(i);
5998
5999 gold_assert(section_address >= view_address
6000 && ((section_address + section_size)
6001 <= (view_address + view_size)));
6002
6003 unsigned char* section_view = view + (section_address - view_address);
6004
6005 // Apply the Cortex-A8 workaround to the output address range
6006 // corresponding to this input section.
6007 stub_table->apply_cortex_a8_workaround_to_address_range(
6008 arm_target,
6009 section_view,
6010 section_address,
6011 section_size);
6012 }
6013 }
6014 }
6015
6016 // Create a new EXIDX input section object for EXIDX section SHNDX with
6017 // header SHDR.
6018
6019 template<bool big_endian>
6020 void
6021 Arm_relobj<big_endian>::make_exidx_input_section(
6022 unsigned int shndx,
6023 const elfcpp::Shdr<32, big_endian>& shdr)
6024 {
6025 // Link .text section to its .ARM.exidx section in the same object.
6026 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
6027
6028 // Issue an error and ignore this EXIDX section if it does not point
6029 // to any text section.
6030 if (text_shndx == elfcpp::SHN_UNDEF)
6031 {
6032 gold_error(_("EXIDX section %u in %s has no linked text section"),
6033 shndx, this->name().c_str());
6034 return;
6035 }
6036
6037 // Issue an error and ignore this EXIDX section if it points to a text
6038 // section that already has an EXIDX section.
6039 if (this->exidx_section_map_[text_shndx] != NULL)
6040 {
6041 gold_error(_("EXIDX sections %u and %u both link to text section %u "
6042 "in %s"),
6043 shndx, this->exidx_section_map_[text_shndx]->shndx(),
6044 text_shndx, this->name().c_str());
6045 return;
6046 }
6047
6048 // Create an Arm_exidx_input_section object for this EXIDX section.
6049 Arm_exidx_input_section* exidx_input_section =
6050 new Arm_exidx_input_section(this, shndx, text_shndx, shdr.get_sh_size(),
6051 shdr.get_sh_addralign());
6052 this->exidx_section_map_[text_shndx] = exidx_input_section;
6053
6054 // Also map the EXIDX section index to this.
6055 gold_assert(this->exidx_section_map_[shndx] == NULL);
6056 this->exidx_section_map_[shndx] = exidx_input_section;
6057 }
6058
6059 // Read the symbol information.
6060
6061 template<bool big_endian>
6062 void
6063 Arm_relobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
6064 {
6065 // Call parent class to read symbol information.
6066 Sized_relobj<32, big_endian>::do_read_symbols(sd);
6067
6068 // Read processor-specific flags in ELF file header.
6069 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
6070 elfcpp::Elf_sizes<32>::ehdr_size,
6071 true, false);
6072 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
6073 this->processor_specific_flags_ = ehdr.get_e_flags();
6074
6075 // Go over the section headers and look for .ARM.attributes and .ARM.exidx
6076 // sections.
6077 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6078 const unsigned char *ps =
6079 sd->section_headers->data() + shdr_size;
6080 for (unsigned int i = 1; i < this->shnum(); ++i, ps += shdr_size)
6081 {
6082 elfcpp::Shdr<32, big_endian> shdr(ps);
6083 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
6084 {
6085 gold_assert(this->attributes_section_data_ == NULL);
6086 section_offset_type section_offset = shdr.get_sh_offset();
6087 section_size_type section_size =
6088 convert_to_section_size_type(shdr.get_sh_size());
6089 File_view* view = this->get_lasting_view(section_offset,
6090 section_size, true, false);
6091 this->attributes_section_data_ =
6092 new Attributes_section_data(view->data(), section_size);
6093 }
6094 else if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
6095 this->make_exidx_input_section(i, shdr);
6096 }
6097 }
6098
6099 // Process relocations for garbage collection. The ARM target uses .ARM.exidx
6100 // sections for unwinding. These sections are referenced implicitly by
6101 // text sections linked in the section headers. If we ignore these implicit
6102 // references, the .ARM.exidx sections and any .ARM.extab sections they use
6103 // will be garbage-collected incorrectly. Hence we override the same function
6104 // in the base class to handle these implicit references.
6105
6106 template<bool big_endian>
6107 void
6108 Arm_relobj<big_endian>::do_gc_process_relocs(Symbol_table* symtab,
6109 Layout* layout,
6110 Read_relocs_data* rd)
6111 {
6112 // First, call base class method to process relocations in this object.
6113 Sized_relobj<32, big_endian>::do_gc_process_relocs(symtab, layout, rd);
6114
6115 unsigned int shnum = this->shnum();
6116 const unsigned int shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6117 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
6118 shnum * shdr_size,
6119 true, true);
6120
6121 // Scan section headers for sections of type SHT_ARM_EXIDX. Add references
6122 // to these from the linked text sections.
6123 const unsigned char* ps = pshdrs + shdr_size;
6124 for (unsigned int i = 1; i < shnum; ++i, ps += shdr_size)
6125 {
6126 elfcpp::Shdr<32, big_endian> shdr(ps);
6127 if (shdr.get_sh_type() == elfcpp::SHT_ARM_EXIDX)
6128 {
6129 // Found an .ARM.exidx section, add it to the set of reachable
6130 // sections from its linked text section.
6131 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_link());
6132 symtab->gc()->add_reference(this, text_shndx, this, i);
6133 }
6134 }
6135 }
6136
6137 // Update output local symbol count. Owing to EXIDX entry merging, some local
6138 // symbols will be removed in output. Adjust output local symbol count
6139 // accordingly. We can only change the static output local symbol count. It
6140 // is too late to change the dynamic symbols.
6141
6142 template<bool big_endian>
6143 void
6144 Arm_relobj<big_endian>::update_output_local_symbol_count()
6145 {
6146 // Caller should check that this needs updating. We want the caller to check
6147 // because output_local_symbol_count_needs_update() is most likely inlined.
6148 gold_assert(this->output_local_symbol_count_needs_update_);
6149
6150 gold_assert(this->symtab_shndx() != -1U);
6151 if (this->symtab_shndx() == 0)
6152 {
6153 // This object has no symbols. Weird but legal.
6154 return;
6155 }
6156
6157 // Read the symbol table section header.
6158 const unsigned int symtab_shndx = this->symtab_shndx();
6159 elfcpp::Shdr<32, big_endian>
6160 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
6161 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
6162
6163 // Read the local symbols.
6164 const int sym_size = elfcpp::Elf_sizes<32>::sym_size;
6165 const unsigned int loccount = this->local_symbol_count();
6166 gold_assert(loccount == symtabshdr.get_sh_info());
6167 off_t locsize = loccount * sym_size;
6168 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
6169 locsize, true, true);
6170
6171 // Loop over the local symbols.
6172
6173 typedef typename Sized_relobj<32, big_endian>::Output_sections
6174 Output_sections;
6175 const Output_sections& out_sections(this->output_sections());
6176 unsigned int shnum = this->shnum();
6177 unsigned int count = 0;
6178 // Skip the first, dummy, symbol.
6179 psyms += sym_size;
6180 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
6181 {
6182 elfcpp::Sym<32, big_endian> sym(psyms);
6183
6184 Symbol_value<32>& lv((*this->local_values())[i]);
6185
6186 // This local symbol was already discarded by do_count_local_symbols.
6187 if (!lv.needs_output_symtab_entry())
6188 continue;
6189
6190 bool is_ordinary;
6191 unsigned int shndx = this->adjust_sym_shndx(i, sym.get_st_shndx(),
6192 &is_ordinary);
6193
6194 if (shndx < shnum)
6195 {
6196 Output_section* os = out_sections[shndx];
6197
6198 // This local symbol no longer has an output section. Discard it.
6199 if (os == NULL)
6200 {
6201 lv.set_no_output_symtab_entry();
6202 continue;
6203 }
6204
6205 // Currently we only discard parts of EXIDX input sections.
6206 // We explicitly check for a merged EXIDX input section to avoid
6207 // calling Output_section_data::output_offset unless necessary.
6208 if ((this->get_output_section_offset(shndx) == invalid_address)
6209 && (this->exidx_input_section_by_shndx(shndx) != NULL))
6210 {
6211 section_offset_type output_offset =
6212 os->output_offset(this, shndx, lv.input_value());
6213 if (output_offset == -1)
6214 {
6215 // This symbol is defined in a part of an EXIDX input section
6216 // that is discarded due to entry merging.
6217 lv.set_no_output_symtab_entry();
6218 continue;
6219 }
6220 }
6221 }
6222
6223 ++count;
6224 }
6225
6226 this->set_output_local_symbol_count(count);
6227 this->output_local_symbol_count_needs_update_ = false;
6228 }
6229
6230 // Arm_dynobj methods.
6231
6232 // Read the symbol information.
6233
6234 template<bool big_endian>
6235 void
6236 Arm_dynobj<big_endian>::do_read_symbols(Read_symbols_data* sd)
6237 {
6238 // Call parent class to read symbol information.
6239 Sized_dynobj<32, big_endian>::do_read_symbols(sd);
6240
6241 // Read processor-specific flags in ELF file header.
6242 const unsigned char* pehdr = this->get_view(elfcpp::file_header_offset,
6243 elfcpp::Elf_sizes<32>::ehdr_size,
6244 true, false);
6245 elfcpp::Ehdr<32, big_endian> ehdr(pehdr);
6246 this->processor_specific_flags_ = ehdr.get_e_flags();
6247
6248 // Read the attributes section if there is one.
6249 // We read from the end because gas seems to put it near the end of
6250 // the section headers.
6251 const size_t shdr_size = elfcpp::Elf_sizes<32>::shdr_size;
6252 const unsigned char *ps =
6253 sd->section_headers->data() + shdr_size * (this->shnum() - 1);
6254 for (unsigned int i = this->shnum(); i > 0; --i, ps -= shdr_size)
6255 {
6256 elfcpp::Shdr<32, big_endian> shdr(ps);
6257 if (shdr.get_sh_type() == elfcpp::SHT_ARM_ATTRIBUTES)
6258 {
6259 section_offset_type section_offset = shdr.get_sh_offset();
6260 section_size_type section_size =
6261 convert_to_section_size_type(shdr.get_sh_size());
6262 File_view* view = this->get_lasting_view(section_offset,
6263 section_size, true, false);
6264 this->attributes_section_data_ =
6265 new Attributes_section_data(view->data(), section_size);
6266 break;
6267 }
6268 }
6269 }
6270
6271 // Stub_addend_reader methods.
6272
6273 // Read the addend of a REL relocation of type R_TYPE at VIEW.
6274
6275 template<bool big_endian>
6276 elfcpp::Elf_types<32>::Elf_Swxword
6277 Stub_addend_reader<elfcpp::SHT_REL, big_endian>::operator()(
6278 unsigned int r_type,
6279 const unsigned char* view,
6280 const typename Reloc_types<elfcpp::SHT_REL, 32, big_endian>::Reloc&) const
6281 {
6282 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
6283
6284 switch (r_type)
6285 {
6286 case elfcpp::R_ARM_CALL:
6287 case elfcpp::R_ARM_JUMP24:
6288 case elfcpp::R_ARM_PLT32:
6289 {
6290 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
6291 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6292 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
6293 return utils::sign_extend<26>(val << 2);
6294 }
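// For instance, a BL instruction whose 24-bit immediate field is 0x000100
// yields an addend of 0x100 << 2 = 0x400 (+1024 bytes) after the shift
// and sign extension above.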
6295
6296 case elfcpp::R_ARM_THM_CALL:
6297 case elfcpp::R_ARM_THM_JUMP24:
6298 case elfcpp::R_ARM_THM_XPC22:
6299 {
6300 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
6301 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6302 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
6303 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
6304 return RelocFuncs::thumb32_branch_offset(upper_insn, lower_insn);
6305 }
6306
6307 case elfcpp::R_ARM_THM_JUMP19:
6308 {
6309 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
6310 const Valtype* wv = reinterpret_cast<const Valtype*>(view);
6311 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
6312 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
6313 return RelocFuncs::thumb32_cond_branch_offset(upper_insn, lower_insn);
6314 }
6315
6316 default:
6317 gold_unreachable();
6318 }
6319 }
6320
6321 // A class to handle the PLT data.
6322
6323 template<bool big_endian>
6324 class Output_data_plt_arm : public Output_section_data
6325 {
6326 public:
6327 typedef Output_data_reloc<elfcpp::SHT_REL, true, 32, big_endian>
6328 Reloc_section;
6329
6330 Output_data_plt_arm(Layout*, Output_data_space*);
6331
6332 // Add an entry to the PLT.
6333 void
6334 add_entry(Symbol* gsym);
6335
6336 // Return the .rel.plt section data.
6337 const Reloc_section*
6338 rel_plt() const
6339 { return this->rel_; }
6340
6341 protected:
6342 void
6343 do_adjust_output_section(Output_section* os);
6344
6345 // Write to a map file.
6346 void
6347 do_print_to_mapfile(Mapfile* mapfile) const
6348 { mapfile->print_output_data(this, _("** PLT")); }
6349
6350 private:
6351 // Template for the first PLT entry.
6352 static const uint32_t first_plt_entry[5];
6353
6354 // Template for subsequent PLT entries.
6355 static const uint32_t plt_entry[3];
6356
6357 // Set the final size.
6358 void
6359 set_final_data_size()
6360 {
6361 this->set_data_size(sizeof(first_plt_entry)
6362 + this->count_ * sizeof(plt_entry));
6363 }
6364
6365 // Write out the PLT data.
6366 void
6367 do_write(Output_file*);
6368
6369 // The reloc section.
6370 Reloc_section* rel_;
6371 // The .got.plt section.
6372 Output_data_space* got_plt_;
6373 // The number of PLT entries.
6374 unsigned int count_;
6375 };
6376
6377 // Create the PLT section. The ordinary .got section is an argument,
6378 // since we need to refer to the start. We also create our own .got
6379 // section just for PLT entries.
6380
6381 template<bool big_endian>
6382 Output_data_plt_arm<big_endian>::Output_data_plt_arm(Layout* layout,
6383 Output_data_space* got_plt)
6384 : Output_section_data(4), got_plt_(got_plt), count_(0)
6385 {
6386 this->rel_ = new Reloc_section(false);
6387 layout->add_output_section_data(".rel.plt", elfcpp::SHT_REL,
6388 elfcpp::SHF_ALLOC, this->rel_, true, false,
6389 false, false);
6390 }
6391
6392 template<bool big_endian>
6393 void
6394 Output_data_plt_arm<big_endian>::do_adjust_output_section(Output_section* os)
6395 {
6396 os->set_entsize(0);
6397 }
6398
6399 // Add an entry to the PLT.
6400
6401 template<bool big_endian>
6402 void
6403 Output_data_plt_arm<big_endian>::add_entry(Symbol* gsym)
6404 {
6405 gold_assert(!gsym->has_plt_offset());
6406
6407 // Note that when setting the PLT offset we skip the initial
6408 // reserved PLT entry.
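// With the 20-byte first entry and 12-byte subsequent entries defined
// below, the first symbol added gets PLT offset 20, the second 32, and
// so on.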
6409 gsym->set_plt_offset((this->count_) * sizeof(plt_entry)
6410 + sizeof(first_plt_entry));
6411
6412 ++this->count_;
6413
6414 section_offset_type got_offset = this->got_plt_->current_data_size();
6415
6416 // Every PLT entry needs a GOT entry which points back to the PLT
6417 // entry (this will be changed by the dynamic linker, normally
6418 // lazily when the function is called).
6419 this->got_plt_->set_current_data_size(got_offset + 4);
6420
6421 // Every PLT entry needs a reloc.
6422 gsym->set_needs_dynsym_entry();
6423 this->rel_->add_global(gsym, elfcpp::R_ARM_JUMP_SLOT, this->got_plt_,
6424 got_offset);
6425
6426 // Note that we don't need to save the symbol. The contents of the
6427 // PLT are independent of which symbols are used. The symbols only
6428 // appear in the relocations.
6429 }
6430
6431 // ARM PLTs.
6432 // FIXME: This is not very flexible. Right now this has only been tested
6433 // on armv5te. If we are to support additional architecture features like
6434 // Thumb-2 or BE8, we need to make this more flexible like GNU ld.
6435
6436 // The first entry in the PLT.
6437 template<bool big_endian>
6438 const uint32_t Output_data_plt_arm<big_endian>::first_plt_entry[5] =
6439 {
6440 0xe52de004, // str lr, [sp, #-4]!
6441 0xe59fe004, // ldr lr, [pc, #4]
6442 0xe08fe00e, // add lr, pc, lr
6443 0xe5bef008, // ldr pc, [lr, #8]!
6444 0x00000000, // &GOT[0] - .
6445 };
6446
6447 // Subsequent entries in the PLT.
6448
6449 template<bool big_endian>
6450 const uint32_t Output_data_plt_arm<big_endian>::plt_entry[3] =
6451 {
6452 0xe28fc600, // add ip, pc, #0xNN00000
6453 0xe28cca00, // add ip, ip, #0xNN000
6454 0xe5bcf000, // ldr pc, [ip, #0xNNN]!
6455 };
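// For example, a displacement of 0x00154321 from a PLT entry's PC to its
// GOT slot is encoded by do_write below as "add ip, pc, #0x100000",
// "add ip, ip, #0x54000" and "ldr pc, [ip, #0x321]!", splitting the
// offset into bits [27:20], [19:12] and [11:0] respectively.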
6456
6457 // Write out the PLT. This uses the hand-coded instructions above,
6458 // and adjusts them as needed. This is all specified by the ARM ELF
6459 // Processor Supplement.
6460
6461 template<bool big_endian>
6462 void
6463 Output_data_plt_arm<big_endian>::do_write(Output_file* of)
6464 {
6465 const off_t offset = this->offset();
6466 const section_size_type oview_size =
6467 convert_to_section_size_type(this->data_size());
6468 unsigned char* const oview = of->get_output_view(offset, oview_size);
6469
6470 const off_t got_file_offset = this->got_plt_->offset();
6471 const section_size_type got_size =
6472 convert_to_section_size_type(this->got_plt_->data_size());
6473 unsigned char* const got_view = of->get_output_view(got_file_offset,
6474 got_size);
6475 unsigned char* pov = oview;
6476
6477 Arm_address plt_address = this->address();
6478 Arm_address got_address = this->got_plt_->address();
6479
6480 // Write first PLT entry. All but the last word are constants.
6481 const size_t num_first_plt_words = (sizeof(first_plt_entry)
6482 / sizeof(plt_entry[0]));
6483 for (size_t i = 0; i < num_first_plt_words - 1; i++)
6484 elfcpp::Swap<32, big_endian>::writeval(pov + i * 4, first_plt_entry[i]);
6485 // Last word in first PLT entry is &GOT[0] - .
6486 elfcpp::Swap<32, big_endian>::writeval(pov + 16,
6487 got_address - (plt_address + 16));
6488 pov += sizeof(first_plt_entry);
6489
6490 unsigned char* got_pov = got_view;
6491
6492 memset(got_pov, 0, 12);
6493 got_pov += 12;
6494
6495 const int rel_size = elfcpp::Elf_sizes<32>::rel_size;
6496 unsigned int plt_offset = sizeof(first_plt_entry);
6497 unsigned int plt_rel_offset = 0;
6498 unsigned int got_offset = 12;
6499 const unsigned int count = this->count_;
6500 for (unsigned int i = 0;
6501 i < count;
6502 ++i,
6503 pov += sizeof(plt_entry),
6504 got_pov += 4,
6505 plt_offset += sizeof(plt_entry),
6506 plt_rel_offset += rel_size,
6507 got_offset += 4)
6508 {
6509 // Set and adjust the PLT entry itself.
6510 int32_t offset = ((got_address + got_offset)
6511 - (plt_address + plt_offset + 8));
6512
6513 gold_assert(offset >= 0 && offset < 0x0fffffff);
6514 uint32_t plt_insn0 = plt_entry[0] | ((offset >> 20) & 0xff);
6515 elfcpp::Swap<32, big_endian>::writeval(pov, plt_insn0);
6516 uint32_t plt_insn1 = plt_entry[1] | ((offset >> 12) & 0xff);
6517 elfcpp::Swap<32, big_endian>::writeval(pov + 4, plt_insn1);
6518 uint32_t plt_insn2 = plt_entry[2] | (offset & 0xfff);
6519 elfcpp::Swap<32, big_endian>::writeval(pov + 8, plt_insn2);
6520
6521 // Set the entry in the GOT.
6522 elfcpp::Swap<32, big_endian>::writeval(got_pov, plt_address);
6523 }
6524
6525 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
6526 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
6527
6528 of->write_output_view(offset, oview_size, oview);
6529 of->write_output_view(got_file_offset, got_size, got_view);
6530 }
6531
6532 // Create a PLT entry for a global symbol.
6533
6534 template<bool big_endian>
6535 void
6536 Target_arm<big_endian>::make_plt_entry(Symbol_table* symtab, Layout* layout,
6537 Symbol* gsym)
6538 {
6539 if (gsym->has_plt_offset())
6540 return;
6541
6542 if (this->plt_ == NULL)
6543 {
6544 // Create the GOT sections first.
6545 this->got_section(symtab, layout);
6546
6547 this->plt_ = new Output_data_plt_arm<big_endian>(layout, this->got_plt_);
6548 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6549 (elfcpp::SHF_ALLOC
6550 | elfcpp::SHF_EXECINSTR),
6551 this->plt_, false, false, false, false);
6552 }
6553 this->plt_->add_entry(gsym);
6554 }
6555
6556 // Report an unsupported relocation against a local symbol.
6557
6558 template<bool big_endian>
6559 void
6560 Target_arm<big_endian>::Scan::unsupported_reloc_local(
6561 Sized_relobj<32, big_endian>* object,
6562 unsigned int r_type)
6563 {
6564 gold_error(_("%s: unsupported reloc %u against local symbol"),
6565 object->name().c_str(), r_type);
6566 }
6567
6568 // We are about to emit a dynamic relocation of type R_TYPE. If the
6569 // dynamic linker does not support it, issue an error. The GNU linker
6570 // only issues a non-PIC error for an allocated read-only section.
6571 // Here we know the section is allocated, but we don't know that it is
6572 // read-only. But we check for all the relocation types which the
6573 // glibc dynamic linker supports, so it seems appropriate to issue an
6574 // error even if the section is not read-only.
6575
6576 template<bool big_endian>
6577 void
6578 Target_arm<big_endian>::Scan::check_non_pic(Relobj* object,
6579 unsigned int r_type)
6580 {
6581 switch (r_type)
6582 {
6583 // These are the relocation types supported by glibc for ARM.
6584 case elfcpp::R_ARM_RELATIVE:
6585 case elfcpp::R_ARM_COPY:
6586 case elfcpp::R_ARM_GLOB_DAT:
6587 case elfcpp::R_ARM_JUMP_SLOT:
6588 case elfcpp::R_ARM_ABS32:
6589 case elfcpp::R_ARM_ABS32_NOI:
6590 case elfcpp::R_ARM_PC24:
6591 // FIXME: The following 3 types are not supported by Android's dynamic
6592 // linker.
6593 case elfcpp::R_ARM_TLS_DTPMOD32:
6594 case elfcpp::R_ARM_TLS_DTPOFF32:
6595 case elfcpp::R_ARM_TLS_TPOFF32:
6596 return;
6597
6598 default:
6599 // This prevents us from issuing more than one error per reloc
6600 // section. But we can still wind up issuing more than one
6601 // error per object file.
6602 if (this->issued_non_pic_error_)
6603 return;
6604 object->error(_("requires unsupported dynamic reloc; "
6605 "recompile with -fPIC"));
6606 this->issued_non_pic_error_ = true;
6607 return;
6608
6609 case elfcpp::R_ARM_NONE:
6610 gold_unreachable();
6611 }
6612 }
6613
6614 // Scan a relocation for a local symbol.
6615 // FIXME: This only handles a subset of relocation types used by Android
6616 // on ARM v5te devices.
6617
6618 template<bool big_endian>
6619 inline void
6620 Target_arm<big_endian>::Scan::local(Symbol_table* symtab,
6621 Layout* layout,
6622 Target_arm* target,
6623 Sized_relobj<32, big_endian>* object,
6624 unsigned int data_shndx,
6625 Output_section* output_section,
6626 const elfcpp::Rel<32, big_endian>& reloc,
6627 unsigned int r_type,
6628 const elfcpp::Sym<32, big_endian>&)
6629 {
6630 r_type = get_real_reloc_type(r_type);
6631 switch (r_type)
6632 {
6633 case elfcpp::R_ARM_NONE:
6634 break;
6635
6636 case elfcpp::R_ARM_ABS32:
6637 case elfcpp::R_ARM_ABS32_NOI:
6638 // If building a shared library (or a position-independent
6639 // executable), we need to create a dynamic relocation for
6640 // this location. The relocation applied at link time will
6641 // apply the link-time value, so we flag the location with
6642 // an R_ARM_RELATIVE relocation so the dynamic loader can
6643 // relocate it easily.
6644 if (parameters->options().output_is_position_independent())
6645 {
6646 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6647 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
6648 // If we add support for reloc types other than R_ARM_ABS32,
6649 // we need to call check_non_pic(object, r_type) here.
6650 rel_dyn->add_local_relative(object, r_sym, elfcpp::R_ARM_RELATIVE,
6651 output_section, data_shndx,
6652 reloc.get_r_offset());
6653 }
6654 break;
6655
6656 case elfcpp::R_ARM_REL32:
6657 case elfcpp::R_ARM_THM_CALL:
6658 case elfcpp::R_ARM_CALL:
6659 case elfcpp::R_ARM_PREL31:
6660 case elfcpp::R_ARM_JUMP24:
6661 case elfcpp::R_ARM_THM_JUMP24:
6662 case elfcpp::R_ARM_THM_JUMP19:
6663 case elfcpp::R_ARM_PLT32:
6664 case elfcpp::R_ARM_THM_ABS5:
6665 case elfcpp::R_ARM_ABS8:
6666 case elfcpp::R_ARM_ABS12:
6667 case elfcpp::R_ARM_ABS16:
6668 case elfcpp::R_ARM_BASE_ABS:
6669 case elfcpp::R_ARM_MOVW_ABS_NC:
6670 case elfcpp::R_ARM_MOVT_ABS:
6671 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
6672 case elfcpp::R_ARM_THM_MOVT_ABS:
6673 case elfcpp::R_ARM_MOVW_PREL_NC:
6674 case elfcpp::R_ARM_MOVT_PREL:
6675 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
6676 case elfcpp::R_ARM_THM_MOVT_PREL:
6677 case elfcpp::R_ARM_MOVW_BREL_NC:
6678 case elfcpp::R_ARM_MOVT_BREL:
6679 case elfcpp::R_ARM_MOVW_BREL:
6680 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
6681 case elfcpp::R_ARM_THM_MOVT_BREL:
6682 case elfcpp::R_ARM_THM_MOVW_BREL:
6683 case elfcpp::R_ARM_THM_JUMP6:
6684 case elfcpp::R_ARM_THM_JUMP8:
6685 case elfcpp::R_ARM_THM_JUMP11:
6686 case elfcpp::R_ARM_V4BX:
6687 case elfcpp::R_ARM_THM_PC8:
6688 case elfcpp::R_ARM_THM_PC12:
6689 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
6690 case elfcpp::R_ARM_ALU_PC_G0_NC:
6691 case elfcpp::R_ARM_ALU_PC_G0:
6692 case elfcpp::R_ARM_ALU_PC_G1_NC:
6693 case elfcpp::R_ARM_ALU_PC_G1:
6694 case elfcpp::R_ARM_ALU_PC_G2:
6695 case elfcpp::R_ARM_ALU_SB_G0_NC:
6696 case elfcpp::R_ARM_ALU_SB_G0:
6697 case elfcpp::R_ARM_ALU_SB_G1_NC:
6698 case elfcpp::R_ARM_ALU_SB_G1:
6699 case elfcpp::R_ARM_ALU_SB_G2:
6700 case elfcpp::R_ARM_LDR_PC_G0:
6701 case elfcpp::R_ARM_LDR_PC_G1:
6702 case elfcpp::R_ARM_LDR_PC_G2:
6703 case elfcpp::R_ARM_LDR_SB_G0:
6704 case elfcpp::R_ARM_LDR_SB_G1:
6705 case elfcpp::R_ARM_LDR_SB_G2:
6706 case elfcpp::R_ARM_LDRS_PC_G0:
6707 case elfcpp::R_ARM_LDRS_PC_G1:
6708 case elfcpp::R_ARM_LDRS_PC_G2:
6709 case elfcpp::R_ARM_LDRS_SB_G0:
6710 case elfcpp::R_ARM_LDRS_SB_G1:
6711 case elfcpp::R_ARM_LDRS_SB_G2:
6712 case elfcpp::R_ARM_LDC_PC_G0:
6713 case elfcpp::R_ARM_LDC_PC_G1:
6714 case elfcpp::R_ARM_LDC_PC_G2:
6715 case elfcpp::R_ARM_LDC_SB_G0:
6716 case elfcpp::R_ARM_LDC_SB_G1:
6717 case elfcpp::R_ARM_LDC_SB_G2:
6718 break;
6719
6720 case elfcpp::R_ARM_GOTOFF32:
6721 // We need a GOT section.
6722 target->got_section(symtab, layout);
6723 break;
6724
6725 case elfcpp::R_ARM_BASE_PREL:
6726 // FIXME: What about this?
6727 break;
6728
6729 case elfcpp::R_ARM_GOT_BREL:
6730 case elfcpp::R_ARM_GOT_PREL:
6731 {
6732 // The symbol requires a GOT entry.
6733 Output_data_got<32, big_endian>* got =
6734 target->got_section(symtab, layout);
6735 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
6736 if (got->add_local(object, r_sym, GOT_TYPE_STANDARD))
6737 {
6738 // If we are generating a shared object, we need to add a
6739 // dynamic RELATIVE relocation for this symbol's GOT entry.
6740 if (parameters->options().output_is_position_independent())
6741 {
6742 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6743 unsigned int r_sym = elfcpp::elf_r_sym<32>(reloc.get_r_info());
6744 rel_dyn->add_local_relative(
6745 object, r_sym, elfcpp::R_ARM_RELATIVE, got,
6746 object->local_got_offset(r_sym, GOT_TYPE_STANDARD));
6747 }
6748 }
6749 }
6750 break;
6751
6752 case elfcpp::R_ARM_TARGET1:
6753 // This should have been mapped to another type already.
6754 // Fall through.
6755 case elfcpp::R_ARM_COPY:
6756 case elfcpp::R_ARM_GLOB_DAT:
6757 case elfcpp::R_ARM_JUMP_SLOT:
6758 case elfcpp::R_ARM_RELATIVE:
6759 // These are relocations which should only be seen by the
6760 // dynamic linker, and should never be seen here.
6761 gold_error(_("%s: unexpected reloc %u in object file"),
6762 object->name().c_str(), r_type);
6763 break;
6764
6765 default:
6766 unsupported_reloc_local(object, r_type);
6767 break;
6768 }
6769 }
6770
6771 // Report an unsupported relocation against a global symbol.
6772
6773 template<bool big_endian>
6774 void
6775 Target_arm<big_endian>::Scan::unsupported_reloc_global(
6776 Sized_relobj<32, big_endian>* object,
6777 unsigned int r_type,
6778 Symbol* gsym)
6779 {
6780 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6781 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6782 }
6783
6784 // Scan a relocation for a global symbol.
6785 // FIXME: This only handles a subset of relocation types used by Android
6786 // on ARM v5te devices.
6787
6788 template<bool big_endian>
6789 inline void
6790 Target_arm<big_endian>::Scan::global(Symbol_table* symtab,
6791 Layout* layout,
6792 Target_arm* target,
6793 Sized_relobj<32, big_endian>* object,
6794 unsigned int data_shndx,
6795 Output_section* output_section,
6796 const elfcpp::Rel<32, big_endian>& reloc,
6797 unsigned int r_type,
6798 Symbol* gsym)
6799 {
6800 r_type = get_real_reloc_type(r_type);
6801 switch (r_type)
6802 {
6803 case elfcpp::R_ARM_NONE:
6804 break;
6805
6806 case elfcpp::R_ARM_ABS32:
6807 case elfcpp::R_ARM_ABS32_NOI:
6808 {
6809 // Make a dynamic relocation if necessary.
6810 if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
6811 {
6812 if (target->may_need_copy_reloc(gsym))
6813 {
6814 target->copy_reloc(symtab, layout, object,
6815 data_shndx, output_section, gsym, reloc);
6816 }
6817 else if (gsym->can_use_relative_reloc(false))
6818 {
6819 // If we add support for reloc types other than R_ARM_ABS32,
6820 // we need to call check_non_pic(object, r_type) here.
6821 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6822 rel_dyn->add_global_relative(gsym, elfcpp::R_ARM_RELATIVE,
6823 output_section, object,
6824 data_shndx, reloc.get_r_offset());
6825 }
6826 else
6827 {
6828 // If we add support for reloc types other than R_ARM_ABS32,
6829 // we need to call check_non_pic(object, r_type) here.
6830 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6831 rel_dyn->add_global(gsym, r_type, output_section, object,
6832 data_shndx, reloc.get_r_offset());
6833 }
6834 }
6835 }
6836 break;
6837
6838 case elfcpp::R_ARM_MOVW_ABS_NC:
6839 case elfcpp::R_ARM_MOVT_ABS:
6840 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
6841 case elfcpp::R_ARM_THM_MOVT_ABS:
6842 case elfcpp::R_ARM_MOVW_PREL_NC:
6843 case elfcpp::R_ARM_MOVT_PREL:
6844 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
6845 case elfcpp::R_ARM_THM_MOVT_PREL:
6846 case elfcpp::R_ARM_MOVW_BREL_NC:
6847 case elfcpp::R_ARM_MOVT_BREL:
6848 case elfcpp::R_ARM_MOVW_BREL:
6849 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
6850 case elfcpp::R_ARM_THM_MOVT_BREL:
6851 case elfcpp::R_ARM_THM_MOVW_BREL:
6852 case elfcpp::R_ARM_THM_JUMP6:
6853 case elfcpp::R_ARM_THM_JUMP8:
6854 case elfcpp::R_ARM_THM_JUMP11:
6855 case elfcpp::R_ARM_V4BX:
6856 case elfcpp::R_ARM_THM_PC8:
6857 case elfcpp::R_ARM_THM_PC12:
6858 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
6859 case elfcpp::R_ARM_ALU_PC_G0_NC:
6860 case elfcpp::R_ARM_ALU_PC_G0:
6861 case elfcpp::R_ARM_ALU_PC_G1_NC:
6862 case elfcpp::R_ARM_ALU_PC_G1:
6863 case elfcpp::R_ARM_ALU_PC_G2:
6864 case elfcpp::R_ARM_ALU_SB_G0_NC:
6865 case elfcpp::R_ARM_ALU_SB_G0:
6866 case elfcpp::R_ARM_ALU_SB_G1_NC:
6867 case elfcpp::R_ARM_ALU_SB_G1:
6868 case elfcpp::R_ARM_ALU_SB_G2:
6869 case elfcpp::R_ARM_LDR_PC_G0:
6870 case elfcpp::R_ARM_LDR_PC_G1:
6871 case elfcpp::R_ARM_LDR_PC_G2:
6872 case elfcpp::R_ARM_LDR_SB_G0:
6873 case elfcpp::R_ARM_LDR_SB_G1:
6874 case elfcpp::R_ARM_LDR_SB_G2:
6875 case elfcpp::R_ARM_LDRS_PC_G0:
6876 case elfcpp::R_ARM_LDRS_PC_G1:
6877 case elfcpp::R_ARM_LDRS_PC_G2:
6878 case elfcpp::R_ARM_LDRS_SB_G0:
6879 case elfcpp::R_ARM_LDRS_SB_G1:
6880 case elfcpp::R_ARM_LDRS_SB_G2:
6881 case elfcpp::R_ARM_LDC_PC_G0:
6882 case elfcpp::R_ARM_LDC_PC_G1:
6883 case elfcpp::R_ARM_LDC_PC_G2:
6884 case elfcpp::R_ARM_LDC_SB_G0:
6885 case elfcpp::R_ARM_LDC_SB_G1:
6886 case elfcpp::R_ARM_LDC_SB_G2:
6887 break;
6888
6889 case elfcpp::R_ARM_THM_ABS5:
6890 case elfcpp::R_ARM_ABS8:
6891 case elfcpp::R_ARM_ABS12:
6892 case elfcpp::R_ARM_ABS16:
6893 case elfcpp::R_ARM_BASE_ABS:
6894 {
6895 // No dynamic relocs of these kinds.
6896 // Report an error in the PIC case.
6897 int flags = Symbol::NON_PIC_REF;
6898 if (gsym->type() == elfcpp::STT_FUNC
6899 || gsym->type() == elfcpp::STT_ARM_TFUNC)
6900 flags |= Symbol::FUNCTION_CALL;
6901 if (gsym->needs_dynamic_reloc(flags))
6902 check_non_pic(object, r_type);
6903 }
6904 break;
6905
6906 case elfcpp::R_ARM_REL32:
6907 {
6908 // Make a dynamic relocation if necessary.
6909 int flags = Symbol::NON_PIC_REF;
6910 if (gsym->needs_dynamic_reloc(flags))
6911 {
6912 if (target->may_need_copy_reloc(gsym))
6913 {
6914 target->copy_reloc(symtab, layout, object,
6915 data_shndx, output_section, gsym, reloc);
6916 }
6917 else
6918 {
6919 check_non_pic(object, r_type);
6920 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6921 rel_dyn->add_global(gsym, r_type, output_section, object,
6922 data_shndx, reloc.get_r_offset());
6923 }
6924 }
6925 }
6926 break;
6927
6928 case elfcpp::R_ARM_JUMP24:
6929 case elfcpp::R_ARM_THM_JUMP24:
6930 case elfcpp::R_ARM_THM_JUMP19:
6931 case elfcpp::R_ARM_CALL:
6932 case elfcpp::R_ARM_THM_CALL:
6933 case elfcpp::R_ARM_PLT32:
6934 case elfcpp::R_ARM_PREL31:
6935 case elfcpp::R_ARM_PC24:
6936 // If the symbol is fully resolved, this is just a relative
6937 // local reloc. Otherwise we need a PLT entry.
6938 if (gsym->final_value_is_known())
6939 break;
6940 // If building a shared library, we can also skip the PLT entry
6941 // if the symbol is defined in the output file and is protected
6942 // or hidden.
6943 if (gsym->is_defined()
6944 && !gsym->is_from_dynobj()
6945 && !gsym->is_preemptible())
6946 break;
6947 target->make_plt_entry(symtab, layout, gsym);
6948 break;
6949
6950 case elfcpp::R_ARM_GOTOFF32:
6951 // We need a GOT section.
6952 target->got_section(symtab, layout);
6953 break;
6954
6955 case elfcpp::R_ARM_BASE_PREL:
6956 // FIXME: What about this?
6957 break;
6958
6959 case elfcpp::R_ARM_GOT_BREL:
6960 case elfcpp::R_ARM_GOT_PREL:
6961 {
6962 // The symbol requires a GOT entry.
6963 Output_data_got<32, big_endian>* got =
6964 target->got_section(symtab, layout);
6965 if (gsym->final_value_is_known())
6966 got->add_global(gsym, GOT_TYPE_STANDARD);
6967 else
6968 {
6969 // If this symbol is not fully resolved, we need to add a
6970 // GOT entry with a dynamic relocation.
6971 Reloc_section* rel_dyn = target->rel_dyn_section(layout);
6972 if (gsym->is_from_dynobj()
6973 || gsym->is_undefined()
6974 || gsym->is_preemptible())
6975 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6976 rel_dyn, elfcpp::R_ARM_GLOB_DAT);
6977 else
6978 {
6979 if (got->add_global(gsym, GOT_TYPE_STANDARD))
6980 rel_dyn->add_global_relative(
6981 gsym, elfcpp::R_ARM_RELATIVE, got,
6982 gsym->got_offset(GOT_TYPE_STANDARD));
6983 }
6984 }
6985 }
6986 break;
6987
6988 case elfcpp::R_ARM_TARGET1:
6989 // This should have been mapped to another type already.
6990 // Fall through.
6991 case elfcpp::R_ARM_COPY:
6992 case elfcpp::R_ARM_GLOB_DAT:
6993 case elfcpp::R_ARM_JUMP_SLOT:
6994 case elfcpp::R_ARM_RELATIVE:
6995 // These are relocations which should only be seen by the
6996 // dynamic linker, and should never be seen here.
6997 gold_error(_("%s: unexpected reloc %u in object file"),
6998 object->name().c_str(), r_type);
6999 break;
7000
7001 default:
7002 unsupported_reloc_global(object, r_type, gsym);
7003 break;
7004 }
7005 }
7006
7007 // Process relocations for gc.
7008
7009 template<bool big_endian>
7010 void
7011 Target_arm<big_endian>::gc_process_relocs(Symbol_table* symtab,
7012 Layout* layout,
7013 Sized_relobj<32, big_endian>* object,
7014 unsigned int data_shndx,
7015 unsigned int,
7016 const unsigned char* prelocs,
7017 size_t reloc_count,
7018 Output_section* output_section,
7019 bool needs_special_offset_handling,
7020 size_t local_symbol_count,
7021 const unsigned char* plocal_symbols)
7022 {
7023 typedef Target_arm<big_endian> Arm;
7024 typedef typename Target_arm<big_endian>::Scan Scan;
7025
7026 gold::gc_process_relocs<32, big_endian, Arm, elfcpp::SHT_REL, Scan>(
7027 symtab,
7028 layout,
7029 this,
7030 object,
7031 data_shndx,
7032 prelocs,
7033 reloc_count,
7034 output_section,
7035 needs_special_offset_handling,
7036 local_symbol_count,
7037 plocal_symbols);
7038 }
7039
7040 // Scan relocations for a section.
7041
7042 template<bool big_endian>
7043 void
7044 Target_arm<big_endian>::scan_relocs(Symbol_table* symtab,
7045 Layout* layout,
7046 Sized_relobj<32, big_endian>* object,
7047 unsigned int data_shndx,
7048 unsigned int sh_type,
7049 const unsigned char* prelocs,
7050 size_t reloc_count,
7051 Output_section* output_section,
7052 bool needs_special_offset_handling,
7053 size_t local_symbol_count,
7054 const unsigned char* plocal_symbols)
7055 {
7056 typedef typename Target_arm<big_endian>::Scan Scan;
7057 if (sh_type == elfcpp::SHT_RELA)
7058 {
7059 gold_error(_("%s: unsupported RELA reloc section"),
7060 object->name().c_str());
7061 return;
7062 }
7063
7064 gold::scan_relocs<32, big_endian, Target_arm, elfcpp::SHT_REL, Scan>(
7065 symtab,
7066 layout,
7067 this,
7068 object,
7069 data_shndx,
7070 prelocs,
7071 reloc_count,
7072 output_section,
7073 needs_special_offset_handling,
7074 local_symbol_count,
7075 plocal_symbols);
7076 }
7077
7078 // Finalize the sections.
7079
7080 template<bool big_endian>
7081 void
7082 Target_arm<big_endian>::do_finalize_sections(
7083 Layout* layout,
7084 const Input_objects* input_objects,
7085 Symbol_table* symtab)
7086 {
7087 // Merge processor-specific flags.
7088 for (Input_objects::Relobj_iterator p = input_objects->relobj_begin();
7089 p != input_objects->relobj_end();
7090 ++p)
7091 {
7092 Arm_relobj<big_endian>* arm_relobj =
7093 Arm_relobj<big_endian>::as_arm_relobj(*p);
7094 this->merge_processor_specific_flags(
7095 arm_relobj->name(),
7096 arm_relobj->processor_specific_flags());
7097 this->merge_object_attributes(arm_relobj->name().c_str(),
7098 arm_relobj->attributes_section_data());
7099
7100 }
7101
7102 for (Input_objects::Dynobj_iterator p = input_objects->dynobj_begin();
7103 p != input_objects->dynobj_end();
7104 ++p)
7105 {
7106 Arm_dynobj<big_endian>* arm_dynobj =
7107 Arm_dynobj<big_endian>::as_arm_dynobj(*p);
7108 this->merge_processor_specific_flags(
7109 arm_dynobj->name(),
7110 arm_dynobj->processor_specific_flags());
7111 this->merge_object_attributes(arm_dynobj->name().c_str(),
7112 arm_dynobj->attributes_section_data());
7113 }
7114
7115 // Check BLX use.
7116 const Object_attribute* cpu_arch_attr =
7117 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch);
7118 if (cpu_arch_attr->int_value() > elfcpp::TAG_CPU_ARCH_V4)
7119 this->set_may_use_blx(true);
7120
7121 // Check if we need to use Cortex-A8 workaround.
7122 if (parameters->options().user_set_fix_cortex_a8())
7123 this->fix_cortex_a8_ = parameters->options().fix_cortex_a8();
7124 else
7125 {
7126 // If neither --fix-cortex-a8 nor --no-fix-cortex-a8 is used, turn on the
7127 // Cortex-A8 erratum workaround for ARMv7-A or for ARMv7 with an unknown
7128 // profile.
7129 const Object_attribute* cpu_arch_profile_attr =
7130 this->get_aeabi_object_attribute(elfcpp::Tag_CPU_arch_profile);
7131 this->fix_cortex_a8_ =
7132 (cpu_arch_attr->int_value() == elfcpp::TAG_CPU_ARCH_V7
7133 && (cpu_arch_profile_attr->int_value() == 'A'
7134 || cpu_arch_profile_attr->int_value() == 0));
7135 }
7136
7137 // Check if we can use V4BX interworking.
7138 // The V4BX interworking stub contains a BX instruction,
7139 // which is not specified for some profiles.
7140 if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING
7141 && !this->may_use_blx())
7142 gold_error(_("unable to provide V4BX reloc interworking fix up; "
7143 "the target profile does not support the BX instruction"));
7144
7145 // Fill in some more dynamic tags.
7146 const Reloc_section* rel_plt = (this->plt_ == NULL
7147 ? NULL
7148 : this->plt_->rel_plt());
7149 layout->add_target_dynamic_tags(true, this->got_plt_, rel_plt,
7150 this->rel_dyn_, true);
7151
7152 // Emit any relocs we saved in an attempt to avoid generating COPY
7153 // relocs.
7154 if (this->copy_relocs_.any_saved_relocs())
7155 this->copy_relocs_.emit(this->rel_dyn_section(layout));
7156
7157 // Handle the .ARM.exidx section.
7158 Output_section* exidx_section = layout->find_output_section(".ARM.exidx");
7159 if (exidx_section != NULL
7160 && exidx_section->type() == elfcpp::SHT_ARM_EXIDX
7161 && !parameters->options().relocatable())
7162 {
7163 // Create __exidx_start and __exidx_end symbols.
7164 symtab->define_in_output_data("__exidx_start", NULL,
7165 Symbol_table::PREDEFINED,
7166 exidx_section, 0, 0, elfcpp::STT_OBJECT,
7167 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
7168 false, true);
7169 symtab->define_in_output_data("__exidx_end", NULL,
7170 Symbol_table::PREDEFINED,
7171 exidx_section, 0, 0, elfcpp::STT_OBJECT,
7172 elfcpp::STB_GLOBAL, elfcpp::STV_HIDDEN, 0,
7173 true, true);
7174
7175 // For the ARM target, we need to add a PT_ARM_EXIDX segment for
7176 // the .ARM.exidx section.
7177 if (!layout->script_options()->saw_phdrs_clause())
7178 {
7179 gold_assert(layout->find_output_segment(elfcpp::PT_ARM_EXIDX, 0, 0)
7180 == NULL);
7181 Output_segment* exidx_segment =
7182 layout->make_output_segment(elfcpp::PT_ARM_EXIDX, elfcpp::PF_R);
7183 exidx_segment->add_output_section(exidx_section, elfcpp::PF_R,
7184 false);
7185 }
7186 }
7187
7188 // Create an .ARM.attributes section if there is not one already.
7189 Output_attributes_section_data* attributes_section =
7190 new Output_attributes_section_data(*this->attributes_section_data_);
7191 layout->add_output_section_data(".ARM.attributes",
7192 elfcpp::SHT_ARM_ATTRIBUTES, 0,
7193 attributes_section, false, false, false,
7194 false);
7195 }
7196
7197 // Return whether a direct absolute static relocation needs to be applied.
7198 // In cases where Scan::local() or Scan::global() has created
7199 // a dynamic relocation other than R_ARM_RELATIVE, the addend
7200 // of the relocation is carried in the data, and we must not
7201 // apply the static relocation.
7202
7203 template<bool big_endian>
7204 inline bool
7205 Target_arm<big_endian>::Relocate::should_apply_static_reloc(
7206 const Sized_symbol<32>* gsym,
7207 int ref_flags,
7208 bool is_32bit,
7209 Output_section* output_section)
7210 {
7211 // If the output section is not allocated, then we didn't call
7212 // scan_relocs, we didn't create a dynamic reloc, and we must apply
7213 // the reloc here.
7214 if ((output_section->flags() & elfcpp::SHF_ALLOC) == 0)
7215 return true;
7216
7217 // For local symbols, we will have created a non-RELATIVE dynamic
7218 // relocation only if (a) the output is position independent,
7219 // (b) the relocation is absolute (not pc- or segment-relative), and
7220 // (c) the relocation is not 32 bits wide.
7221 if (gsym == NULL)
7222 return !(parameters->options().output_is_position_independent()
7223 && (ref_flags & Symbol::ABSOLUTE_REF)
7224 && !is_32bit);
7225
7226 // For global symbols, we use the same helper routines used in the
7227 // scan pass. If we did not create a dynamic relocation, or if we
7228 // created a RELATIVE dynamic relocation, we should apply the static
7229 // relocation.
7230 bool has_dyn = gsym->needs_dynamic_reloc(ref_flags);
7231 bool is_rel = (ref_flags & Symbol::ABSOLUTE_REF)
7232 && gsym->can_use_relative_reloc(ref_flags
7233 & Symbol::FUNCTION_CALL);
7234 return !has_dyn || is_rel;
7235 }
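// For illustration: when linking -shared, an R_ARM_ABS32 against a
// non-preemptible defined global gets an R_ARM_RELATIVE dynamic reloc
// from Scan::global() and the static relocation is still applied here,
// since the RELATIVE reloc only adds the load bias to the stored
// link-time value.  The same reloc against a preemptible symbol gets a
// real R_ARM_ABS32 dynamic reloc instead, and the static relocation is
// skipped so the addend already stored in the data is preserved.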
7236
7237 // Perform a relocation.
7238
7239 template<bool big_endian>
7240 inline bool
7241 Target_arm<big_endian>::Relocate::relocate(
7242 const Relocate_info<32, big_endian>* relinfo,
7243 Target_arm* target,
7244 Output_section *output_section,
7245 size_t relnum,
7246 const elfcpp::Rel<32, big_endian>& rel,
7247 unsigned int r_type,
7248 const Sized_symbol<32>* gsym,
7249 const Symbol_value<32>* psymval,
7250 unsigned char* view,
7251 Arm_address address,
7252 section_size_type /* view_size */ )
7253 {
7254 typedef Arm_relocate_functions<big_endian> Arm_relocate_functions;
7255
7256 r_type = get_real_reloc_type(r_type);
7257
7258 const Arm_relobj<big_endian>* object =
7259 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
7260
7261 // If the final branch target of a relocation is a THUMB instruction,
7262 // this is 1. Otherwise it is 0.
7263 Arm_address thumb_bit = 0;
7264 Symbol_value<32> symval;
7265 bool is_weakly_undefined_without_plt = false;
7266 if (relnum != Target_arm<big_endian>::fake_relnum_for_stubs)
7267 {
7268 if (gsym != NULL)
7269 {
7270 // This is a global symbol. Determine if we use PLT and if the
7271 // final target is THUMB.
7272 if (gsym->use_plt_offset(reloc_is_non_pic(r_type)))
7273 {
7274 // This uses a PLT, change the symbol value.
7275 symval.set_output_value(target->plt_section()->address()
7276 + gsym->plt_offset());
7277 psymval = &symval;
7278 }
7279 else if (gsym->is_weak_undefined())
7280 {
7281 // This is a weakly undefined symbol and we do not use PLT
7282 // for this relocation. A branch targeting this symbol will
7283 // be converted into an NOP.
7284 is_weakly_undefined_without_plt = true;
7285 }
7286 else
7287 {
7288 // Set the thumb bit if the symbol:
7289 // - has type STT_ARM_TFUNC, or
7290 // - has type STT_FUNC, is defined, and has the LSB of its value set.
7291 thumb_bit =
7292 (((gsym->type() == elfcpp::STT_ARM_TFUNC)
7293 || (gsym->type() == elfcpp::STT_FUNC
7294 && !gsym->is_undefined()
7295 && ((psymval->value(object, 0) & 1) != 0)))
7296 ? 1
7297 : 0);
7298 }
7299 }
7300 else
7301 {
7302 // This is a local symbol. Determine if the final target is THUMB.
7303 // We saved this information when all the local symbols were read.
7304 elfcpp::Elf_types<32>::Elf_WXword r_info = rel.get_r_info();
7305 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
7306 thumb_bit = object->local_symbol_is_thumb_function(r_sym) ? 1 : 0;
7307 }
7308 }
7309 else
7310 {
7311 // This is a fake relocation synthesized for a stub. It does not have
7312 // a real symbol. We just look at the LSB of the symbol value to
7313 // determine if the target is THUMB or not.
7314 thumb_bit = ((psymval->value(object, 0) & 1) != 0);
7315 }
7316
7317 // Strip LSB if this points to a THUMB target.
7318 if (thumb_bit != 0
7319 && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
7320 && ((psymval->value(object, 0) & 1) != 0))
7321 {
7322 Arm_address stripped_value =
7323 psymval->value(object, 0) & ~static_cast<Arm_address>(1);
7324 symval.set_output_value(stripped_value);
7325 psymval = &symval;
7326 }
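// For illustration: a Thumb function whose symbol value is 0x8001 is
// processed here as target address 0x8000 with thumb_bit == 1, so a
// branch relocation such as R_ARM_CALL can encode the even address and,
// where the architecture allows it, be rewritten as a BLX for
// interworking.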
7327
7328 // Get the GOT offset if needed.
7329 // The GOT pointer points to the end of the GOT section.
7330 // We need to subtract the size of the GOT section to get
7331 // the actual offset to use in the relocation.
7332 bool have_got_offset = false;
7333 unsigned int got_offset = 0;
7334 switch (r_type)
7335 {
7336 case elfcpp::R_ARM_GOT_BREL:
7337 case elfcpp::R_ARM_GOT_PREL:
7338 if (gsym != NULL)
7339 {
7340 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
7341 got_offset = (gsym->got_offset(GOT_TYPE_STANDARD)
7342 - target->got_size());
7343 }
7344 else
7345 {
7346 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
7347 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
7348 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
7349 - target->got_size());
7350 }
7351 have_got_offset = true;
7352 break;
7353
7354 default:
7355 break;
7356 }
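// For illustration: the bias by got_size() reflects that the GOT origin
// used by these relocations points at the end of the GOT section (the
// start of the GOT PLT).  E.g. with a 0x20-byte GOT section, a symbol
// whose GOT entry is at offset 0x18 yields got_offset == 0x18 - 0x20,
// i.e. -8 when interpreted as a signed 32-bit value.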
7357
7358 // To look up relocation stubs, we need to pass the symbol table index of
7359 // a local symbol.
7360 unsigned int r_sym = elfcpp::elf_r_sym<32>(rel.get_r_info());
7361
7362 // Get the addressing origin of the output segment defining the
7363 // symbol gsym if needed (AAELF 4.6.1.2 Relocation types).
7364 Arm_address sym_origin = 0;
7365 if (Relocate::reloc_needs_sym_origin(r_type))
7366 {
7367 if (r_type == elfcpp::R_ARM_BASE_ABS && gsym == NULL)
7368 // R_ARM_BASE_ABS with the NULL symbol will give the
7369 // absolute address of the GOT origin (GOT_ORG) (see ARM IHI
7370 // 0044C (AAELF): 4.6.1.8 Proxy generating relocations).
7371 sym_origin = target->got_plt_section()->address();
7372 else if (gsym == NULL)
7373 sym_origin = 0;
7374 else if (gsym->source() == Symbol::IN_OUTPUT_SEGMENT)
7375 sym_origin = gsym->output_segment()->vaddr();
7376 else if (gsym->source() == Symbol::IN_OUTPUT_DATA)
7377 sym_origin = gsym->output_data()->address();
7378
7379 // TODO: Assume the segment base is zero for global symbols until
7380 // proper support for segment-base-relative addressing is
7381 // implemented. This is consistent with GNU ld.
7382 }
7383
7384 typename Arm_relocate_functions::Status reloc_status =
7385 Arm_relocate_functions::STATUS_OKAY;
7386 switch (r_type)
7387 {
7388 case elfcpp::R_ARM_NONE:
7389 break;
7390
7391 case elfcpp::R_ARM_ABS8:
7392 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
7393 output_section))
7394 reloc_status = Arm_relocate_functions::abs8(view, object, psymval);
7395 break;
7396
7397 case elfcpp::R_ARM_ABS12:
7398 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
7399 output_section))
7400 reloc_status = Arm_relocate_functions::abs12(view, object, psymval);
7401 break;
7402
7403 case elfcpp::R_ARM_ABS16:
7404 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
7405 output_section))
7406 reloc_status = Arm_relocate_functions::abs16(view, object, psymval);
7407 break;
7408
7409 case elfcpp::R_ARM_ABS32:
7410 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7411 output_section))
7412 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
7413 thumb_bit);
7414 break;
7415
7416 case elfcpp::R_ARM_ABS32_NOI:
7417 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7418 output_section))
7419 // No thumb bit for this relocation: (S + A)
7420 reloc_status = Arm_relocate_functions::abs32(view, object, psymval,
7421 0);
7422 break;
7423
7424 case elfcpp::R_ARM_MOVW_ABS_NC:
7425 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7426 output_section))
7427 reloc_status = Arm_relocate_functions::movw_abs_nc(view, object,
7428 psymval,
7429 thumb_bit);
7430 else
7431 gold_error(_("relocation R_ARM_MOVW_ABS_NC cannot be used when making "
7432 "a shared object; recompile with -fPIC"));
7433 break;
7434
7435 case elfcpp::R_ARM_MOVT_ABS:
7436 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7437 output_section))
7438 reloc_status = Arm_relocate_functions::movt_abs(view, object, psymval);
7439 else
7440 gold_error(_("relocation R_ARM_MOVT_ABS cannot be used when making "
7441 "a shared object; recompile with -fPIC"));
7442 break;
7443
7444 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
7445 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7446 output_section))
7447 reloc_status = Arm_relocate_functions::thm_movw_abs_nc(view, object,
7448 psymval,
7449 thumb_bit);
7450 else
7451 gold_error(_("relocation R_ARM_THM_MOVW_ABS_NC cannot be used when "
7452 "making a shared object; recompile with -fPIC"));
7453 break;
7454
7455 case elfcpp::R_ARM_THM_MOVT_ABS:
7456 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7457 output_section))
7458 reloc_status = Arm_relocate_functions::thm_movt_abs(view, object,
7459 psymval);
7460 else
7461 gold_error(_("relocation R_ARM_THM_MOVT_ABS cannot be used when "
7462 "making a shared object; recompile with -fPIC"));
7463 break;
7464
7465 case elfcpp::R_ARM_MOVW_PREL_NC:
7466 reloc_status = Arm_relocate_functions::movw_rel_nc(view, object,
7467 psymval, address,
7468 thumb_bit);
7469 break;
7470
7471 case elfcpp::R_ARM_MOVW_BREL_NC:
7472 reloc_status = Arm_relocate_functions::movw_rel_nc(view, object,
7473 psymval, sym_origin,
7474 thumb_bit);
7475 break;
7476
7477 case elfcpp::R_ARM_MOVW_BREL:
7478 reloc_status = Arm_relocate_functions::movw_rel(view, object,
7479 psymval, sym_origin,
7480 thumb_bit);
7481 break;
7482
7483 case elfcpp::R_ARM_MOVT_PREL:
7484 reloc_status = Arm_relocate_functions::movt_rel(view, object,
7485 psymval, address);
7486 break;
7487
7488 case elfcpp::R_ARM_MOVT_BREL:
7489 reloc_status = Arm_relocate_functions::movt_rel(view, object,
7490 psymval, sym_origin);
7491 break;
7492
7493 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
7494 reloc_status = Arm_relocate_functions::thm_movw_rel_nc(view, object,
7495 psymval, address,
7496 thumb_bit);
7497 break;
7498
7499 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
7500 reloc_status = Arm_relocate_functions::thm_movw_rel_nc(view, object,
7501 psymval,
7502 sym_origin,
7503 thumb_bit);
7504 break;
7505
7506 case elfcpp::R_ARM_THM_MOVW_BREL:
7507 reloc_status = Arm_relocate_functions::thm_movw_rel(view, object,
7508 psymval, sym_origin,
7509 thumb_bit);
7510 break;
7511
7512 case elfcpp::R_ARM_THM_MOVT_PREL:
7513 reloc_status = Arm_relocate_functions::thm_movt_rel(view, object,
7514 psymval, address);
7515 break;
7516
7517 case elfcpp::R_ARM_THM_MOVT_BREL:
7518 reloc_status = Arm_relocate_functions::thm_movt_rel(view, object,
7519 psymval, sym_origin);
7520 break;
7521
7522 case elfcpp::R_ARM_REL32:
7523 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
7524 address, thumb_bit);
7525 break;
7526
7527 case elfcpp::R_ARM_THM_ABS5:
7528 if (should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, false,
7529 output_section))
7530 reloc_status = Arm_relocate_functions::thm_abs5(view, object, psymval);
7531 break;
7532
7533 // Thumb long branches.
7534 case elfcpp::R_ARM_THM_CALL:
7535 case elfcpp::R_ARM_THM_XPC22:
7536 case elfcpp::R_ARM_THM_JUMP24:
7537 reloc_status =
7538 Arm_relocate_functions::thumb_branch_common(
7539 r_type, relinfo, view, gsym, object, r_sym, psymval, address,
7540 thumb_bit, is_weakly_undefined_without_plt);
7541 break;
7542
7543 case elfcpp::R_ARM_GOTOFF32:
7544 {
7545 Arm_address got_origin;
7546 got_origin = target->got_plt_section()->address();
7547 reloc_status = Arm_relocate_functions::rel32(view, object, psymval,
7548 got_origin, thumb_bit);
7549 }
7550 break;
7551
7552 case elfcpp::R_ARM_BASE_PREL:
7553 gold_assert(gsym != NULL);
7554 reloc_status =
7555 Arm_relocate_functions::base_prel(view, sym_origin, address);
7556 break;
7557
7558 case elfcpp::R_ARM_BASE_ABS:
7559 {
7560 if (!should_apply_static_reloc(gsym, Symbol::ABSOLUTE_REF, true,
7561 output_section))
7562 break;
7563
7564 reloc_status = Arm_relocate_functions::base_abs(view, sym_origin);
7565 }
7566 break;
7567
7568 case elfcpp::R_ARM_GOT_BREL:
7569 gold_assert(have_got_offset);
7570 reloc_status = Arm_relocate_functions::got_brel(view, got_offset);
7571 break;
7572
7573 case elfcpp::R_ARM_GOT_PREL:
7574 gold_assert(have_got_offset);
7575 // Get the address origin of the GOT PLT section, which is allocated
7576 // right after the GOT section, to calculate the absolute address of
7577 // the symbol's GOT entry (got_origin + got_offset).
7578 Arm_address got_origin;
7579 got_origin = target->got_plt_section()->address();
7580 reloc_status = Arm_relocate_functions::got_prel(view,
7581 got_origin + got_offset,
7582 address);
7583 break;
7584
7585 case elfcpp::R_ARM_PLT32:
7586 case elfcpp::R_ARM_CALL:
7587 case elfcpp::R_ARM_JUMP24:
7588 case elfcpp::R_ARM_XPC25:
7589 gold_assert(gsym == NULL
7590 || gsym->has_plt_offset()
7591 || gsym->final_value_is_known()
7592 || (gsym->is_defined()
7593 && !gsym->is_from_dynobj()
7594 && !gsym->is_preemptible()));
7595 reloc_status =
7596 Arm_relocate_functions::arm_branch_common(
7597 r_type, relinfo, view, gsym, object, r_sym, psymval, address,
7598 thumb_bit, is_weakly_undefined_without_plt);
7599 break;
7600
7601 case elfcpp::R_ARM_THM_JUMP19:
7602 reloc_status =
7603 Arm_relocate_functions::thm_jump19(view, object, psymval, address,
7604 thumb_bit);
7605 break;
7606
7607 case elfcpp::R_ARM_THM_JUMP6:
7608 reloc_status =
7609 Arm_relocate_functions::thm_jump6(view, object, psymval, address);
7610 break;
7611
7612 case elfcpp::R_ARM_THM_JUMP8:
7613 reloc_status =
7614 Arm_relocate_functions::thm_jump8(view, object, psymval, address);
7615 break;
7616
7617 case elfcpp::R_ARM_THM_JUMP11:
7618 reloc_status =
7619 Arm_relocate_functions::thm_jump11(view, object, psymval, address);
7620 break;
7621
7622 case elfcpp::R_ARM_PREL31:
7623 reloc_status = Arm_relocate_functions::prel31(view, object, psymval,
7624 address, thumb_bit);
7625 break;
7626
7627 case elfcpp::R_ARM_V4BX:
7628 if (target->fix_v4bx() > General_options::FIX_V4BX_NONE)
7629 {
7630 const bool is_v4bx_interworking =
7631 (target->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING);
7632 reloc_status =
7633 Arm_relocate_functions::v4bx(relinfo, view, object, address,
7634 is_v4bx_interworking);
7635 }
7636 break;
7637
7638 case elfcpp::R_ARM_THM_PC8:
7639 reloc_status =
7640 Arm_relocate_functions::thm_pc8(view, object, psymval, address);
7641 break;
7642
7643 case elfcpp::R_ARM_THM_PC12:
7644 reloc_status =
7645 Arm_relocate_functions::thm_pc12(view, object, psymval, address);
7646 break;
7647
7648 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
7649 reloc_status =
7650 Arm_relocate_functions::thm_alu11(view, object, psymval, address,
7651 thumb_bit);
7652 break;
7653
7654 case elfcpp::R_ARM_ALU_PC_G0_NC:
7655 reloc_status =
7656 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 0,
7657 address, thumb_bit, false);
7658 break;
7659
7660 case elfcpp::R_ARM_ALU_PC_G0:
7661 reloc_status =
7662 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 0,
7663 address, thumb_bit, true);
7664 break;
7665
7666 case elfcpp::R_ARM_ALU_PC_G1_NC:
7667 reloc_status =
7668 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 1,
7669 address, thumb_bit, false);
7670 break;
7671
7672 case elfcpp::R_ARM_ALU_PC_G1:
7673 reloc_status =
7674 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 1,
7675 address, thumb_bit, true);
7676 break;
7677
7678 case elfcpp::R_ARM_ALU_PC_G2:
7679 reloc_status =
7680 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 2,
7681 address, thumb_bit, true);
7682 break;
7683
7684 case elfcpp::R_ARM_ALU_SB_G0_NC:
7685 reloc_status =
7686 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 0,
7687 sym_origin, thumb_bit, false);
7688 break;
7689
7690 case elfcpp::R_ARM_ALU_SB_G0:
7691 reloc_status =
7692 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 0,
7693 sym_origin, thumb_bit, true);
7694 break;
7695
7696 case elfcpp::R_ARM_ALU_SB_G1_NC:
7697 reloc_status =
7698 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 1,
7699 sym_origin, thumb_bit, false);
7700 break;
7701
7702 case elfcpp::R_ARM_ALU_SB_G1:
7703 reloc_status =
7704 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 1,
7705 sym_origin, thumb_bit, true);
7706 break;
7707
7708 case elfcpp::R_ARM_ALU_SB_G2:
7709 reloc_status =
7710 Arm_relocate_functions::arm_grp_alu(view, object, psymval, 2,
7711 sym_origin, thumb_bit, true);
7712 break;
7713
7714 case elfcpp::R_ARM_LDR_PC_G0:
7715 reloc_status =
7716 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 0,
7717 address);
7718 break;
7719
7720 case elfcpp::R_ARM_LDR_PC_G1:
7721 reloc_status =
7722 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 1,
7723 address);
7724 break;
7725
7726 case elfcpp::R_ARM_LDR_PC_G2:
7727 reloc_status =
7728 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 2,
7729 address);
7730 break;
7731
7732 case elfcpp::R_ARM_LDR_SB_G0:
7733 reloc_status =
7734 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 0,
7735 sym_origin);
7736 break;
7737
7738 case elfcpp::R_ARM_LDR_SB_G1:
7739 reloc_status =
7740 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 1,
7741 sym_origin);
7742 break;
7743
7744 case elfcpp::R_ARM_LDR_SB_G2:
7745 reloc_status =
7746 Arm_relocate_functions::arm_grp_ldr(view, object, psymval, 2,
7747 sym_origin);
7748 break;
7749
7750 case elfcpp::R_ARM_LDRS_PC_G0:
7751 reloc_status =
7752 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 0,
7753 address);
7754 break;
7755
7756 case elfcpp::R_ARM_LDRS_PC_G1:
7757 reloc_status =
7758 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 1,
7759 address);
7760 break;
7761
7762 case elfcpp::R_ARM_LDRS_PC_G2:
7763 reloc_status =
7764 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 2,
7765 address);
7766 break;
7767
7768 case elfcpp::R_ARM_LDRS_SB_G0:
7769 reloc_status =
7770 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 0,
7771 sym_origin);
7772 break;
7773
7774 case elfcpp::R_ARM_LDRS_SB_G1:
7775 reloc_status =
7776 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 1,
7777 sym_origin);
7778 break;
7779
7780 case elfcpp::R_ARM_LDRS_SB_G2:
7781 reloc_status =
7782 Arm_relocate_functions::arm_grp_ldrs(view, object, psymval, 2,
7783 sym_origin);
7784 break;
7785
7786 case elfcpp::R_ARM_LDC_PC_G0:
7787 reloc_status =
7788 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 0,
7789 address);
7790 break;
7791
7792 case elfcpp::R_ARM_LDC_PC_G1:
7793 reloc_status =
7794 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 1,
7795 address);
7796 break;
7797
7798 case elfcpp::R_ARM_LDC_PC_G2:
7799 reloc_status =
7800 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 2,
7801 address);
7802 break;
7803
7804 case elfcpp::R_ARM_LDC_SB_G0:
7805 reloc_status =
7806 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 0,
7807 sym_origin);
7808 break;
7809
7810 case elfcpp::R_ARM_LDC_SB_G1:
7811 reloc_status =
7812 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 1,
7813 sym_origin);
7814 break;
7815
7816 case elfcpp::R_ARM_LDC_SB_G2:
7817 reloc_status =
7818 Arm_relocate_functions::arm_grp_ldc(view, object, psymval, 2,
7819 sym_origin);
7820 break;
7821
7822 case elfcpp::R_ARM_TARGET1:
7823 // This should have been mapped to another type already.
7824 // Fall through.
7825 case elfcpp::R_ARM_COPY:
7826 case elfcpp::R_ARM_GLOB_DAT:
7827 case elfcpp::R_ARM_JUMP_SLOT:
7828 case elfcpp::R_ARM_RELATIVE:
7829 // These are relocations which should only be seen by the
7830 // dynamic linker, and should never be seen here.
7831 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
7832 _("unexpected reloc %u in object file"),
7833 r_type);
7834 break;
7835
7836 default:
7837 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
7838 _("unsupported reloc %u"),
7839 r_type);
7840 break;
7841 }
7842
7843 // Report any errors.
7844 switch (reloc_status)
7845 {
7846 case Arm_relocate_functions::STATUS_OKAY:
7847 break;
7848 case Arm_relocate_functions::STATUS_OVERFLOW:
7849 gold_error_at_location(relinfo, relnum, rel.get_r_offset(),
7850 _("relocation overflow in relocation %u"),
7851 r_type);
7852 break;
7853 case Arm_relocate_functions::STATUS_BAD_RELOC:
7854 gold_error_at_location(
7855 relinfo,
7856 relnum,
7857 rel.get_r_offset(),
7858 _("unexpected opcode while processing relocation %u"),
7859 r_type);
7860 break;
7861 default:
7862 gold_unreachable();
7863 }
7864
7865 return true;
7866 }
7867
7868 // Relocate section data.
7869
7870 template<bool big_endian>
7871 void
7872 Target_arm<big_endian>::relocate_section(
7873 const Relocate_info<32, big_endian>* relinfo,
7874 unsigned int sh_type,
7875 const unsigned char* prelocs,
7876 size_t reloc_count,
7877 Output_section* output_section,
7878 bool needs_special_offset_handling,
7879 unsigned char* view,
7880 Arm_address address,
7881 section_size_type view_size,
7882 const Reloc_symbol_changes* reloc_symbol_changes)
7883 {
7884 typedef typename Target_arm<big_endian>::Relocate Arm_relocate;
7885 gold_assert(sh_type == elfcpp::SHT_REL);
7886
7887 Arm_input_section<big_endian>* arm_input_section =
7888 this->find_arm_input_section(relinfo->object, relinfo->data_shndx);
7889
7890 // This is an ARM input section and the view covers the whole output
7891 // section.
7892 if (arm_input_section != NULL)
7893 {
7894 gold_assert(needs_special_offset_handling);
7895 Arm_address section_address = arm_input_section->address();
7896 section_size_type section_size = arm_input_section->data_size();
7897
7898 gold_assert((arm_input_section->address() >= address)
7899 && ((arm_input_section->address()
7900 + arm_input_section->data_size())
7901 <= (address + view_size)));
7902
7903 off_t offset = section_address - address;
7904 view += offset;
7905 address += offset;
7906 view_size = section_size;
7907 }
7908
7909 gold::relocate_section<32, big_endian, Target_arm, elfcpp::SHT_REL,
7910 Arm_relocate>(
7911 relinfo,
7912 this,
7913 prelocs,
7914 reloc_count,
7915 output_section,
7916 needs_special_offset_handling,
7917 view,
7918 address,
7919 view_size,
7920 reloc_symbol_changes);
7921 }
7922
7923 // Return the size of a relocation while scanning during a relocatable
7924 // link.
7925
7926 template<bool big_endian>
7927 unsigned int
7928 Target_arm<big_endian>::Relocatable_size_for_reloc::get_size_for_reloc(
7929 unsigned int r_type,
7930 Relobj* object)
7931 {
7932 r_type = get_real_reloc_type(r_type);
7933 switch (r_type)
7934 {
7935 case elfcpp::R_ARM_NONE:
7936 return 0;
7937
7938 case elfcpp::R_ARM_ABS8:
7939 return 1;
7940
7941 case elfcpp::R_ARM_ABS16:
7942 case elfcpp::R_ARM_THM_ABS5:
7943 case elfcpp::R_ARM_THM_JUMP6:
7944 case elfcpp::R_ARM_THM_JUMP8:
7945 case elfcpp::R_ARM_THM_JUMP11:
7946 case elfcpp::R_ARM_THM_PC8:
7947 return 2;
7948
7949 case elfcpp::R_ARM_ABS32:
7950 case elfcpp::R_ARM_ABS32_NOI:
7951 case elfcpp::R_ARM_ABS12:
7952 case elfcpp::R_ARM_BASE_ABS:
7953 case elfcpp::R_ARM_REL32:
7954 case elfcpp::R_ARM_THM_CALL:
7955 case elfcpp::R_ARM_GOTOFF32:
7956 case elfcpp::R_ARM_BASE_PREL:
7957 case elfcpp::R_ARM_GOT_BREL:
7958 case elfcpp::R_ARM_GOT_PREL:
7959 case elfcpp::R_ARM_PLT32:
7960 case elfcpp::R_ARM_CALL:
7961 case elfcpp::R_ARM_JUMP24:
7962 case elfcpp::R_ARM_PREL31:
7963 case elfcpp::R_ARM_MOVW_ABS_NC:
7964 case elfcpp::R_ARM_MOVT_ABS:
7965 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
7966 case elfcpp::R_ARM_THM_MOVT_ABS:
7967 case elfcpp::R_ARM_MOVW_PREL_NC:
7968 case elfcpp::R_ARM_MOVT_PREL:
7969 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
7970 case elfcpp::R_ARM_THM_MOVT_PREL:
7971 case elfcpp::R_ARM_MOVW_BREL_NC:
7972 case elfcpp::R_ARM_MOVT_BREL:
7973 case elfcpp::R_ARM_MOVW_BREL:
7974 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
7975 case elfcpp::R_ARM_THM_MOVT_BREL:
7976 case elfcpp::R_ARM_THM_MOVW_BREL:
7977 case elfcpp::R_ARM_V4BX:
7978 case elfcpp::R_ARM_THM_PC12:
7979 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
7980 case elfcpp::R_ARM_ALU_PC_G0_NC:
7981 case elfcpp::R_ARM_ALU_PC_G0:
7982 case elfcpp::R_ARM_ALU_PC_G1_NC:
7983 case elfcpp::R_ARM_ALU_PC_G1:
7984 case elfcpp::R_ARM_ALU_PC_G2:
7985 case elfcpp::R_ARM_ALU_SB_G0_NC:
7986 case elfcpp::R_ARM_ALU_SB_G0:
7987 case elfcpp::R_ARM_ALU_SB_G1_NC:
7988 case elfcpp::R_ARM_ALU_SB_G1:
7989 case elfcpp::R_ARM_ALU_SB_G2:
7990 case elfcpp::R_ARM_LDR_PC_G0:
7991 case elfcpp::R_ARM_LDR_PC_G1:
7992 case elfcpp::R_ARM_LDR_PC_G2:
7993 case elfcpp::R_ARM_LDR_SB_G0:
7994 case elfcpp::R_ARM_LDR_SB_G1:
7995 case elfcpp::R_ARM_LDR_SB_G2:
7996 case elfcpp::R_ARM_LDRS_PC_G0:
7997 case elfcpp::R_ARM_LDRS_PC_G1:
7998 case elfcpp::R_ARM_LDRS_PC_G2:
7999 case elfcpp::R_ARM_LDRS_SB_G0:
8000 case elfcpp::R_ARM_LDRS_SB_G1:
8001 case elfcpp::R_ARM_LDRS_SB_G2:
8002 case elfcpp::R_ARM_LDC_PC_G0:
8003 case elfcpp::R_ARM_LDC_PC_G1:
8004 case elfcpp::R_ARM_LDC_PC_G2:
8005 case elfcpp::R_ARM_LDC_SB_G0:
8006 case elfcpp::R_ARM_LDC_SB_G1:
8007 case elfcpp::R_ARM_LDC_SB_G2:
8008 return 4;
8009
8010 case elfcpp::R_ARM_TARGET1:
8011 // This should have been mapped to another type already.
8012 // Fall through.
8013 case elfcpp::R_ARM_COPY:
8014 case elfcpp::R_ARM_GLOB_DAT:
8015 case elfcpp::R_ARM_JUMP_SLOT:
8016 case elfcpp::R_ARM_RELATIVE:
8017 // These are relocations which should only be seen by the
8018 // dynamic linker, and should never be seen here.
8019 gold_error(_("%s: unexpected reloc %u in object file"),
8020 object->name().c_str(), r_type);
8021 return 0;
8022
8023 default:
8024 object->error(_("unsupported reloc %u in object file"), r_type);
8025 return 0;
8026 }
8027 }
8028
8029 // Scan the relocs during a relocatable link.
8030
8031 template<bool big_endian>
8032 void
8033 Target_arm<big_endian>::scan_relocatable_relocs(
8034 Symbol_table* symtab,
8035 Layout* layout,
8036 Sized_relobj<32, big_endian>* object,
8037 unsigned int data_shndx,
8038 unsigned int sh_type,
8039 const unsigned char* prelocs,
8040 size_t reloc_count,
8041 Output_section* output_section,
8042 bool needs_special_offset_handling,
8043 size_t local_symbol_count,
8044 const unsigned char* plocal_symbols,
8045 Relocatable_relocs* rr)
8046 {
8047 gold_assert(sh_type == elfcpp::SHT_REL);
8048
8049 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_REL,
8050 Relocatable_size_for_reloc> Scan_relocatable_relocs;
8051
8052 gold::scan_relocatable_relocs<32, big_endian, elfcpp::SHT_REL,
8053 Scan_relocatable_relocs>(
8054 symtab,
8055 layout,
8056 object,
8057 data_shndx,
8058 prelocs,
8059 reloc_count,
8060 output_section,
8061 needs_special_offset_handling,
8062 local_symbol_count,
8063 plocal_symbols,
8064 rr);
8065 }
8066
8067 // Relocate a section during a relocatable link.
8068
8069 template<bool big_endian>
8070 void
8071 Target_arm<big_endian>::relocate_for_relocatable(
8072 const Relocate_info<32, big_endian>* relinfo,
8073 unsigned int sh_type,
8074 const unsigned char* prelocs,
8075 size_t reloc_count,
8076 Output_section* output_section,
8077 off_t offset_in_output_section,
8078 const Relocatable_relocs* rr,
8079 unsigned char* view,
8080 Arm_address view_address,
8081 section_size_type view_size,
8082 unsigned char* reloc_view,
8083 section_size_type reloc_view_size)
8084 {
8085 gold_assert(sh_type == elfcpp::SHT_REL);
8086
8087 gold::relocate_for_relocatable<32, big_endian, elfcpp::SHT_REL>(
8088 relinfo,
8089 prelocs,
8090 reloc_count,
8091 output_section,
8092 offset_in_output_section,
8093 rr,
8094 view,
8095 view_address,
8096 view_size,
8097 reloc_view,
8098 reloc_view_size);
8099 }
8100
8101 // Return the value to use for a dynamic symbol which requires special
8102 // treatment. This is how we support equality comparisons of function
8103 // pointers across shared library boundaries, as described in the
8104 // processor specific ABI supplement.
8105
8106 template<bool big_endian>
8107 uint64_t
8108 Target_arm<big_endian>::do_dynsym_value(const Symbol* gsym) const
8109 {
8110 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
8111 return this->plt_section()->address() + gsym->plt_offset();
8112 }
8113
8114 // Map platform-specific relocs to real relocs.
8115 //
8116 template<bool big_endian>
8117 unsigned int
8118 Target_arm<big_endian>::get_real_reloc_type (unsigned int r_type)
8119 {
8120 switch (r_type)
8121 {
8122 case elfcpp::R_ARM_TARGET1:
8123 // This is either R_ARM_ABS32 or R_ARM_REL32.
8124 return elfcpp::R_ARM_ABS32;
8125
8126 case elfcpp::R_ARM_TARGET2:
8127 // This can be any reloc type but usually is R_ARM_GOT_PREL.
8128 return elfcpp::R_ARM_GOT_PREL;
8129
8130 default:
8131 return r_type;
8132 }
8133 }
8134
8135 // Return whether two EABI versions V1 and V2 are compatible.
8136
8137 template<bool big_endian>
8138 bool
8139 Target_arm<big_endian>::are_eabi_versions_compatible(
8140 elfcpp::Elf_Word v1,
8141 elfcpp::Elf_Word v2)
8142 {
8143 // v4 and v5 are the same spec before and after it was released,
8144 // so allow mixing them.
8145 if ((v1 == elfcpp::EF_ARM_EABI_VER4 && v2 == elfcpp::EF_ARM_EABI_VER5)
8146 || (v1 == elfcpp::EF_ARM_EABI_VER5 && v2 == elfcpp::EF_ARM_EABI_VER4))
8147 return true;
8148
8149 return v1 == v2;
8150 }
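// For illustration: an EF_ARM_EABI_VER4 input can be mixed with an
// EF_ARM_EABI_VER5 output (and vice versa), while mixing, say, VER2
// with VER5 makes merge_processor_specific_flags() below report an
// EABI version mismatch.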
8151
8152 // Combine FLAGS from an input object called NAME and the processor-specific
8153 // flags in the ELF header of the output. Much of this is adapted from the
8154 // processor-specific flags merging code in elf32_arm_merge_private_bfd_data
8155 // in bfd/elf32-arm.c.
8156
8157 template<bool big_endian>
8158 void
8159 Target_arm<big_endian>::merge_processor_specific_flags(
8160 const std::string& name,
8161 elfcpp::Elf_Word flags)
8162 {
8163 if (this->are_processor_specific_flags_set())
8164 {
8165 elfcpp::Elf_Word out_flags = this->processor_specific_flags();
8166
8167 // Nothing to merge if the flags are equal to those in the output.
8168 if (flags == out_flags)
8169 return;
8170
8171 // Complain about various flag mismatches.
8172 elfcpp::Elf_Word version1 = elfcpp::arm_eabi_version(flags);
8173 elfcpp::Elf_Word version2 = elfcpp::arm_eabi_version(out_flags);
8174 if (!this->are_eabi_versions_compatible(version1, version2))
8175 gold_error(_("Source object %s has EABI version %d but output has "
8176 "EABI version %d."),
8177 name.c_str(),
8178 (flags & elfcpp::EF_ARM_EABIMASK) >> 24,
8179 (out_flags & elfcpp::EF_ARM_EABIMASK) >> 24);
8180 }
8181 else
8182 {
8183 // If the input has the default architecture and had the default
8184 // flags, do not bother setting the flags for the output
8185 // architecture; instead, allow future merges to do this. If no
8186 // future merge ever sets these flags, they will retain their
8187 // uninitialised values, which, unsurprisingly, correspond
8188 // to the default values.
8189 if (flags == 0)
8190 return;
8191
8192 // This is the first time, just copy the flags.
8193 // We only copy the EABI version for now.
8194 this->set_processor_specific_flags(flags & elfcpp::EF_ARM_EABIMASK);
8195 }
8196 }
8197
8198 // Adjust ELF file header.
8199 template<bool big_endian>
8200 void
8201 Target_arm<big_endian>::do_adjust_elf_header(
8202 unsigned char* view,
8203 int len) const
8204 {
8205 gold_assert(len == elfcpp::Elf_sizes<32>::ehdr_size);
8206
8207 elfcpp::Ehdr<32, big_endian> ehdr(view);
8208 unsigned char e_ident[elfcpp::EI_NIDENT];
8209 memcpy(e_ident, ehdr.get_e_ident(), elfcpp::EI_NIDENT);
8210
8211 if (elfcpp::arm_eabi_version(this->processor_specific_flags())
8212 == elfcpp::EF_ARM_EABI_UNKNOWN)
8213 e_ident[elfcpp::EI_OSABI] = elfcpp::ELFOSABI_ARM;
8214 else
8215 e_ident[elfcpp::EI_OSABI] = 0;
8216 e_ident[elfcpp::EI_ABIVERSION] = 0;
8217
8218 // FIXME: Do EF_ARM_BE8 adjustment.
8219
8220 elfcpp::Ehdr_write<32, big_endian> oehdr(view);
8221 oehdr.put_e_ident(e_ident);
8222 }
8223
8224 // do_make_elf_object overrides the same function in the base class.
8225 // We need to use a target-specific sub-class of Sized_relobj<32, big_endian>
8226 // to store ARM-specific information. Hence we need our own
8227 // ELF object creation.
8228
8229 template<bool big_endian>
8230 Object*
8231 Target_arm<big_endian>::do_make_elf_object(
8232 const std::string& name,
8233 Input_file* input_file,
8234 off_t offset, const elfcpp::Ehdr<32, big_endian>& ehdr)
8235 {
8236 int et = ehdr.get_e_type();
8237 if (et == elfcpp::ET_REL)
8238 {
8239 Arm_relobj<big_endian>* obj =
8240 new Arm_relobj<big_endian>(name, input_file, offset, ehdr);
8241 obj->setup();
8242 return obj;
8243 }
8244 else if (et == elfcpp::ET_DYN)
8245 {
8246 Sized_dynobj<32, big_endian>* obj =
8247 new Arm_dynobj<big_endian>(name, input_file, offset, ehdr);
8248 obj->setup();
8249 return obj;
8250 }
8251 else
8252 {
8253 gold_error(_("%s: unsupported ELF file type %d"),
8254 name.c_str(), et);
8255 return NULL;
8256 }
8257 }
8258
8259 // Read the architecture from the Tag_also_compatible_with attribute, if any.
8260 // Returns -1 if no architecture could be read.
8261 // This is adapted from get_secondary_compatible_arch() in bfd/elf32-arm.c.
8262
8263 template<bool big_endian>
8264 int
8265 Target_arm<big_endian>::get_secondary_compatible_arch(
8266 const Attributes_section_data* pasd)
8267 {
8268 const Object_attribute *known_attributes =
8269 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
8270
8271 // Note: the tag and its argument below are uleb128 values, though
8272 // currently-defined values fit in one byte for each.
8273 const std::string& sv =
8274 known_attributes[elfcpp::Tag_also_compatible_with].string_value();
8275 if (sv.size() == 2
8276 && sv.data()[0] == elfcpp::Tag_CPU_arch
8277 && (sv.data()[1] & 128) != 128)
8278 return sv.data()[1];
8279
8280 // This tag is "safely ignorable", so don't complain if it looks funny.
8281 return -1;
8282 }
8283
8284 // Set, or unset, the architecture of the Tag_also_compatible_with attribute.
8285 // The tag is removed if ARCH is -1.
8286 // This is adapted from set_secondary_compatible_arch() in bfd/elf32-arm.c.
8287
8288 template<bool big_endian>
8289 void
8290 Target_arm<big_endian>::set_secondary_compatible_arch(
8291 Attributes_section_data* pasd,
8292 int arch)
8293 {
8294 Object_attribute *known_attributes =
8295 pasd->known_attributes(Object_attribute::OBJ_ATTR_PROC);
8296
8297 if (arch == -1)
8298 {
8299 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value("");
8300 return;
8301 }
8302
8303 // Note: the tag and its argument below are uleb128 values, though
8304 // currently-defined values fit in one byte for each.
8305 char sv[3];
8306 sv[0] = elfcpp::Tag_CPU_arch;
8307 gold_assert(arch != 0);
8308 sv[1] = arch;
8309 sv[2] = '\0';
8310
8311 known_attributes[elfcpp::Tag_also_compatible_with].set_string_value(sv);
8312 }
8313
8314 // Combine two values for Tag_CPU_arch, taking secondary compatibility tags
8315 // into account.
8316 // This is adapted from tag_cpu_arch_combine() in bfd/elf32-arm.c.
8317
8318 template<bool big_endian>
8319 int
8320 Target_arm<big_endian>::tag_cpu_arch_combine(
8321 const char* name,
8322 int oldtag,
8323 int* secondary_compat_out,
8324 int newtag,
8325 int secondary_compat)
8326 {
8327 #define T(X) elfcpp::TAG_CPU_ARCH_##X
8328 static const int v6t2[] =
8329 {
8330 T(V6T2), // PRE_V4.
8331 T(V6T2), // V4.
8332 T(V6T2), // V4T.
8333 T(V6T2), // V5T.
8334 T(V6T2), // V5TE.
8335 T(V6T2), // V5TEJ.
8336 T(V6T2), // V6.
8337 T(V7), // V6KZ.
8338 T(V6T2) // V6T2.
8339 };
8340 static const int v6k[] =
8341 {
8342 T(V6K), // PRE_V4.
8343 T(V6K), // V4.
8344 T(V6K), // V4T.
8345 T(V6K), // V5T.
8346 T(V6K), // V5TE.
8347 T(V6K), // V5TEJ.
8348 T(V6K), // V6.
8349 T(V6KZ), // V6KZ.
8350 T(V7), // V6T2.
8351 T(V6K) // V6K.
8352 };
8353 static const int v7[] =
8354 {
8355 T(V7), // PRE_V4.
8356 T(V7), // V4.
8357 T(V7), // V4T.
8358 T(V7), // V5T.
8359 T(V7), // V5TE.
8360 T(V7), // V5TEJ.
8361 T(V7), // V6.
8362 T(V7), // V6KZ.
8363 T(V7), // V6T2.
8364 T(V7), // V6K.
8365 T(V7) // V7.
8366 };
8367 static const int v6_m[] =
8368 {
8369 -1, // PRE_V4.
8370 -1, // V4.
8371 T(V6K), // V4T.
8372 T(V6K), // V5T.
8373 T(V6K), // V5TE.
8374 T(V6K), // V5TEJ.
8375 T(V6K), // V6.
8376 T(V6KZ), // V6KZ.
8377 T(V7), // V6T2.
8378 T(V6K), // V6K.
8379 T(V7), // V7.
8380 T(V6_M) // V6_M.
8381 };
8382 static const int v6s_m[] =
8383 {
8384 -1, // PRE_V4.
8385 -1, // V4.
8386 T(V6K), // V4T.
8387 T(V6K), // V5T.
8388 T(V6K), // V5TE.
8389 T(V6K), // V5TEJ.
8390 T(V6K), // V6.
8391 T(V6KZ), // V6KZ.
8392 T(V7), // V6T2.
8393 T(V6K), // V6K.
8394 T(V7), // V7.
8395 T(V6S_M), // V6_M.
8396 T(V6S_M) // V6S_M.
8397 };
8398 static const int v7e_m[] =
8399 {
8400 -1, // PRE_V4.
8401 -1, // V4.
8402 T(V7E_M), // V4T.
8403 T(V7E_M), // V5T.
8404 T(V7E_M), // V5TE.
8405 T(V7E_M), // V5TEJ.
8406 T(V7E_M), // V6.
8407 T(V7E_M), // V6KZ.
8408 T(V7E_M), // V6T2.
8409 T(V7E_M), // V6K.
8410 T(V7E_M), // V7.
8411 T(V7E_M), // V6_M.
8412 T(V7E_M), // V6S_M.
8413 T(V7E_M) // V7E_M.
8414 };
8415 static const int v4t_plus_v6_m[] =
8416 {
8417 -1, // PRE_V4.
8418 -1, // V4.
8419 T(V4T), // V4T.
8420 T(V5T), // V5T.
8421 T(V5TE), // V5TE.
8422 T(V5TEJ), // V5TEJ.
8423 T(V6), // V6.
8424 T(V6KZ), // V6KZ.
8425 T(V6T2), // V6T2.
8426 T(V6K), // V6K.
8427 T(V7), // V7.
8428 T(V6_M), // V6_M.
8429 T(V6S_M), // V6S_M.
8430 T(V7E_M), // V7E_M.
8431 T(V4T_PLUS_V6_M) // V4T plus V6_M.
8432 };
8433 static const int *comb[] =
8434 {
8435 v6t2,
8436 v6k,
8437 v7,
8438 v6_m,
8439 v6s_m,
8440 v7e_m,
8441 // Pseudo-architecture.
8442 v4t_plus_v6_m
8443 };
8444
8445 // Check we've not got a higher architecture than we know about.
8446
8447 if (oldtag >= elfcpp::MAX_TAG_CPU_ARCH || newtag >= elfcpp::MAX_TAG_CPU_ARCH)
8448 {
8449 gold_error(_("%s: unknown CPU architecture"), name);
8450 return -1;
8451 }
8452
8453 // Override old tag if we have a Tag_also_compatible_with on the output.
8454
8455 if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
8456 || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
8457 oldtag = T(V4T_PLUS_V6_M);
8458
8459 // And override the new tag if we have a Tag_also_compatible_with on the
8460 // input.
8461
8462 if ((newtag == T(V6_M) && secondary_compat == T(V4T))
8463 || (newtag == T(V4T) && secondary_compat == T(V6_M)))
8464 newtag = T(V4T_PLUS_V6_M);
8465
8466 // Architectures before V6KZ add features monotonically.
8467 int tagh = std::max(oldtag, newtag);
8468 if (tagh <= elfcpp::TAG_CPU_ARCH_V6KZ)
8469 return tagh;
8470
8471 int tagl = std::min(oldtag, newtag);
8472 int result = comb[tagh - T(V6T2)][tagl];
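// For example, if the output is currently V5TE and the input is V7,
// then tagh == T(V7) and tagl == T(V5TE), so the lookup above uses the
// v7[] row and yields T(V7).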
8473
8474 // Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
8475 // as the canonical version.
8476 if (result == T(V4T_PLUS_V6_M))
8477 {
8478 result = T(V4T);
8479 *secondary_compat_out = T(V6_M);
8480 }
8481 else
8482 *secondary_compat_out = -1;
8483
8484 if (result == -1)
8485 {
8486 gold_error(_("%s: conflicting CPU architectures %d/%d"),
8487 name, oldtag, newtag);
8488 return -1;
8489 }
8490
8491 return result;
8492 #undef T
8493 }
8494
8495 // Helper to print AEABI enum tag value.
8496
8497 template<bool big_endian>
8498 std::string
8499 Target_arm<big_endian>::aeabi_enum_name(unsigned int value)
8500 {
8501 static const char *aeabi_enum_names[] =
8502 { "", "variable-size", "32-bit", "" };
8503 const size_t aeabi_enum_names_size =
8504 sizeof(aeabi_enum_names) / sizeof(aeabi_enum_names[0]);
8505
8506 if (value < aeabi_enum_names_size)
8507 return std::string(aeabi_enum_names[value]);
8508 else
8509 {
8510 char buffer[100];
8511 sprintf(buffer, "<unknown value %u>", value);
8512 return std::string(buffer);
8513 }
8514 }
8515
8516 // Return the string value to store in TAG_CPU_name.
8517
8518 template<bool big_endian>
8519 std::string
8520 Target_arm<big_endian>::tag_cpu_name_value(unsigned int value)
8521 {
8522 static const char *name_table[] = {
8523 // These aren't real CPU names, but we can't guess
8524 // that from the architecture version alone.
8525 "Pre v4",
8526 "ARM v4",
8527 "ARM v4T",
8528 "ARM v5T",
8529 "ARM v5TE",
8530 "ARM v5TEJ",
8531 "ARM v6",
8532 "ARM v6KZ",
8533 "ARM v6T2",
8534 "ARM v6K",
8535 "ARM v7",
8536 "ARM v6-M",
8537 "ARM v6S-M",
8538 "ARM v7E-M"
8539 };
8540 const size_t name_table_size = sizeof(name_table) / sizeof(name_table[0]);
8541
8542 if (value < name_table_size)
8543 return std::string(name_table[value]);
8544 else
8545 {
8546 char buffer[100];
8547 sprintf(buffer, "<unknown CPU value %u>", value);
8548 return std::string(buffer);
8549 }
8550 }
8551
8552 // Merge object attributes from input file called NAME with those of the
8553 // output. The input object attributes are in the object pointed to by PASD.
8554
8555 template<bool big_endian>
8556 void
8557 Target_arm<big_endian>::merge_object_attributes(
8558 const char* name,
8559 const Attributes_section_data* pasd)
8560 {
8561 // Return if there is no attributes section data.
8562 if (pasd == NULL)
8563 return;
8564
8565 // If output has no object attributes, just copy.
8566 if (this->attributes_section_data_ == NULL)
8567 {
8568 this->attributes_section_data_ = new Attributes_section_data(*pasd);
8569 return;
8570 }
8571
8572 const int vendor = Object_attribute::OBJ_ATTR_PROC;
8573 const Object_attribute* in_attr = pasd->known_attributes(vendor);
8574 Object_attribute* out_attr =
8575 this->attributes_section_data_->known_attributes(vendor);
8576
8577 // This needs to happen before Tag_ABI_FP_number_model is merged.
8578 if (in_attr[elfcpp::Tag_ABI_VFP_args].int_value()
8579 != out_attr[elfcpp::Tag_ABI_VFP_args].int_value())
8580 {
8581 // Ignore mismatches if the object doesn't use floating point.
8582 if (out_attr[elfcpp::Tag_ABI_FP_number_model].int_value() == 0)
8583 out_attr[elfcpp::Tag_ABI_VFP_args].set_int_value(
8584 in_attr[elfcpp::Tag_ABI_VFP_args].int_value());
8585 else if (in_attr[elfcpp::Tag_ABI_FP_number_model].int_value() != 0)
8586 gold_error(_("%s uses VFP register arguments, output does not"),
8587 name);
8588 }
8589
8590 for (int i = 4; i < Vendor_object_attributes::NUM_KNOWN_ATTRIBUTES; ++i)
8591 {
8592 // Merge this attribute with existing attributes.
8593 switch (i)
8594 {
8595 case elfcpp::Tag_CPU_raw_name:
8596 case elfcpp::Tag_CPU_name:
8597 // These are merged after Tag_CPU_arch.
8598 break;
8599
8600 case elfcpp::Tag_ABI_optimization_goals:
8601 case elfcpp::Tag_ABI_FP_optimization_goals:
8602 // Use the first value seen.
8603 break;
8604
8605 case elfcpp::Tag_CPU_arch:
8606 {
8607 unsigned int saved_out_attr = out_attr->int_value();
8608 // Merge Tag_CPU_arch and Tag_also_compatible_with.
8609 int secondary_compat =
8610 this->get_secondary_compatible_arch(pasd);
8611 int secondary_compat_out =
8612 this->get_secondary_compatible_arch(
8613 this->attributes_section_data_);
8614 out_attr[i].set_int_value(
8615 tag_cpu_arch_combine(name, out_attr[i].int_value(),
8616 &secondary_compat_out,
8617 in_attr[i].int_value(),
8618 secondary_compat));
8619 this->set_secondary_compatible_arch(this->attributes_section_data_,
8620 secondary_compat_out);
8621
8622 // Merge Tag_CPU_name and Tag_CPU_raw_name.
8623 if (out_attr[i].int_value() == saved_out_attr)
8624 ; // Leave the names alone.
8625 else if (out_attr[i].int_value() == in_attr[i].int_value())
8626 {
8627 // The output architecture has been changed to match the
8628 // input architecture. Use the input names.
8629 out_attr[elfcpp::Tag_CPU_name].set_string_value(
8630 in_attr[elfcpp::Tag_CPU_name].string_value());
8631 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value(
8632 in_attr[elfcpp::Tag_CPU_raw_name].string_value());
8633 }
8634 else
8635 {
8636 out_attr[elfcpp::Tag_CPU_name].set_string_value("");
8637 out_attr[elfcpp::Tag_CPU_raw_name].set_string_value("");
8638 }
8639
8640 // If we still don't have a value for Tag_CPU_name,
8641 // make one up now. Tag_CPU_raw_name remains blank.
8642 if (out_attr[elfcpp::Tag_CPU_name].string_value() == "")
8643 {
8644 const std::string cpu_name =
8645 this->tag_cpu_name_value(out_attr[i].int_value());
8646 // FIXME: If we see an unknown CPU, this will be set
8647 // to "<unknown CPU n>", where n is the attribute value.
8648 // This is different from BFD, which leaves the name alone.
8649 out_attr[elfcpp::Tag_CPU_name].set_string_value(cpu_name);
8650 }
8651 }
8652 break;
8653
8654 case elfcpp::Tag_ARM_ISA_use:
8655 case elfcpp::Tag_THUMB_ISA_use:
8656 case elfcpp::Tag_WMMX_arch:
8657 case elfcpp::Tag_Advanced_SIMD_arch:
8658 // ??? Do Advanced_SIMD (NEON) and WMMX conflict?
8659 case elfcpp::Tag_ABI_FP_rounding:
8660 case elfcpp::Tag_ABI_FP_exceptions:
8661 case elfcpp::Tag_ABI_FP_user_exceptions:
8662 case elfcpp::Tag_ABI_FP_number_model:
8663 case elfcpp::Tag_VFP_HP_extension:
8664 case elfcpp::Tag_CPU_unaligned_access:
8665 case elfcpp::Tag_T2EE_use:
8666 case elfcpp::Tag_Virtualization_use:
8667 case elfcpp::Tag_MPextension_use:
8668 // Use the largest value specified.
8669 if (in_attr[i].int_value() > out_attr[i].int_value())
8670 out_attr[i].set_int_value(in_attr[i].int_value());
8671 break;
8672
8673 case elfcpp::Tag_ABI_align8_preserved:
8674 case elfcpp::Tag_ABI_PCS_RO_data:
8675 // Use the smallest value specified.
8676 if (in_attr[i].int_value() < out_attr[i].int_value())
8677 out_attr[i].set_int_value(in_attr[i].int_value());
8678 break;
8679
8680 case elfcpp::Tag_ABI_align8_needed:
8681 if ((in_attr[i].int_value() > 0 || out_attr[i].int_value() > 0)
8682 && (in_attr[elfcpp::Tag_ABI_align8_preserved].int_value() == 0
8683 || (out_attr[elfcpp::Tag_ABI_align8_preserved].int_value()
8684 == 0)))
8685 {
8686 // This error message should be enabled once all non-conformant
8687 // binaries in the toolchain have had the attributes set
8688 // properly.
8689 // gold_error(_("output 8-byte data alignment conflicts with %s"),
8690 // name);
8691 }
8692 // Fall through.
8693 case elfcpp::Tag_ABI_FP_denormal:
8694 case elfcpp::Tag_ABI_PCS_GOT_use:
8695 {
8696 // These tags have 0 = don't care, 1 = strong requirement,
8697 // 2 = weak requirement.
8698 static const int order_021[3] = {0, 2, 1};
8699
8700 // Use the "greatest" from the sequence 0, 2, 1, or the largest
8701 // value if greater than 2 (for future-proofing).
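// For example, an input value of 2 (weak requirement) merged with an
// output value of 1 (strong requirement) keeps 1, because
// order_021[2] == 1 is not greater than order_021[1] == 2.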
8702 if ((in_attr[i].int_value() > 2
8703 && in_attr[i].int_value() > out_attr[i].int_value())
8704 || (in_attr[i].int_value() <= 2
8705 && out_attr[i].int_value() <= 2
8706 && (order_021[in_attr[i].int_value()]
8707 > order_021[out_attr[i].int_value()])))
8708 out_attr[i].set_int_value(in_attr[i].int_value());
8709 }
8710 break;
8711
8712 case elfcpp::Tag_CPU_arch_profile:
8713 if (out_attr[i].int_value() != in_attr[i].int_value())
8714 {
8715 // 0 will merge with anything.
8716 // 'A' and 'S' merge to 'A'.
8717 // 'R' and 'S' merge to 'R'.
8718 // 'M' and 'A|R|S' is an error.
8719 if (out_attr[i].int_value() == 0
8720 || (out_attr[i].int_value() == 'S'
8721 && (in_attr[i].int_value() == 'A'
8722 || in_attr[i].int_value() == 'R')))
8723 out_attr[i].set_int_value(in_attr[i].int_value());
8724 else if (in_attr[i].int_value() == 0
8725 || (in_attr[i].int_value() == 'S'
8726 && (out_attr[i].int_value() == 'A'
8727 || out_attr[i].int_value() == 'R')))
8728 ; // Do nothing.
8729 else
8730 {
8731 gold_error
8732 (_("conflicting architecture profiles %c/%c"),
8733 in_attr[i].int_value() ? in_attr[i].int_value() : '0',
8734 out_attr[i].int_value() ? out_attr[i].int_value() : '0');
8735 }
8736 }
8737 break;
8738 case elfcpp::Tag_VFP_arch:
8739 {
8740 static const struct
8741 {
8742 int ver;
8743 int regs;
8744 } vfp_versions[7] =
8745 {
8746 {0, 0},
8747 {1, 16},
8748 {2, 16},
8749 {3, 32},
8750 {3, 16},
8751 {4, 32},
8752 {4, 16}
8753 };
8754
8755 // Values greater than 6 aren't defined, so just pick the
8756 // biggest.
8757 if (in_attr[i].int_value() > 6
8758 && in_attr[i].int_value() > out_attr[i].int_value())
8759 {
8760 *out_attr = *in_attr;
8761 break;
8762 }
8763 // The output uses the superset of input features
8764 // (ISA version) and registers.
8765 int ver = std::max(vfp_versions[in_attr[i].int_value()].ver,
8766 vfp_versions[out_attr[i].int_value()].ver);
8767 int regs = std::max(vfp_versions[in_attr[i].int_value()].regs,
8768 vfp_versions[out_attr[i].int_value()].regs);
8769 // This assumes all possible supersets are also valid
8770 // options.
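// For example, merging tag value 2 (ver 2, 16 regs) with tag value 4
// (ver 3, 16 regs) gives ver == 3 and regs == 16, which the loop below
// maps back to tag value 4 (VFPv3-D16 in the usual EABI naming).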
8771 int newval;
8772 for (newval = 6; newval > 0; newval--)
8773 {
8774 if (regs == vfp_versions[newval].regs
8775 && ver == vfp_versions[newval].ver)
8776 break;
8777 }
8778 out_attr[i].set_int_value(newval);
8779 }
8780 break;
8781 case elfcpp::Tag_PCS_config:
8782 if (out_attr[i].int_value() == 0)
8783 out_attr[i].set_int_value(in_attr[i].int_value());
8784 else if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
8785 {
8786 // It's sometimes ok to mix different configs, so this is only
8787 // a warning.
8788 gold_warning(_("%s: conflicting platform configuration"), name);
8789 }
8790 break;
8791 case elfcpp::Tag_ABI_PCS_R9_use:
8792 if (in_attr[i].int_value() != out_attr[i].int_value()
8793 && out_attr[i].int_value() != elfcpp::AEABI_R9_unused
8794 && in_attr[i].int_value() != elfcpp::AEABI_R9_unused)
8795 {
8796 gold_error(_("%s: conflicting use of R9"), name);
8797 }
8798 if (out_attr[i].int_value() == elfcpp::AEABI_R9_unused)
8799 out_attr[i].set_int_value(in_attr[i].int_value());
8800 break;
8801 case elfcpp::Tag_ABI_PCS_RW_data:
8802 if (in_attr[i].int_value() == elfcpp::AEABI_PCS_RW_data_SBrel
8803 && (in_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
8804 != elfcpp::AEABI_R9_SB)
8805 && (out_attr[elfcpp::Tag_ABI_PCS_R9_use].int_value()
8806 != elfcpp::AEABI_R9_unused))
8807 {
8808 gold_error(_("%s: SB relative addressing conflicts with use "
8809 "of R9"),
8810 name);
8811 }
8812 // Use the smallest value specified.
8813 if (in_attr[i].int_value() < out_attr[i].int_value())
8814 out_attr[i].set_int_value(in_attr[i].int_value());
8815 break;
8816 case elfcpp::Tag_ABI_PCS_wchar_t:
8817 // FIXME: Make it possible to turn off this warning.
8818 if (out_attr[i].int_value()
8819 && in_attr[i].int_value()
8820 && out_attr[i].int_value() != in_attr[i].int_value())
8821 {
8822 gold_warning(_("%s uses %u-byte wchar_t yet the output is to "
8823 "use %u-byte wchar_t; use of wchar_t values "
8824 "across objects may fail"),
8825 name, in_attr[i].int_value(),
8826 out_attr[i].int_value());
8827 }
8828 else if (in_attr[i].int_value() && !out_attr[i].int_value())
8829 out_attr[i].set_int_value(in_attr[i].int_value());
8830 break;
8831 case elfcpp::Tag_ABI_enum_size:
8832 if (in_attr[i].int_value() != elfcpp::AEABI_enum_unused)
8833 {
8834 if (out_attr[i].int_value() == elfcpp::AEABI_enum_unused
8835 || out_attr[i].int_value() == elfcpp::AEABI_enum_forced_wide)
8836 {
8837 // The existing object is compatible with anything.
8838 // Use whatever requirements the new object has.
8839 out_attr[i].set_int_value(in_attr[i].int_value());
8840 }
8841 // FIXME: Make it possible to turn off this warning.
8842 else if (in_attr[i].int_value() != elfcpp::AEABI_enum_forced_wide
8843 && out_attr[i].int_value() != in_attr[i].int_value())
8844 {
8845 unsigned int in_value = in_attr[i].int_value();
8846 unsigned int out_value = out_attr[i].int_value();
8847 gold_warning(_("%s uses %s enums yet the output is to use "
8848 "%s enums; use of enum values across objects "
8849 "may fail"),
8850 name,
8851 this->aeabi_enum_name(in_value).c_str(),
8852 this->aeabi_enum_name(out_value).c_str());
8853 }
8854 }
8855 break;
8856 case elfcpp::Tag_ABI_VFP_args:
8857 // Already done.
8858 break;
8859 case elfcpp::Tag_ABI_WMMX_args:
8860 if (in_attr[i].int_value() != out_attr[i].int_value())
8861 {
8862 gold_error(_("%s uses iWMMXt register arguments, output does "
8863 "not"),
8864 name);
8865 }
8866 break;
8867 case Object_attribute::Tag_compatibility:
8868 // Merged in target-independent code.
8869 break;
8870 case elfcpp::Tag_ABI_HardFP_use:
8871 // 1 (SP) and 2 (DP) conflict, so combine to 3 (SP & DP).
8872 if ((in_attr[i].int_value() == 1 && out_attr[i].int_value() == 2)
8873 || (in_attr[i].int_value() == 2 && out_attr[i].int_value() == 1))
8874 out_attr[i].set_int_value(3);
8875 else if (in_attr[i].int_value() > out_attr[i].int_value())
8876 out_attr[i].set_int_value(in_attr[i].int_value());
8877 break;
8878 case elfcpp::Tag_ABI_FP_16bit_format:
8879 if (in_attr[i].int_value() != 0 && out_attr[i].int_value() != 0)
8880 {
8881 if (in_attr[i].int_value() != out_attr[i].int_value())
8882 gold_error(_("fp16 format mismatch between %s and output"),
8883 name);
8884 }
8885 if (in_attr[i].int_value() != 0)
8886 out_attr[i].set_int_value(in_attr[i].int_value());
8887 break;
8888
8889 case elfcpp::Tag_nodefaults:
8890 // This tag is set if it exists, but the value is unused (and is
8891 // typically zero). We don't actually need to do anything here -
8892 // the merge happens automatically when the type flags are merged
8893 // below.
8894 break;
8895 case elfcpp::Tag_also_compatible_with:
8896 // Already done in Tag_CPU_arch.
8897 break;
8898 case elfcpp::Tag_conformance:
8899 // Keep the attribute if it matches. Throw it away otherwise.
8900 // No attribute means no claim to conform.
8901 if (in_attr[i].string_value() != out_attr[i].string_value())
8902 out_attr[i].set_string_value("");
8903 break;
8904
8905 default:
8906 {
8907 const char* err_object = NULL;
8908
8909 // The "known_obj_attributes" table does contain some undefined
8910 // attributes. Ensure that they are unused.
8911 if (out_attr[i].int_value() != 0
8912 || out_attr[i].string_value() != "")
8913 err_object = "output";
8914 else if (in_attr[i].int_value() != 0
8915 || in_attr[i].string_value() != "")
8916 err_object = name;
8917
8918 if (err_object != NULL)
8919 {
8920 // Attribute numbers >=64 (mod 128) can be safely ignored.
8921 if ((i & 127) < 64)
8922 gold_error(_("%s: unknown mandatory EABI object attribute "
8923 "%d"),
8924 err_object, i);
8925 else
8926 gold_warning(_("%s: unknown EABI object attribute %d"),
8927 err_object, i);
8928 }
8929
8930 // Only pass on attributes that match in both inputs.
8931 if (!in_attr[i].matches(out_attr[i]))
8932 {
8933 out_attr[i].set_int_value(0);
8934 out_attr[i].set_string_value("");
8935 }
8936 }
8937 }
8938
8939 // If out_attr was copied from in_attr then it won't have a type yet.
8940 if (in_attr[i].type() && !out_attr[i].type())
8941 out_attr[i].set_type(in_attr[i].type());
8942 }
8943
8944 // Merge Tag_compatibility attributes and any common GNU ones.
8945 this->attributes_section_data_->merge(name, pasd);
8946
8947 // Check for any attributes not known on ARM.
8948 typedef Vendor_object_attributes::Other_attributes Other_attributes;
8949 const Other_attributes* in_other_attributes = pasd->other_attributes(vendor);
8950 Other_attributes::const_iterator in_iter = in_other_attributes->begin();
8951 Other_attributes* out_other_attributes =
8952 this->attributes_section_data_->other_attributes(vendor);
8953 Other_attributes::iterator out_iter = out_other_attributes->begin();
8954
8955 while (in_iter != in_other_attributes->end()
8956 || out_iter != out_other_attributes->end())
8957 {
8958 const char* err_object = NULL;
8959 int err_tag = 0;
8960
8961 // The tags for each list are in numerical order.
8962 // If the tags are equal, then merge.
8963 if (out_iter != out_other_attributes->end()
8964 && (in_iter == in_other_attributes->end()
8965 || in_iter->first > out_iter->first))
8966 {
8967 // This attribute only exists in output. We can't merge, and we
8968 // don't know what the tag means, so delete it.
8969 err_object = "output";
8970 err_tag = out_iter->first;
8971 int saved_tag = out_iter->first;
8972 delete out_iter->second;
8973 out_other_attributes->erase(out_iter);
8974 out_iter = out_other_attributes->upper_bound(saved_tag);
8975 }
8976 else if (in_iter != in_other_attributes->end()
8977 && (out_iter != out_other_attributes->end()
8978 || in_iter->first < out_iter->first))
8979 {
8980 // This attribute only exists in input. We can't merge, and we
8981 // don't know what the tag means, so ignore it.
8982 err_object = name;
8983 err_tag = in_iter->first;
8984 ++in_iter;
8985 }
8986 else // The tags are equal.
8987 {
8988 // At present, all attributes in the list are unknown, and
8989 // therefore can't be merged meaningfully.
8990 err_object = "output";
8991 err_tag = out_iter->first;
8992
8993 // Only pass on attributes that match in both inputs.
8994 if (!in_iter->second->matches(*(out_iter->second)))
8995 {
8996 // No match. Delete the attribute.
8997 int saved_tag = out_iter->first;
8998 delete out_iter->second;
8999 out_other_attributes->erase(out_iter);
9000 out_iter = out_other_attributes->upper_bound(saved_tag);
9001 }
9002 else
9003 {
9004 // Matched. Keep the attribute and move to the next.
9005 ++out_iter;
9006 ++in_iter;
9007 }
9008 }
9009
9010 if (err_object)
9011 {
9012 // Attribute numbers >=64 (mod 128) can be safely ignored.
9013 if ((err_tag & 127) < 64)
9014 {
9015 gold_error(_("%s: unknown mandatory EABI object attribute %d"),
9016 err_object, err_tag);
9017 }
9018 else
9019 {
9020 gold_warning(_("%s: unknown EABI object attribute %d"),
9021 err_object, err_tag);
9022 }
9023 }
9024 }
9025 }
9026
9027 // Return whether a relocation type uses the LSB to distinguish THUMB
9028 // addresses.
9029 template<bool big_endian>
9030 bool
9031 Target_arm<big_endian>::reloc_uses_thumb_bit(unsigned int r_type)
9032 {
9033 switch (r_type)
9034 {
9035 case elfcpp::R_ARM_PC24:
9036 case elfcpp::R_ARM_ABS32:
9037 case elfcpp::R_ARM_REL32:
9038 case elfcpp::R_ARM_SBREL32:
9039 case elfcpp::R_ARM_THM_CALL:
9040 case elfcpp::R_ARM_GLOB_DAT:
9041 case elfcpp::R_ARM_JUMP_SLOT:
9042 case elfcpp::R_ARM_GOTOFF32:
9043 case elfcpp::R_ARM_PLT32:
9044 case elfcpp::R_ARM_CALL:
9045 case elfcpp::R_ARM_JUMP24:
9046 case elfcpp::R_ARM_THM_JUMP24:
9047 case elfcpp::R_ARM_SBREL31:
9048 case elfcpp::R_ARM_PREL31:
9049 case elfcpp::R_ARM_MOVW_ABS_NC:
9050 case elfcpp::R_ARM_MOVW_PREL_NC:
9051 case elfcpp::R_ARM_THM_MOVW_ABS_NC:
9052 case elfcpp::R_ARM_THM_MOVW_PREL_NC:
9053 case elfcpp::R_ARM_THM_JUMP19:
9054 case elfcpp::R_ARM_THM_ALU_PREL_11_0:
9055 case elfcpp::R_ARM_ALU_PC_G0_NC:
9056 case elfcpp::R_ARM_ALU_PC_G0:
9057 case elfcpp::R_ARM_ALU_PC_G1_NC:
9058 case elfcpp::R_ARM_ALU_PC_G1:
9059 case elfcpp::R_ARM_ALU_PC_G2:
9060 case elfcpp::R_ARM_ALU_SB_G0_NC:
9061 case elfcpp::R_ARM_ALU_SB_G0:
9062 case elfcpp::R_ARM_ALU_SB_G1_NC:
9063 case elfcpp::R_ARM_ALU_SB_G1:
9064 case elfcpp::R_ARM_ALU_SB_G2:
9065 case elfcpp::R_ARM_MOVW_BREL_NC:
9066 case elfcpp::R_ARM_MOVW_BREL:
9067 case elfcpp::R_ARM_THM_MOVW_BREL_NC:
9068 case elfcpp::R_ARM_THM_MOVW_BREL:
9069 return true;
9070 default:
9071 return false;
9072 }
9073 }
9074
9075 // Stub-generation methods for Target_arm.
9076
9077 // Make a new Arm_input_section object.
9078
9079 template<bool big_endian>
9080 Arm_input_section<big_endian>*
9081 Target_arm<big_endian>::new_arm_input_section(
9082 Relobj* relobj,
9083 unsigned int shndx)
9084 {
9085 Section_id sid(relobj, shndx);
9086
9087 Arm_input_section<big_endian>* arm_input_section =
9088 new Arm_input_section<big_endian>(relobj, shndx);
9089 arm_input_section->init();
9090
9091 // Register new Arm_input_section in map for look-up.
9092 std::pair<typename Arm_input_section_map::iterator, bool> ins =
9093 this->arm_input_section_map_.insert(std::make_pair(sid, arm_input_section));
9094
9095 // Make sure that we have not created another Arm_input_section
9096 // for this input section already.
9097 gold_assert(ins.second);
9098
9099 return arm_input_section;
9100 }
9101
9102 // Find the Arm_input_section object corresponding to the SHNDX-th input
9103 // section of RELOBJ.
9104
9105 template<bool big_endian>
9106 Arm_input_section<big_endian>*
9107 Target_arm<big_endian>::find_arm_input_section(
9108 Relobj* relobj,
9109 unsigned int shndx) const
9110 {
9111 Section_id sid(relobj, shndx);
9112 typename Arm_input_section_map::const_iterator p =
9113 this->arm_input_section_map_.find(sid);
9114 return (p != this->arm_input_section_map_.end()) ? p->second : NULL;
9115 }
9116
9117 // Make a new stub table.
9118
9119 template<bool big_endian>
9120 Stub_table<big_endian>*
9121 Target_arm<big_endian>::new_stub_table(Arm_input_section<big_endian>* owner)
9122 {
9123 Stub_table<big_endian>* stub_table =
9124 new Stub_table<big_endian>(owner);
9125 this->stub_tables_.push_back(stub_table);
9126
9127 stub_table->set_address(owner->address() + owner->data_size());
9128 stub_table->set_file_offset(owner->offset() + owner->data_size());
9129 stub_table->finalize_data_size();
9130
9131 return stub_table;
9132 }
9133
9134 // Scan a relocation for stub generation.
9135
9136 template<bool big_endian>
9137 void
9138 Target_arm<big_endian>::scan_reloc_for_stub(
9139 const Relocate_info<32, big_endian>* relinfo,
9140 unsigned int r_type,
9141 const Sized_symbol<32>* gsym,
9142 unsigned int r_sym,
9143 const Symbol_value<32>* psymval,
9144 elfcpp::Elf_types<32>::Elf_Swxword addend,
9145 Arm_address address)
9146 {
9147 typedef typename Target_arm<big_endian>::Relocate Relocate;
9148
9149 const Arm_relobj<big_endian>* arm_relobj =
9150 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
9151
9152 if (r_type == elfcpp::R_ARM_V4BX)
9153 {
9154 const uint32_t reg = (addend & 0xf);
9155 if (this->fix_v4bx() == General_options::FIX_V4BX_INTERWORKING
9156 && reg < 0xf)
9157 {
9158 // Try looking up an existing stub from a stub table.
9159 Stub_table<big_endian>* stub_table =
9160 arm_relobj->stub_table(relinfo->data_shndx);
9161 gold_assert(stub_table != NULL);
9162
9163 if (stub_table->find_arm_v4bx_stub(reg) == NULL)
9164 {
9165 // Create a new stub and add it to the stub table.
9166 Arm_v4bx_stub* stub =
9167 this->stub_factory().make_arm_v4bx_stub(reg);
9168 gold_assert(stub != NULL);
9169 stub_table->add_arm_v4bx_stub(stub);
9170 }
9171 }
9172
9173 return;
9174 }
9175
9176 bool target_is_thumb;
9177 Symbol_value<32> symval;
9178 if (gsym != NULL)
9179 {
9180 // This is a global symbol. Determine if we use PLT and if the
9181 // final target is THUMB.
9182 if (gsym->use_plt_offset(Relocate::reloc_is_non_pic(r_type)))
9183 {
9184 // This uses a PLT, change the symbol value.
9185 symval.set_output_value(this->plt_section()->address()
9186 + gsym->plt_offset());
9187 psymval = &symval;
9188 target_is_thumb = false;
9189 }
9190 else if (gsym->is_undefined())
9191 // There is no need to generate a stub if the symbol is undefined.
9192 return;
9193 else
9194 {
9195 target_is_thumb =
9196 ((gsym->type() == elfcpp::STT_ARM_TFUNC)
9197 || (gsym->type() == elfcpp::STT_FUNC
9198 && !gsym->is_undefined()
9199 && ((psymval->value(arm_relobj, 0) & 1) != 0)));
9200 }
9201 }
9202 else
9203 {
9204 // This is a local symbol. Determine if the final target is THUMB.
9205 target_is_thumb = arm_relobj->local_symbol_is_thumb_function(r_sym);
9206 }
9207
9208 // Strip LSB if this points to a THUMB target.
9209 if (target_is_thumb
9210 && Target_arm<big_endian>::reloc_uses_thumb_bit(r_type)
9211 && ((psymval->value(arm_relobj, 0) & 1) != 0))
9212 {
9213 Arm_address stripped_value =
9214 psymval->value(arm_relobj, 0) & ~static_cast<Arm_address>(1);
9215 symval.set_output_value(stripped_value);
9216 psymval = &symval;
9217 }
9218
9219 // Get the symbol value.
9220 Symbol_value<32>::Value value = psymval->value(arm_relobj, 0);
9221
9222 // Owing to pipelining, the PC relative branches below actually skip
9223 // two instructions when the branch offset is 0.
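// (An ARM branch reads the PC as the branch address plus 8, and a Thumb
// branch reads it as the branch address plus 4, hence the constants
// added below.)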
9224 Arm_address destination;
9225 switch (r_type)
9226 {
9227 case elfcpp::R_ARM_CALL:
9228 case elfcpp::R_ARM_JUMP24:
9229 case elfcpp::R_ARM_PLT32:
9230 // ARM branches.
9231 destination = value + addend + 8;
9232 break;
9233 case elfcpp::R_ARM_THM_CALL:
9234 case elfcpp::R_ARM_THM_XPC22:
9235 case elfcpp::R_ARM_THM_JUMP24:
9236 case elfcpp::R_ARM_THM_JUMP19:
9237 // THUMB branches.
9238 destination = value + addend + 4;
9239 break;
9240 default:
9241 gold_unreachable();
9242 }
9243
9244 Reloc_stub* stub = NULL;
9245 Stub_type stub_type =
9246 Reloc_stub::stub_type_for_reloc(r_type, address, destination,
9247 target_is_thumb);
9248 if (stub_type != arm_stub_none)
9249 {
9250 // Try looking up an existing stub from a stub table.
9251 Stub_table<big_endian>* stub_table =
9252 arm_relobj->stub_table(relinfo->data_shndx);
9253 gold_assert(stub_table != NULL);
9254
9255 // Locate stub by destination.
9256 Reloc_stub::Key stub_key(stub_type, gsym, arm_relobj, r_sym, addend);
9257
9258 // Create a stub if there is not one already.
9259 stub = stub_table->find_reloc_stub(stub_key);
9260 if (stub == NULL)
9261 {
9262 // Create a new stub and add it to the stub table.
9263 stub = this->stub_factory().make_reloc_stub(stub_type);
9264 stub_table->add_reloc_stub(stub, stub_key);
9265 }
9266
9267 // Record the destination address.
9268 stub->set_destination_address(destination
9269 | (target_is_thumb ? 1 : 0));
9270 }
9271
9272 // For Cortex-A8, we need to record a relocation at a 4K page boundary.
9273 if (this->fix_cortex_a8_
9274 && (r_type == elfcpp::R_ARM_THM_JUMP24
9275 || r_type == elfcpp::R_ARM_THM_JUMP19
9276 || r_type == elfcpp::R_ARM_THM_CALL
9277 || r_type == elfcpp::R_ARM_THM_XPC22)
9278 && (address & 0xfffU) == 0xffeU)
9279 {
9280 // Found a candidate. Note we haven't checked that the destination is
9281 // within 4K here: if we do so (and don't create a record) we can't
9282 // tell that a branch should have been relocated when scanning later.
9283 this->cortex_a8_relocs_info_[address] =
9284 new Cortex_a8_reloc(stub, r_type,
9285 destination | (target_is_thumb ? 1 : 0));
9286 }
9287 }
9288
9289 // This function scans a relocation section for stub generation.
9290 // For each relocation that may require a stub, it calls
9291 // scan_reloc_for_stub(), which implements the machine-specific
9292 // part of stub generation.
9293
9294 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
9295 // SHT_REL or SHT_RELA.
9296
9297 // PRELOCS points to the relocation data. RELOC_COUNT is the number
9298 // of relocs. OUTPUT_SECTION is the output section.
9299 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
9300 // mapped to output offsets.
9301
9302 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
9303 // VIEW_SIZE is the size. These refer to the input section, unless
9304 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
9305 // the output section.
9306
9307 template<bool big_endian>
9308 template<int sh_type>
9309 void inline
9310 Target_arm<big_endian>::scan_reloc_section_for_stubs(
9311 const Relocate_info<32, big_endian>* relinfo,
9312 const unsigned char* prelocs,
9313 size_t reloc_count,
9314 Output_section* output_section,
9315 bool needs_special_offset_handling,
9316 const unsigned char* view,
9317 elfcpp::Elf_types<32>::Elf_Addr view_address,
9318 section_size_type)
9319 {
9320 typedef typename Reloc_types<sh_type, 32, big_endian>::Reloc Reltype;
9321 const int reloc_size =
9322 Reloc_types<sh_type, 32, big_endian>::reloc_size;
9323
9324 Arm_relobj<big_endian>* arm_object =
9325 Arm_relobj<big_endian>::as_arm_relobj(relinfo->object);
9326 unsigned int local_count = arm_object->local_symbol_count();
9327
9328 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
9329
9330 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
9331 {
9332 Reltype reloc(prelocs);
9333
9334 typename elfcpp::Elf_types<32>::Elf_WXword r_info = reloc.get_r_info();
9335 unsigned int r_sym = elfcpp::elf_r_sym<32>(r_info);
9336 unsigned int r_type = elfcpp::elf_r_type<32>(r_info);
9337
9338 r_type = this->get_real_reloc_type(r_type);
9339
9340 // Only a few relocation types need stubs.
9341 if ((r_type != elfcpp::R_ARM_CALL)
9342 && (r_type != elfcpp::R_ARM_JUMP24)
9343 && (r_type != elfcpp::R_ARM_PLT32)
9344 && (r_type != elfcpp::R_ARM_THM_CALL)
9345 && (r_type != elfcpp::R_ARM_THM_XPC22)
9346 && (r_type != elfcpp::R_ARM_THM_JUMP24)
9347 && (r_type != elfcpp::R_ARM_THM_JUMP19)
9348 && (r_type != elfcpp::R_ARM_V4BX))
9349 continue;
9350
9351 section_offset_type offset =
9352 convert_to_section_size_type(reloc.get_r_offset());
9353
9354 if (needs_special_offset_handling)
9355 {
9356 offset = output_section->output_offset(relinfo->object,
9357 relinfo->data_shndx,
9358 offset);
9359 if (offset == -1)
9360 continue;
9361 }
9362
9363 if (r_type == elfcpp::R_ARM_V4BX)
9364 {
9365 // Get the BX instruction.
9366 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
9367 const Valtype* wv = reinterpret_cast<const Valtype*>(view + offset);
9368 elfcpp::Elf_types<32>::Elf_Swxword insn =
9369 elfcpp::Swap<32, big_endian>::readval(wv);
9370 this->scan_reloc_for_stub(relinfo, r_type, NULL, 0, NULL,
9371 insn, NULL);
9372 continue;
9373 }
9374
9375 // Get the addend.
9376 Stub_addend_reader<sh_type, big_endian> stub_addend_reader;
9377 elfcpp::Elf_types<32>::Elf_Swxword addend =
9378 stub_addend_reader(r_type, view + offset, reloc);
9379
9380 const Sized_symbol<32>* sym;
9381
9382 Symbol_value<32> symval;
9383 const Symbol_value<32> *psymval;
9384 if (r_sym < local_count)
9385 {
9386 sym = NULL;
9387 psymval = arm_object->local_symbol(r_sym);
9388
9389 // If the local symbol belongs to a section we are discarding,
9390 // and that section is a debug section, try to find the
9391 // corresponding kept section and map this symbol to its
9392 // counterpart in the kept section. The symbol must not
9393 // correspond to a section we are folding.
9394 bool is_ordinary;
9395 unsigned int shndx = psymval->input_shndx(&is_ordinary);
9396 if (is_ordinary
9397 && shndx != elfcpp::SHN_UNDEF
9398 && !arm_object->is_section_included(shndx)
9399 && !(relinfo->symtab->is_section_folded(arm_object, shndx)))
9400 {
9401 if (comdat_behavior == CB_UNDETERMINED)
9402 {
9403 std::string name =
9404 arm_object->section_name(relinfo->data_shndx);
9405 comdat_behavior = get_comdat_behavior(name.c_str());
9406 }
9407 if (comdat_behavior == CB_PRETEND)
9408 {
9409 bool found;
9410 typename elfcpp::Elf_types<32>::Elf_Addr value =
9411 arm_object->map_to_kept_section(shndx, &found);
9412 if (found)
9413 symval.set_output_value(value + psymval->input_value());
9414 else
9415 symval.set_output_value(0);
9416 }
9417 else
9418 {
9419 symval.set_output_value(0);
9420 }
9421 symval.set_no_output_symtab_entry();
9422 psymval = &symval;
9423 }
9424 }
9425 else
9426 {
9427 const Symbol* gsym = arm_object->global_symbol(r_sym);
9428 gold_assert(gsym != NULL);
9429 if (gsym->is_forwarder())
9430 gsym = relinfo->symtab->resolve_forwards(gsym);
9431
9432 sym = static_cast<const Sized_symbol<32>*>(gsym);
9433 if (sym->has_symtab_index())
9434 symval.set_output_symtab_index(sym->symtab_index());
9435 else
9436 symval.set_no_output_symtab_entry();
9437
9438 // We need to compute the would-be final value of this global
9439 // symbol.
9440 const Symbol_table* symtab = relinfo->symtab;
9441 const Sized_symbol<32>* sized_symbol =
9442 symtab->get_sized_symbol<32>(gsym);
9443 Symbol_table::Compute_final_value_status status;
9444 Arm_address value =
9445 symtab->compute_final_value<32>(sized_symbol, &status);
9446
9447 // Skip this if the symbol has no output section.
9448 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
9449 continue;
9450
9451 symval.set_output_value(value);
9452 psymval = &symval;
9453 }
9454
9455 // If the symbol is a section symbol, we don't know the actual type of
9456 // destination. Give up.
9457 if (psymval->is_section_symbol())
9458 continue;
9459
9460 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
9461 addend, view_address + offset);
9462 }
9463 }
9464
9465 // Scan an input section for stub generation.
9466
9467 template<bool big_endian>
9468 void
9469 Target_arm<big_endian>::scan_section_for_stubs(
9470 const Relocate_info<32, big_endian>* relinfo,
9471 unsigned int sh_type,
9472 const unsigned char* prelocs,
9473 size_t reloc_count,
9474 Output_section* output_section,
9475 bool needs_special_offset_handling,
9476 const unsigned char* view,
9477 Arm_address view_address,
9478 section_size_type view_size)
9479 {
9480 if (sh_type == elfcpp::SHT_REL)
9481 this->scan_reloc_section_for_stubs<elfcpp::SHT_REL>(
9482 relinfo,
9483 prelocs,
9484 reloc_count,
9485 output_section,
9486 needs_special_offset_handling,
9487 view,
9488 view_address,
9489 view_size);
9490 else if (sh_type == elfcpp::SHT_RELA)
9491 // We do not support RELA type relocations yet. This is provided for
9492 // completeness.
9493 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
9494 relinfo,
9495 prelocs,
9496 reloc_count,
9497 output_section,
9498 needs_special_offset_handling,
9499 view,
9500 view_address,
9501 view_size);
9502 else
9503 gold_unreachable();
9504 }
9505
9506 // Group input sections for stub generation.
9507 //
9508 // We group input sections in an output section so that the total size,
9509 // including any padding space due to alignment, is smaller than GROUP_SIZE,
9510 // unless the only input section in a group is bigger than GROUP_SIZE already.
9511 // Then an ARM stub table is created to follow the last input section
9512 // in the group. For each group an ARM stub table is created and is placed
9513 // after the last input section in the group. If STUBS_ALWAYS_AFTER_BRANCH
9514 // is false, we further extend the group past the stub table.
9515
9516 template<bool big_endian>
9517 void
9518 Target_arm<big_endian>::group_sections(
9519 Layout* layout,
9520 section_size_type group_size,
9521 bool stubs_always_after_branch)
9522 {
9523 // Group input sections and insert stub table
9524 Layout::Section_list section_list;
9525 layout->get_allocated_sections(&section_list);
9526 for (Layout::Section_list::const_iterator p = section_list.begin();
9527 p != section_list.end();
9528 ++p)
9529 {
9530 Arm_output_section<big_endian>* output_section =
9531 Arm_output_section<big_endian>::as_arm_output_section(*p);
9532 output_section->group_sections(group_size, stubs_always_after_branch,
9533 this);
9534 }
9535 }
9536
9537 // Relaxation hook. This is where we do stub generation.
9538
9539 template<bool big_endian>
9540 bool
9541 Target_arm<big_endian>::do_relax(
9542 int pass,
9543 const Input_objects* input_objects,
9544 Symbol_table* symtab,
9545 Layout* layout)
9546 {
9547 // No need to generate stubs if this is a relocatable link.
9548 gold_assert(!parameters->options().relocatable());
9549
9550 // If this is the first pass, we need to group input sections into
9551 // stub groups.
9552 bool done_exidx_fixup = false;
9553 if (pass == 1)
9554 {
9555 // Determine the stub group size. The group size is the absolute
9556 // value of the parameter --stub-group-size. If --stub-group-size
9557 // is passed a negative value, we restrict stubs to be always after
9558 // the stubbed branches.
9559 int32_t stub_group_size_param =
9560 parameters->options().stub_group_size();
9561 bool stubs_always_after_branch = stub_group_size_param < 0;
9562 section_size_type stub_group_size = abs(stub_group_size_param);
9563
9564 // The Cortex-A8 erratum fix depends on stubs not being in the same 4K
9565 // page as the first half of a 32-bit branch straddling two 4K pages.
9566 // This is a crude way of enforcing that.
9567 if (this->fix_cortex_a8_)
9568 stubs_always_after_branch = true;
9569
9570 if (stub_group_size == 1)
9571 {
9572 // Default value.
9573 // The Thumb branch range of +-4MB has to be used as the default
9574 // maximum size (a given section can contain both ARM and Thumb
9575 // code, so the worst case has to be taken into account).
9576 //
9577 // This value is 24K less than that, which allows for 2025
9578 // 12-byte stubs. If we exceed that, then we will fail to link.
9579 // The user will have to relink with an explicit group size
9580 // option.
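// As a rough check on the arithmetic: 4MB is 4194304 bytes, and
// 4194304 - 4170000 = 24304 bytes, which is room for about 2025
// 12-byte stubs.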
9581 stub_group_size = 4170000;
9582 }
9583
9584 group_sections(layout, stub_group_size, stubs_always_after_branch);
9585
9586 // Also fix .ARM.exidx section coverage.
9587 Output_section* os = layout->find_output_section(".ARM.exidx");
9588 if (os != NULL && os->type() == elfcpp::SHT_ARM_EXIDX)
9589 {
9590 Arm_output_section<big_endian>* exidx_output_section =
9591 Arm_output_section<big_endian>::as_arm_output_section(os);
9592 this->fix_exidx_coverage(layout, exidx_output_section, symtab);
9593 done_exidx_fixup = true;
9594 }
9595 }
9596
9597 // The Cortex-A8 stubs are sensitive to layout of code sections. At the
9598 // beginning of each relaxation pass, just blow away all the stubs.
9599 // Alternatively, we could selectively remove only the stubs and reloc
9600 // information for code sections that have moved since the last pass.
9601 // That would require more book-keeping.
9602 typedef typename Stub_table_list::iterator Stub_table_iterator;
9603 if (this->fix_cortex_a8_)
9604 {
9605 // Clear all Cortex-A8 reloc information.
9606 for (typename Cortex_a8_relocs_info::const_iterator p =
9607 this->cortex_a8_relocs_info_.begin();
9608 p != this->cortex_a8_relocs_info_.end();
9609 ++p)
9610 delete p->second;
9611 this->cortex_a8_relocs_info_.clear();
9612
9613 // Remove all Cortex-A8 stubs.
9614 for (Stub_table_iterator sp = this->stub_tables_.begin();
9615 sp != this->stub_tables_.end();
9616 ++sp)
9617 (*sp)->remove_all_cortex_a8_stubs();
9618 }
9619
9620 // Scan relocs for relocation stubs
9621 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
9622 op != input_objects->relobj_end();
9623 ++op)
9624 {
9625 Arm_relobj<big_endian>* arm_relobj =
9626 Arm_relobj<big_endian>::as_arm_relobj(*op);
9627 arm_relobj->scan_sections_for_stubs(this, symtab, layout);
9628 }
9629
9630 // Check all stub tables to see if any of them have their data sizes
9631 // or address alignments changed. These are the only things that
9632 // matter.
9633 bool any_stub_table_changed = false;
9634 Unordered_set<const Output_section*> sections_needing_adjustment;
9635 for (Stub_table_iterator sp = this->stub_tables_.begin();
9636 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
9637 ++sp)
9638 {
9639 if ((*sp)->update_data_size_and_addralign())
9640 {
9641 // Update data size of stub table owner.
9642 Arm_input_section<big_endian>* owner = (*sp)->owner();
9643 uint64_t address = owner->address();
9644 off_t offset = owner->offset();
9645 owner->reset_address_and_file_offset();
9646 owner->set_address_and_file_offset(address, offset);
9647
9648 sections_needing_adjustment.insert(owner->output_section());
9649 any_stub_table_changed = true;
9650 }
9651 }
9652
9653 // Output_section_data::output_section() returns a const pointer but we
9654 // need to update output sections, so we record all output sections needing
9655 // update above and scan the sections here to find out what sections need
9656 // to be updated.
9657 for(Layout::Section_list::const_iterator p = layout->section_list().begin();
9658 p != layout->section_list().end();
9659 ++p)
9660 {
9661 if (sections_needing_adjustment.find(*p)
9662 != sections_needing_adjustment.end())
9663 (*p)->set_section_offsets_need_adjustment();
9664 }
9665
9666 // Stop relaxation if no EXIDX fix-up and no stub table change.
9667 bool continue_relaxation = done_exidx_fixup || any_stub_table_changed;
9668
9669 // Finalize the stubs in the last relaxation pass.
9670 if (!continue_relaxation)
9671 {
9672 for (Stub_table_iterator sp = this->stub_tables_.begin();
9673 (sp != this->stub_tables_.end()) && !any_stub_table_changed;
9674 ++sp)
9675 (*sp)->finalize_stubs();
9676
9677 // Update output local symbol counts of objects if necessary.
9678 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
9679 op != input_objects->relobj_end();
9680 ++op)
9681 {
9682 Arm_relobj<big_endian>* arm_relobj =
9683 Arm_relobj<big_endian>::as_arm_relobj(*op);
9684
9685 // Update output local symbol counts. We need to discard local
9686 // symbols defined in parts of input sections that are discarded by
9687 // relaxation.
9688 if (arm_relobj->output_local_symbol_count_needs_update())
9689 arm_relobj->update_output_local_symbol_count();
9690 }
9691 }
9692
9693 return continue_relaxation;
9694 }
9695
9696 // Relocate a stub.
9697
9698 template<bool big_endian>
9699 void
9700 Target_arm<big_endian>::relocate_stub(
9701 Stub* stub,
9702 const Relocate_info<32, big_endian>* relinfo,
9703 Output_section* output_section,
9704 unsigned char* view,
9705 Arm_address address,
9706 section_size_type view_size)
9707 {
9708 Relocate relocate;
9709 const Stub_template* stub_template = stub->stub_template();
9710 for (size_t i = 0; i < stub_template->reloc_count(); i++)
9711 {
9712 size_t reloc_insn_index = stub_template->reloc_insn_index(i);
9713 const Insn_template* insn = &stub_template->insns()[reloc_insn_index];
9714
9715 unsigned int r_type = insn->r_type();
9716 section_size_type reloc_offset = stub_template->reloc_offset(i);
9717 section_size_type reloc_size = insn->size();
9718 gold_assert(reloc_offset + reloc_size <= view_size);
9719
9720 // This is the address of the stub destination.
9721 Arm_address target = stub->reloc_target(i) + insn->reloc_addend();
9722 Symbol_value<32> symval;
9723 symval.set_output_value(target);
9724
9725 // Synthesize a fake reloc just in case. We don't have a symbol so
9726 // we use 0.
9727 unsigned char reloc_buffer[elfcpp::Elf_sizes<32>::rel_size];
9728 memset(reloc_buffer, 0, sizeof(reloc_buffer));
9729 elfcpp::Rel_write<32, big_endian> reloc_write(reloc_buffer);
9730 reloc_write.put_r_offset(reloc_offset);
9731 reloc_write.put_r_info(elfcpp::elf_r_info<32>(0, r_type));
9732 elfcpp::Rel<32, big_endian> rel(reloc_buffer);
9733
9734 relocate.relocate(relinfo, this, output_section,
9735 this->fake_relnum_for_stubs, rel, r_type,
9736 NULL, &symval, view + reloc_offset,
9737 address + reloc_offset, reloc_size);
9738 }
9739 }
9740
9741 // Determine whether an object attribute tag takes an integer, a
9742 // string or both.
9743
9744 template<bool big_endian>
9745 int
9746 Target_arm<big_endian>::do_attribute_arg_type(int tag) const
9747 {
9748 if (tag == Object_attribute::Tag_compatibility)
9749 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
9750 | Object_attribute::ATTR_TYPE_FLAG_STR_VAL);
9751 else if (tag == elfcpp::Tag_nodefaults)
9752 return (Object_attribute::ATTR_TYPE_FLAG_INT_VAL
9753 | Object_attribute::ATTR_TYPE_FLAG_NO_DEFAULT);
9754 else if (tag == elfcpp::Tag_CPU_raw_name || tag == elfcpp::Tag_CPU_name)
9755 return Object_attribute::ATTR_TYPE_FLAG_STR_VAL;
9756 else if (tag < 32)
9757 return Object_attribute::ATTR_TYPE_FLAG_INT_VAL;
9758 else
9759 return ((tag & 1) != 0
9760 ? Object_attribute::ATTR_TYPE_FLAG_STR_VAL
9761 : Object_attribute::ATTR_TYPE_FLAG_INT_VAL);
9762 }
9763
9764 // Reorder attributes.
9765 //
9766 // The ABI defines that Tag_conformance should be emitted first, and that
9767 // Tag_nodefaults should be second (if either is defined). This sets those
9768 // two positions, and bumps up the position of all the remaining tags to
9769 // compensate.
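// As an illustration, assuming the usual EABI tag numbers
// (Tag_nodefaults == 64 and Tag_conformance == 67): output positions 4
// and 5 emit tags 67 and 64, positions 6..65 emit tags 4..63, positions
// 66 and 67 emit tags 65 and 66, and positions from 68 onwards are
// unchanged.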
9770
9771 template<bool big_endian>
9772 int
9773 Target_arm<big_endian>::do_attributes_order(int num) const
9774 {
9775 // Reorder the known object attributes in output. We want to move
9776 // Tag_conformance to position 4 and Tag_nodefaults to position 5,
9777 // and shift everything between 4 .. Tag_conformance - 1 to make room.
9778 if (num == 4)
9779 return elfcpp::Tag_conformance;
9780 if (num == 5)
9781 return elfcpp::Tag_nodefaults;
9782 if ((num - 2) < elfcpp::Tag_nodefaults)
9783 return num - 2;
9784 if ((num - 1) < elfcpp::Tag_conformance)
9785 return num - 1;
9786 return num;
9787 }
9788
9789 // Scan a span of THUMB code for Cortex-A8 erratum.
9790
9791 template<bool big_endian>
9792 void
9793 Target_arm<big_endian>::scan_span_for_cortex_a8_erratum(
9794 Arm_relobj<big_endian>* arm_relobj,
9795 unsigned int shndx,
9796 section_size_type span_start,
9797 section_size_type span_end,
9798 const unsigned char* view,
9799 Arm_address address)
9800 {
9801 // Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
9802 //
9803 // The opcode is BLX.W, BL.W, B.W, Bcc.W
9804 // The branch target is in the same 4KB region as the
9805 // first half of the branch.
9806 // The instruction before the branch is a 32-bit
9807 // length non-branch instruction.
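// Concretely, the scan below flags a branch whose first halfword sits at
// offset 0xffe within a 4K page, since its second halfword then falls on
// the next page.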
9808 section_size_type i = span_start;
9809 bool last_was_32bit = false;
9810 bool last_was_branch = false;
9811 while (i < span_end)
9812 {
9813 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
9814 const Valtype* wv = reinterpret_cast<const Valtype*>(view + i);
9815 uint32_t insn = elfcpp::Swap<16, big_endian>::readval(wv);
9816 bool is_blx = false, is_b = false;
9817 bool is_bl = false, is_bcc = false;
9818
9819 bool insn_32bit = (insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000;
9820 if (insn_32bit)
9821 {
9822 // Load the rest of the insn (in manual-friendly order).
9823 insn = (insn << 16) | elfcpp::Swap<16, big_endian>::readval(wv + 1);
9824
9825 // Encoding T4: B<c>.W.
9826 is_b = (insn & 0xf800d000U) == 0xf0009000U;
9827 // Encoding T1: BL<c>.W.
9828 is_bl = (insn & 0xf800d000U) == 0xf000d000U;
9829 // Encoding T2: BLX<c>.W.
9830 is_blx = (insn & 0xf800d000U) == 0xf000c000U;
9831 // Encoding T3: B<c>.W (not permitted in IT block).
9832 is_bcc = ((insn & 0xf800d000U) == 0xf0008000U
9833 && (insn & 0x07f00000U) != 0x03800000U);
9834 }
9835
9836 bool is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
9837
9838 // If this instruction is a 32-bit THUMB branch that crosses a 4K
9839 // page boundary and it follows a 32-bit non-branch instruction,
9840 // we need to apply the workaround.
9841 if (is_32bit_branch
9842 && ((address + i) & 0xfffU) == 0xffeU
9843 && last_was_32bit
9844 && !last_was_branch)
9845 {
9846 // Check to see if there is a relocation stub for this branch.
9847 bool force_target_arm = false;
9848 bool force_target_thumb = false;
9849 const Cortex_a8_reloc* cortex_a8_reloc = NULL;
9850 Cortex_a8_relocs_info::const_iterator p =
9851 this->cortex_a8_relocs_info_.find(address + i);
9852
9853 if (p != this->cortex_a8_relocs_info_.end())
9854 {
9855 cortex_a8_reloc = p->second;
9856 bool target_is_thumb = (cortex_a8_reloc->destination() & 1) != 0;
9857
9858 if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
9859 && !target_is_thumb)
9860 force_target_arm = true;
9861 else if (cortex_a8_reloc->r_type() == elfcpp::R_ARM_THM_CALL
9862 && target_is_thumb)
9863 force_target_thumb = true;
9864 }
9865
9866 off_t offset;
9867 Stub_type stub_type = arm_stub_none;
9868
9869 // Check if we have an offending branch instruction.
9870 uint16_t upper_insn = (insn >> 16) & 0xffffU;
9871 uint16_t lower_insn = insn & 0xffffU;
9872 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
9873
9874 if (cortex_a8_reloc != NULL
9875 && cortex_a8_reloc->reloc_stub() != NULL)
9876 // We've already made a stub for this instruction, e.g.
9877 // it's a long branch or a Thumb->ARM stub. Assume that
9878 // stub will suffice to work around the A8 erratum (see
9879 // setting of always_after_branch above).
9880 ;
9881 else if (is_bcc)
9882 {
9883 offset = RelocFuncs::thumb32_cond_branch_offset(upper_insn,
9884 lower_insn);
9885 stub_type = arm_stub_a8_veneer_b_cond;
9886 }
9887 else if (is_b || is_bl || is_blx)
9888 {
9889 offset = RelocFuncs::thumb32_branch_offset(upper_insn,
9890 lower_insn);
9891 if (is_blx)
9892 offset &= ~3;
9893
9894 stub_type = (is_blx
9895 ? arm_stub_a8_veneer_blx
9896 : (is_bl
9897 ? arm_stub_a8_veneer_bl
9898 : arm_stub_a8_veneer_b));
9899 }
9900
9901 if (stub_type != arm_stub_none)
9902 {
9903 Arm_address pc_for_insn = address + i + 4;
9904
9905 // The original instruction is a BL, but the target is
9906 // an ARM instruction. If we were not making a stub,
9907 // the BL would have been converted to a BLX. Use the
9908 // BLX stub instead in that case.
9909 if (this->may_use_blx() && force_target_arm
9910 && stub_type == arm_stub_a8_veneer_bl)
9911 {
9912 stub_type = arm_stub_a8_veneer_blx;
9913 is_blx = true;
9914 is_bl = false;
9915 }
9916 // Conversely, if the original instruction was
9917 // BLX but the target is Thumb mode, use the BL stub.
9918 else if (force_target_thumb
9919 && stub_type == arm_stub_a8_veneer_blx)
9920 {
9921 stub_type = arm_stub_a8_veneer_bl;
9922 is_blx = false;
9923 is_bl = true;
9924 }
9925
9926 if (is_blx)
9927 pc_for_insn &= ~3;
9928
9929 // If we found a relocation, use the proper destination,
9930 // not the offset in the (unrelocated) instruction.
9931 // Note this is always done if we switched the stub type above.
9932 if (cortex_a8_reloc != NULL)
9933 offset = (off_t) (cortex_a8_reloc->destination() - pc_for_insn);
9934
9935 Arm_address target = (pc_for_insn + offset) | (is_blx ? 0 : 1);
9936
9937 // Add a new stub if the destination address is in the same page.
9938 if (((address + i) & ~0xfffU) == (target & ~0xfffU))
9939 {
9940 Cortex_a8_stub* stub =
9941 this->stub_factory_.make_cortex_a8_stub(stub_type,
9942 arm_relobj, shndx,
9943 address + i,
9944 target, insn);
9945 Stub_table<big_endian>* stub_table =
9946 arm_relobj->stub_table(shndx);
9947 gold_assert(stub_table != NULL);
9948 stub_table->add_cortex_a8_stub(address + i, stub);
9949 }
9950 }
9951 }
9952
9953 i += insn_32bit ? 4 : 2;
9954 last_was_32bit = insn_32bit;
9955 last_was_branch = is_32bit_branch;
9956 }
9957 }
9958
9959 // Apply the Cortex-A8 workaround.
9960
9961 template<bool big_endian>
9962 void
9963 Target_arm<big_endian>::apply_cortex_a8_workaround(
9964 const Cortex_a8_stub* stub,
9965 Arm_address stub_address,
9966 unsigned char* insn_view,
9967 Arm_address insn_address)
9968 {
9969 typedef typename elfcpp::Swap<16, big_endian>::Valtype Valtype;
9970 Valtype* wv = reinterpret_cast<Valtype*>(insn_view);
9971 Valtype upper_insn = elfcpp::Swap<16, big_endian>::readval(wv);
9972 Valtype lower_insn = elfcpp::Swap<16, big_endian>::readval(wv + 1);
9973 off_t branch_offset = stub_address - (insn_address + 4);
9974
9975 typedef struct Arm_relocate_functions<big_endian> RelocFuncs;
9976 switch (stub->stub_template()->type())
9977 {
9978 case arm_stub_a8_veneer_b_cond:
9979 gold_assert(!utils::has_overflow<21>(branch_offset));
9980 upper_insn = RelocFuncs::thumb32_cond_branch_upper(upper_insn,
9981 branch_offset);
9982 lower_insn = RelocFuncs::thumb32_cond_branch_lower(lower_insn,
9983 branch_offset);
9984 break;
9985
9986 case arm_stub_a8_veneer_b:
9987 case arm_stub_a8_veneer_bl:
9988 case arm_stub_a8_veneer_blx:
9989 if ((lower_insn & 0x5000U) == 0x4000U)
9990 // For a BLX instruction, make sure that the relocation is
9991 // rounded up to a word boundary. This follows the semantics of
9992 // the instruction which specifies that bit 1 of the target
9993 // address will come from bit 1 of the base address.
9994 branch_offset = (branch_offset + 2) & ~3;
9995
9996 // Put BRANCH_OFFSET back into the insn.
9997 gold_assert(!utils::has_overflow<25>(branch_offset));
9998 upper_insn = RelocFuncs::thumb32_branch_upper(upper_insn, branch_offset);
9999 lower_insn = RelocFuncs::thumb32_branch_lower(lower_insn, branch_offset);
10000 break;
10001
10002 default:
10003 gold_unreachable();
10004 }
10005
10006 // Put the relocated value back in the object file:
10007 elfcpp::Swap<16, big_endian>::writeval(wv, upper_insn);
10008 elfcpp::Swap<16, big_endian>::writeval(wv + 1, lower_insn);
10009 }
10010
10011 template<bool big_endian>
10012 class Target_selector_arm : public Target_selector
10013 {
10014 public:
10015 Target_selector_arm()
10016 : Target_selector(elfcpp::EM_ARM, 32, big_endian,
10017 (big_endian ? "elf32-bigarm" : "elf32-littlearm"))
10018 { }
10019
10020 Target*
10021 do_instantiate_target()
10022 { return new Target_arm<big_endian>(); }
10023 };
10024
10025 // Fix .ARM.exidx section coverage.
10026
10027 template<bool big_endian>
10028 void
10029 Target_arm<big_endian>::fix_exidx_coverage(
10030 Layout* layout,
10031 Arm_output_section<big_endian>* exidx_section,
10032 Symbol_table* symtab)
10033 {
10034 // We need to look at all the input sections in the output in ascending
10035 // order of output address. We do that by building a sorted list
10036 // of output sections by addresses. Then we look at the output sections
10037 // in order. The input sections in an output section are already sorted
10038 // by addresses within the output section.
10039
10040 typedef std::set<Output_section*, output_section_address_less_than>
10041 Sorted_output_section_list;
10042 Sorted_output_section_list sorted_output_sections;
10043 Layout::Section_list section_list;
10044 layout->get_allocated_sections(&section_list);
10045 for (Layout::Section_list::const_iterator p = section_list.begin();
10046 p != section_list.end();
10047 ++p)
10048 {
10049 // We only care about output sections that contain executable code.
10050 if (((*p)->flags() & elfcpp::SHF_EXECINSTR) != 0)
10051 sorted_output_sections.insert(*p);
10052 }
10053
10054 // Go over the output sections in ascending order of output addresses.
10055 typedef typename Arm_output_section<big_endian>::Text_section_list
10056 Text_section_list;
10057 Text_section_list sorted_text_sections;
10058 for(typename Sorted_output_section_list::iterator p =
10059 sorted_output_sections.begin();
10060 p != sorted_output_sections.end();
10061 ++p)
10062 {
10063 Arm_output_section<big_endian>* arm_output_section =
10064 Arm_output_section<big_endian>::as_arm_output_section(*p);
10065 arm_output_section->append_text_sections_to_list(&sorted_text_sections);
10066 }
10067
10068 exidx_section->fix_exidx_coverage(sorted_text_sections, symtab);
10069 }
10070
10071 Target_selector_arm<false> target_selector_arm;
10072 Target_selector_arm<true> target_selector_armbe;
10073
10074 } // End anonymous namespace.