1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2015 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.cc, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
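  // Illustrative example: for the A64 word 0x9B020C20 ("madd x0, x1, x2, x3"),
  //   aarch64_bits(0x9B020C20, 21, 3) == 0   // the "op31" field
  //   aarch64_bits(0x9B020C20, 10, 5) == 3   // the "ra" field, i.e. x3
  //   aarch64_bit (0x9B020C20, 31)    == 1   // the "sf" (64-bit) bit
  // The named accessors below are thin wrappers over these two helpers.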
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adrp(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x90000000; }
108
109 static unsigned int
110 aarch64_rm(const Insntype insn)
111 { return aarch64_bits(insn, 16, 5); }
112
113 static unsigned int
114 aarch64_rn(const Insntype insn)
115 { return aarch64_bits(insn, 5, 5); }
116
117 static unsigned int
118 aarch64_rd(const Insntype insn)
119 { return aarch64_bits(insn, 0, 5); }
120
121 static unsigned int
122 aarch64_rt(const Insntype insn)
123 { return aarch64_bits(insn, 0, 5); }
124
125 static unsigned int
126 aarch64_rt2(const Insntype insn)
127 { return aarch64_bits(insn, 10, 5); }
128
129 static bool
130 aarch64_b(const Insntype insn)
131 { return (insn & 0xFC000000) == 0x14000000; }
132
133 static bool
134 aarch64_bl(const Insntype insn)
135 { return (insn & 0xFC000000) == 0x94000000; }
136
137 static bool
138 aarch64_blr(const Insntype insn)
139 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
140
141 static bool
142 aarch64_br(const Insntype insn)
143 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
144
145 // All ld/st ops. See C4-182 of the ARM ARM. The encoding spaces for
146 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM cover prefetch ops.
147 static bool
148 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
149
150 static bool
151 aarch64_ldst(Insntype insn)
152 { return (insn & 0x0a000000) == 0x08000000; }
153
154 static bool
155 aarch64_ldst_ex(Insntype insn)
156 { return (insn & 0x3f000000) == 0x08000000; }
157
158 static bool
159 aarch64_ldst_pcrel(Insntype insn)
160 { return (insn & 0x3b000000) == 0x18000000; }
161
162 static bool
163 aarch64_ldst_nap(Insntype insn)
164 { return (insn & 0x3b800000) == 0x28000000; }
165
166 static bool
167 aarch64_ldstp_pi(Insntype insn)
168 { return (insn & 0x3b800000) == 0x28800000; }
169
170 static bool
171 aarch64_ldstp_o(Insntype insn)
172 { return (insn & 0x3b800000) == 0x29000000; }
173
174 static bool
175 aarch64_ldstp_pre(Insntype insn)
176 { return (insn & 0x3b800000) == 0x29800000; }
177
178 static bool
179 aarch64_ldst_ui(Insntype insn)
180 { return (insn & 0x3b200c00) == 0x38000000; }
181
182 static bool
183 aarch64_ldst_piimm(Insntype insn)
184 { return (insn & 0x3b200c00) == 0x38000400; }
185
186 static bool
187 aarch64_ldst_u(Insntype insn)
188 { return (insn & 0x3b200c00) == 0x38000800; }
189
190 static bool
191 aarch64_ldst_preimm(Insntype insn)
192 { return (insn & 0x3b200c00) == 0x38000c00; }
193
194 static bool
195 aarch64_ldst_ro(Insntype insn)
196 { return (insn & 0x3b200c00) == 0x38200800; }
197
198 static bool
199 aarch64_ldst_uimm(Insntype insn)
200 { return (insn & 0x3b000000) == 0x39000000; }
201
202 static bool
203 aarch64_ldst_simd_m(Insntype insn)
204 { return (insn & 0xbfbf0000) == 0x0c000000; }
205
206 static bool
207 aarch64_ldst_simd_m_pi(Insntype insn)
208 { return (insn & 0xbfa00000) == 0x0c800000; }
209
210 static bool
211 aarch64_ldst_simd_s(Insntype insn)
212 { return (insn & 0xbf9f0000) == 0x0d000000; }
213
214 static bool
215 aarch64_ldst_simd_s_pi(Insntype insn)
216 { return (insn & 0xbf800000) == 0x0d800000; }
217
218 // Classify INSN as a load/store if it is indeed one. Return true if INSN is a
219 // LD/ST instruction, otherwise return false. For scalar LD/ST instructions,
220 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
221 // instructions, PAIR is TRUE and RT and RT2 are returned.
222 static bool
223 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
224 bool *pair, bool *load)
225 {
226 uint32_t opcode;
227 unsigned int r;
228 uint32_t opc = 0;
229 uint32_t v = 0;
230 uint32_t opc_v = 0;
231
232 /* Bail out quickly if INSN doesn't fall into the load-store
233 encoding space. */
234 if (!aarch64_ldst (insn))
235 return false;
236
237 *pair = false;
238 *load = false;
239 if (aarch64_ldst_ex (insn))
240 {
241 *rt = aarch64_rt (insn);
242 *rt2 = *rt;
243 if (aarch64_bit (insn, 21) == 1)
244 {
245 *pair = true;
246 *rt2 = aarch64_rt2 (insn);
247 }
248 *load = aarch64_ld (insn);
249 return true;
250 }
251 else if (aarch64_ldst_nap (insn)
252 || aarch64_ldstp_pi (insn)
253 || aarch64_ldstp_o (insn)
254 || aarch64_ldstp_pre (insn))
255 {
256 *pair = true;
257 *rt = aarch64_rt (insn);
258 *rt2 = aarch64_rt2 (insn);
259 *load = aarch64_ld (insn);
260 return true;
261 }
262 else if (aarch64_ldst_pcrel (insn)
263 || aarch64_ldst_ui (insn)
264 || aarch64_ldst_piimm (insn)
265 || aarch64_ldst_u (insn)
266 || aarch64_ldst_preimm (insn)
267 || aarch64_ldst_ro (insn)
268 || aarch64_ldst_uimm (insn))
269 {
270 *rt = aarch64_rt (insn);
271 *rt2 = *rt;
272 if (aarch64_ldst_pcrel (insn))
273 *load = true;
274 opc = aarch64_bits (insn, 22, 2);
275 v = aarch64_bit (insn, 26);
276 opc_v = opc | (v << 2);
277 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
278 || opc_v == 5 || opc_v == 7);
279 return true;
280 }
281 else if (aarch64_ldst_simd_m (insn)
282 || aarch64_ldst_simd_m_pi (insn))
283 {
284 *rt = aarch64_rt (insn);
285 *load = aarch64_bit (insn, 22);
286 opcode = (insn >> 12) & 0xf;
287 switch (opcode)
288 {
289 case 0:
290 case 2:
291 *rt2 = *rt + 3;
292 break;
293
294 case 4:
295 case 6:
296 *rt2 = *rt + 2;
297 break;
298
299 case 7:
300 *rt2 = *rt;
301 break;
302
303 case 8:
304 case 10:
305 *rt2 = *rt + 1;
306 break;
307
308 default:
309 return false;
310 }
311 return true;
312 }
313 else if (aarch64_ldst_simd_s (insn)
314 || aarch64_ldst_simd_s_pi (insn))
315 {
316 *rt = aarch64_rt (insn);
317 r = (insn >> 21) & 1;
318 *load = aarch64_bit (insn, 22);
319 opcode = (insn >> 13) & 0x7;
320 switch (opcode)
321 {
322 case 0:
323 case 2:
324 case 4:
325 *rt2 = *rt + r;
326 break;
327
328 case 1:
329 case 3:
330 case 5:
331 *rt2 = *rt + (r == 0 ? 2 : 3);
332 break;
333
334 case 6:
335 *rt2 = *rt + r;
336 break;
337
338 case 7:
339 *rt2 = *rt + (r == 0 ? 2 : 3);
340 break;
341
342 default:
343 return false;
344 }
345 return true;
346 }
347 return false;
348 } // End of "aarch64_mem_op_p".
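  // Worked example: for insn == 0xA94107E0 ("ldp x0, x1, [sp, #16]"),
  // aarch64_ldst() holds (bit 27 set, bit 25 clear) and aarch64_ldstp_o()
  // matches the LD/ST-pair signed-offset form, so aarch64_mem_op_p() returns
  // true with *pair == true, *load == true, *rt == 0 and *rt2 == 1.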
349
350 // Return true if INSN is a mac insn.
351 static bool
352 aarch64_mac(Insntype insn)
353 { return (insn & 0xff000000) == 0x9b000000; }
354
355 // Return true if INSN is multiply-accumulate.
356 // (This is similar to the implementation in elfnn-aarch64.c.)
357 static bool
358 aarch64_mlxl(Insntype insn)
359 {
360 uint32_t op31 = aarch64_op31(insn);
361 if (aarch64_mac(insn)
362 && (op31 == 0 || op31 == 1 || op31 == 5)
363 /* Exclude MUL instructions which are encoded as a multiple-accumulate
364 with RA = XZR. */
365 && aarch64_ra(insn) != AARCH64_ZR)
366 {
367 return true;
368 }
369 return false;
370 }
371 }; // End of "AArch64_insn_utilities".
372
373
374 // Insn length in bytes.
375
376 template<bool big_endian>
377 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
378
379
380 // Zero register encoding - 31.
381
382 template<bool big_endian>
383 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
384
385
386 // Output_data_got_aarch64 class.
387
388 template<int size, bool big_endian>
389 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
390 {
391 public:
392 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
393 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
394 : Output_data_got<size, big_endian>(),
395 symbol_table_(symtab), layout_(layout)
396 { }
397
398 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
399 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
400 // applied in a static link.
401 void
402 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
403 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
404
405
406 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
407 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
408 // relocation that needs to be applied in a static link.
409 void
410 add_static_reloc(unsigned int got_offset, unsigned int r_type,
411 Sized_relobj_file<size, big_endian>* relobj,
412 unsigned int index)
413 {
414 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
415 index));
416 }
417
418
419 protected:
420 // Write out the GOT table.
421 void
422 do_write(Output_file* of) {
423 // The first entry in the GOT is the address of the .dynamic section.
424 gold_assert(this->data_size() >= size / 8);
425 Output_section* dynamic = this->layout_->dynamic_section();
426 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
427 this->replace_constant(0, dynamic_addr);
428 Output_data_got<size, big_endian>::do_write(of);
429
430 // Handling static relocs
431 if (this->static_relocs_.empty())
432 return;
433
434 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
435
436 gold_assert(parameters->doing_static_link());
437 const off_t offset = this->offset();
438 const section_size_type oview_size =
439 convert_to_section_size_type(this->data_size());
440 unsigned char* const oview = of->get_output_view(offset, oview_size);
441
442 Output_segment* tls_segment = this->layout_->tls_segment();
443 gold_assert(tls_segment != NULL);
444
445 AArch64_address aligned_tcb_address =
446 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
447 tls_segment->maximum_alignment());
448
449 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
450 {
451 Static_reloc& reloc(this->static_relocs_[i]);
452 AArch64_address value;
453
454 if (!reloc.symbol_is_global())
455 {
456 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
457 const Symbol_value<size>* psymval =
458 reloc.relobj()->local_symbol(reloc.index());
459
460 // We are doing static linking. Issue an error and skip this
461 // relocation if the symbol is undefined or in a discarded section.
462 bool is_ordinary;
463 unsigned int shndx = psymval->input_shndx(&is_ordinary);
464 if ((shndx == elfcpp::SHN_UNDEF)
465 || (is_ordinary
466 && shndx != elfcpp::SHN_UNDEF
467 && !object->is_section_included(shndx)
468 && !this->symbol_table_->is_section_folded(object, shndx)))
469 {
470 gold_error(_("undefined or discarded local symbol %u from "
471 " object %s in GOT"),
472 reloc.index(), reloc.relobj()->name().c_str());
473 continue;
474 }
475 value = psymval->value(object, 0);
476 }
477 else
478 {
479 const Symbol* gsym = reloc.symbol();
480 gold_assert(gsym != NULL);
481 if (gsym->is_forwarder())
482 gsym = this->symbol_table_->resolve_forwards(gsym);
483
484 // We are doing static linking. Issue an error and skip this
485 // relocation if the symbol is undefined or in a discarded section,
486 // unless it is a weak undefined symbol.
487 if ((gsym->is_defined_in_discarded_section()
488 || gsym->is_undefined())
489 && !gsym->is_weak_undefined())
490 {
491 gold_error(_("undefined or discarded symbol %s in GOT"),
492 gsym->name());
493 continue;
494 }
495
496 if (!gsym->is_weak_undefined())
497 {
498 const Sized_symbol<size>* sym =
499 static_cast<const Sized_symbol<size>*>(gsym);
500 value = sym->value();
501 }
502 else
503 value = 0;
504 }
505
506 unsigned got_offset = reloc.got_offset();
507 gold_assert(got_offset < oview_size);
508
509 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
510 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
511 Valtype x;
512 switch (reloc.r_type())
513 {
514 case elfcpp::R_AARCH64_TLS_DTPREL64:
515 x = value;
516 break;
517 case elfcpp::R_AARCH64_TLS_TPREL64:
518 x = value + aligned_tcb_address;
519 break;
520 default:
521 gold_unreachable();
522 }
523 elfcpp::Swap<size, big_endian>::writeval(wv, x);
524 }
525
526 of->write_output_view(offset, oview_size, oview);
527 }
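  // Illustrative example of the loop above: for an R_AARCH64_TLS_TPREL64
  // entry against a symbol at offset 0x8 into the TLS segment, with a
  // 16-byte-aligned TLS segment and the 16-byte TCB assumed by this target,
  // the GOT slot is written as 0x8 + align_address(16, 16) == 0x18, whereas
  // an R_AARCH64_TLS_DTPREL64 slot receives the raw offset 0x8.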
528
529 private:
530 // Symbol table of the output object.
531 Symbol_table* symbol_table_;
532 // A pointer to the Layout class, so that we can find the .dynamic
533 // section when we write out the GOT section.
534 Layout* layout_;
535
536 // This class represents dynamic relocations that need to be applied by
537 // gold because we are using TLS relocations in a static link.
538 class Static_reloc
539 {
540 public:
541 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
542 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
543 { this->u_.global.symbol = gsym; }
544
545 Static_reloc(unsigned int got_offset, unsigned int r_type,
546 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
547 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
548 {
549 this->u_.local.relobj = relobj;
550 this->u_.local.index = index;
551 }
552
553 // Return the GOT offset.
554 unsigned int
555 got_offset() const
556 { return this->got_offset_; }
557
558 // Relocation type.
559 unsigned int
560 r_type() const
561 { return this->r_type_; }
562
563 // Whether the symbol is global or not.
564 bool
565 symbol_is_global() const
566 { return this->symbol_is_global_; }
567
568 // For a relocation against a global symbol, the global symbol.
569 Symbol*
570 symbol() const
571 {
572 gold_assert(this->symbol_is_global_);
573 return this->u_.global.symbol;
574 }
575
576 // For a relocation against a local symbol, the defining object.
577 Sized_relobj_file<size, big_endian>*
578 relobj() const
579 {
580 gold_assert(!this->symbol_is_global_);
581 return this->u_.local.relobj;
582 }
583
584 // For a relocation against a local symbol, the local symbol index.
585 unsigned int
586 index() const
587 {
588 gold_assert(!this->symbol_is_global_);
589 return this->u_.local.index;
590 }
591
592 private:
593 // GOT offset of the entry to which this relocation is applied.
594 unsigned int got_offset_;
595 // Type of relocation.
596 unsigned int r_type_;
597 // Whether this relocation is against a global symbol.
598 bool symbol_is_global_;
599 // A global or local symbol.
600 union
601 {
602 struct
603 {
604 // For a global symbol, the symbol itself.
605 Symbol* symbol;
606 } global;
607 struct
608 {
609 // For a local symbol, the object defining the symbol.
610 Sized_relobj_file<size, big_endian>* relobj;
611 // For a local symbol, the symbol index.
612 unsigned int index;
613 } local;
614 } u_;
615 }; // End of inner class Static_reloc
616
617 std::vector<Static_reloc> static_relocs_;
618 }; // End of Output_data_got_aarch64
619
620
621 template<int size, bool big_endian>
622 class AArch64_input_section;
623
624
625 template<int size, bool big_endian>
626 class AArch64_output_section;
627
628
629 template<int size, bool big_endian>
630 class AArch64_relobj;
631
632
633 // Stub type enum constants.
634
635 enum
636 {
637 ST_NONE = 0,
638
639 // Using adrp/add pair, 4 insns (including alignment) without mem access,
640 // the fastest stub. This has a limited jump distance, which is tested by
641 // aarch64_valid_for_adrp_p.
642 ST_ADRP_BRANCH = 1,
643
644 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
645 // unlimited in jump distance.
646 ST_LONG_BRANCH_ABS = 2,
647
648 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
649 // mem access, slowest one. Only used in position independent executables.
650 ST_LONG_BRANCH_PCREL = 3,
651
652 // Stub for erratum 843419 handling.
653 ST_E_843419 = 4,
654
655 // Stub for erratum 835769 handling.
656 ST_E_835769 = 5,
657
658 // Number of total stub types.
659 ST_NUMBER = 6
660 };
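// Summary of the templates defined below: ST_ADRP_BRANCH and
// ST_LONG_BRANCH_ABS occupy 4 insns (16 bytes) each, ST_LONG_BRANCH_PCREL
// occupies 8 insns (32 bytes), and the two erratum stubs occupy 2 insns
// (8 bytes) each.  As Reloc_stub::stub_type_for_reloc() decides, a branch
// needs no stub within roughly +/-128MB, an ST_ADRP_BRANCH stub suffices
// within roughly +/-4GB, and one of the long-branch stubs is used beyond that.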
661
662
663 // Struct that wraps insns for a particular stub. All stub templates are
664 // created/initialized as constants by Stub_template_repertoire.
665
666 template<bool big_endian>
667 struct Stub_template
668 {
669 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
670 const int insn_num;
671 };
672
673
674 // Simple singleton class that creates/initializes/stores all types of stub
675 // templates.
676
677 template<bool big_endian>
678 class Stub_template_repertoire
679 {
680 public:
681 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
682
683 // Single static method to get stub template for a given stub type.
684 static const Stub_template<big_endian>*
685 get_stub_template(int type)
686 {
687 static Stub_template_repertoire<big_endian> singleton;
688 return singleton.stub_templates_[type];
689 }
690
691 private:
692 // Constructor - creates/initializes all stub templates.
693 Stub_template_repertoire();
694 ~Stub_template_repertoire()
695 { }
696
697 // Disallowing copy ctor and copy assignment operator.
698 Stub_template_repertoire(Stub_template_repertoire&);
699 Stub_template_repertoire& operator=(Stub_template_repertoire&);
700
701 // Data that stores all insn templates.
702 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
703 }; // End of "class Stub_template_repertoire".
704
705
706 // Constructor - creates/initializes all stub templates.
707
708 template<bool big_endian>
709 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
710 {
711 // Insn array definitions.
712 const static Insntype ST_NONE_INSNS[] = {};
713
714 const static Insntype ST_ADRP_BRANCH_INSNS[] =
715 {
716 0x90000010, /* adrp ip0, X */
717 /* ADR_PREL_PG_HI21(X) */
718 0x91000210, /* add ip0, ip0, :lo12:X */
719 /* ADD_ABS_LO12_NC(X) */
720 0xd61f0200, /* br ip0 */
721 0x00000000, /* alignment padding */
722 };
723
724 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
725 {
726 0x58000050, /* ldr ip0, 0x8 */
727 0xd61f0200, /* br ip0 */
728 0x00000000, /* address field */
729 0x00000000, /* address field */
730 };
731
732 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
733 {
734 0x58000090, /* ldr ip0, 0x10 */
735 0x10000011, /* adr ip1, #0 */
736 0x8b110210, /* add ip0, ip0, ip1 */
737 0xd61f0200, /* br ip0 */
738 0x00000000, /* address field */
739 0x00000000, /* address field */
740 0x00000000, /* alignment padding */
741 0x00000000, /* alignment padding */
742 };
743
744 const static Insntype ST_E_843419_INSNS[] =
745 {
746 0x00000000, /* Placeholder for erratum insn. */
747 0x14000000, /* b <label> */
748 };
749
750 // ST_E_835769 has the same stub template as ST_E_843419.
751 const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;
752
753 #define install_insn_template(T) \
754 const static Stub_template<big_endian> template_##T = { \
755 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
756 this->stub_templates_[T] = &template_##T
757
758 install_insn_template(ST_NONE);
759 install_insn_template(ST_ADRP_BRANCH);
760 install_insn_template(ST_LONG_BRANCH_ABS);
761 install_insn_template(ST_LONG_BRANCH_PCREL);
762 install_insn_template(ST_E_843419);
763 install_insn_template(ST_E_835769);
764
765 #undef install_insn_template
766 }
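// Usage sketch: a stub writer obtains its insn template through the
// singleton, e.g.
//
//   const Stub_template<big_endian>* tpl =
//     Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // tpl->insn_num == 4; tpl->insns[0] is the "adrp ip0, X" placeholder,
//   // whose immediate is filled in when the stub is relocated.
//
// This is how Stub_base::insns() and insn_num() below obtain their data.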
767
768
769 // Base class for stubs.
770
771 template<int size, bool big_endian>
772 class Stub_base
773 {
774 public:
775 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
776 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
777
778 static const AArch64_address invalid_address =
779 static_cast<AArch64_address>(-1);
780
781 static const section_offset_type invalid_offset =
782 static_cast<section_offset_type>(-1);
783
784 Stub_base(int type)
785 : destination_address_(invalid_address),
786 offset_(invalid_offset),
787 type_(type)
788 {}
789
790 ~Stub_base()
791 {}
792
793 // Get stub type.
794 int
795 type() const
796 { return this->type_; }
797
798 // Get stub template that provides stub insn information.
799 const Stub_template<big_endian>*
800 stub_template() const
801 {
802 return Stub_template_repertoire<big_endian>::
803 get_stub_template(this->type());
804 }
805
806 // Get destination address.
807 AArch64_address
808 destination_address() const
809 {
810 gold_assert(this->destination_address_ != this->invalid_address);
811 return this->destination_address_;
812 }
813
814 // Set destination address.
815 void
816 set_destination_address(AArch64_address address)
817 {
818 gold_assert(address != this->invalid_address);
819 this->destination_address_ = address;
820 }
821
822 // Reset the destination address.
823 void
824 reset_destination_address()
825 { this->destination_address_ = this->invalid_address; }
826
827 // Get offset of code stub. For Reloc_stub, it is the offset from the
828 // beginning of its containing stub table; for Erratum_stub, it is the offset
829 // from the end of reloc_stubs.
830 section_offset_type
831 offset() const
832 {
833 gold_assert(this->offset_ != this->invalid_offset);
834 return this->offset_;
835 }
836
837 // Set stub offset.
838 void
839 set_offset(section_offset_type offset)
840 { this->offset_ = offset; }
841
842 // Return the stub insn.
843 const Insntype*
844 insns() const
845 { return this->stub_template()->insns; }
846
847 // Return num of stub insns.
848 unsigned int
849 insn_num() const
850 { return this->stub_template()->insn_num; }
851
852 // Get size of the stub.
853 int
854 stub_size() const
855 {
856 return this->insn_num() *
857 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
858 }
859
860 // Write stub to output file.
861 void
862 write(unsigned char* view, section_size_type view_size)
863 { this->do_write(view, view_size); }
864
865 protected:
866 // Abstract method to be implemented by sub-classes.
867 virtual void
868 do_write(unsigned char*, section_size_type) = 0;
869
870 private:
871 // The last insn of a stub is a jump to destination insn. This field records
872 // the destination address.
873 AArch64_address destination_address_;
874 // The stub offset. Note this has different interpretations for a
875 // Reloc_stub and an Erratum_stub. For a Reloc_stub this is the offset from the
876 // beginning of the containing stub_table, whereas for Erratum_stub, this is
877 // the offset from the end of reloc_stubs.
878 section_offset_type offset_;
879 // Stub type.
880 const int type_;
881 }; // End of "Stub_base".
882
883
884 // Erratum stub class. An erratum stub differs from a reloc stub in that for
885 // each erratum occurrence, we generate an erratum stub. We never share erratum
886 // stubs, whereas for reloc stubs, different branch insns share a single reloc
887 // stub as long as the branch targets are the same. (More to the point, reloc
888 // stubs can be shared because they're used to reach a specific target, whereas
889 // erratum stubs branch back to the original control flow.)
890
891 template<int size, bool big_endian>
892 class Erratum_stub : public Stub_base<size, big_endian>
893 {
894 public:
895 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
896 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
897 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
898 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
899
900 static const int STUB_ADDR_ALIGN;
901
902 static const Insntype invalid_insn = static_cast<Insntype>(-1);
903
904 Erratum_stub(The_aarch64_relobj* relobj, int type,
905 unsigned shndx, unsigned int sh_offset)
906 : Stub_base<size, big_endian>(type), relobj_(relobj),
907 shndx_(shndx), sh_offset_(sh_offset),
908 erratum_insn_(invalid_insn),
909 erratum_address_(this->invalid_address)
910 {}
911
912 ~Erratum_stub() {}
913
914 // Return the object that contains the erratum.
915 The_aarch64_relobj*
916 relobj()
917 { return this->relobj_; }
918
919 // Get section index of the erratum.
920 unsigned int
921 shndx() const
922 { return this->shndx_; }
923
924 // Get section offset of the erratum.
925 unsigned int
926 sh_offset() const
927 { return this->sh_offset_; }
928
929 // Get the erratum insn. This is the insn located at the erratum address.
930 Insntype
931 erratum_insn() const
932 {
933 gold_assert(this->erratum_insn_ != this->invalid_insn);
934 return this->erratum_insn_;
935 }
936
937 // Set the insn that the erratum happens to.
938 void
939 set_erratum_insn(Insntype insn)
940 { this->erratum_insn_ = insn; }
941
942 // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
943 // relocation spot. In that case the erratum_insn_ recorded at the scanning
944 // phase is no longer the one we want to write out to the stub, so update
945 // erratum_insn_ with the relocated version. Also note that in this case xn
946 // must not be "PC", so it is safe to move the erratum insn from its original
947 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
948 // insn, which cannot be a relocation spot (assertion added though).
949 void
950 update_erratum_insn(Insntype insn)
951 {
952 gold_assert(this->erratum_insn_ != this->invalid_insn);
953 switch (this->type())
954 {
955 case ST_E_843419:
956 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
957 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
958 gold_assert(Insn_utilities::aarch64_rd(insn) ==
959 Insn_utilities::aarch64_rd(this->erratum_insn()));
960 gold_assert(Insn_utilities::aarch64_rn(insn) ==
961 Insn_utilities::aarch64_rn(this->erratum_insn()));
962 // Update plain ld/st insn with relocated insn.
963 this->erratum_insn_ = insn;
964 break;
965 case ST_E_835769:
966 gold_assert(insn == this->erratum_insn());
967 break;
968 default:
969 gold_unreachable();
970 }
971 }
972
973
974 // Return the address where the erratum fix must be applied.
975 AArch64_address
976 erratum_address() const
977 {
978 gold_assert(this->erratum_address_ != this->invalid_address);
979 return this->erratum_address_;
980 }
981
982 // Set the address where the erratum fix must be applied.
983 void
984 set_erratum_address(AArch64_address addr)
985 { this->erratum_address_ = addr; }
986
987 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
988 // sh_offset). We do not include 'type' in the calculation, because there is
989 // at most one stub type at (obj, shndx, sh_offset).
990 bool
991 operator<(const Erratum_stub<size, big_endian>& k) const
992 {
993 if (this == &k)
994 return false;
995 // We group stubs by relobj.
996 if (this->relobj_ != k.relobj_)
997 return this->relobj_ < k.relobj_;
998 // Then by section index.
999 if (this->shndx_ != k.shndx_)
1000 return this->shndx_ < k.shndx_;
1001 // Lastly by section offset.
1002 return this->sh_offset_ < k.sh_offset_;
1003 }
1004
1005 protected:
1006 virtual void
1007 do_write(unsigned char*, section_size_type);
1008
1009 private:
1010 // The object that needs to be fixed.
1011 The_aarch64_relobj* relobj_;
1012 // The shndx in the object that needs to be fixed.
1013 const unsigned int shndx_;
1014 // The section offset in the object that needs to be fixed.
1015 const unsigned int sh_offset_;
1016 // The insn to be fixed.
1017 Insntype erratum_insn_;
1018 // The address of the above insn.
1019 AArch64_address erratum_address_;
1020 }; // End of "Erratum_stub".
1021
1022 template<int size, bool big_endian>
1023 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1024
1025 // Comparator used in set definition.
1026 template<int size, bool big_endian>
1027 struct Erratum_stub_less
1028 {
1029 bool
1030 operator()(const Erratum_stub<size, big_endian>* s1,
1031 const Erratum_stub<size, big_endian>* s2) const
1032 { return *s1 < *s2; }
1033 };
1034
1035 // Erratum_stub implementation for writing stub to output file.
1036
1037 template<int size, bool big_endian>
1038 void
1039 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1040 {
1041 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1042 const Insntype* insns = this->insns();
1043 uint32_t num_insns = this->insn_num();
1044 Insntype* ip = reinterpret_cast<Insntype*>(view);
1045 // For the currently implemented errata 843419 and 835769, the first insn in the
1046 // stub is always a copy of the problematic insn (in 843419, the mem access
1047 // insn, in 835769, the mac insn), followed by a jump-back.
1048 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1049 for (uint32_t i = 1; i < num_insns; ++i)
1050 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1051 }
1052
1053
1054 // Reloc stub class.
1055
1056 template<int size, bool big_endian>
1057 class Reloc_stub : public Stub_base<size, big_endian>
1058 {
1059 public:
1060 typedef Reloc_stub<size, big_endian> This;
1061 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1062
1063 // Branch range. This is used to calculate the section group size, as well as
1064 // determine whether a stub is needed.
1065 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1066 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1067
1068 // Constant used to determine if an offset fits in the adrp instruction
1069 // encoding.
1070 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1071 static const int MIN_ADRP_IMM = -(1 << 20);
1072
1073 static const int BYTES_PER_INSN = 4;
1074 static const int STUB_ADDR_ALIGN;
1075
1076 // Determine whether the offset fits in the jump/branch instruction.
1077 static bool
1078 aarch64_valid_branch_offset_p(int64_t offset)
1079 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1080
1081 // Determine whether the offset fits in the adrp immediate field.
1082 static bool
1083 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1084 {
1085 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1086 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1087 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1088 }
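  // Worked example with hypothetical addresses: for a call site at 0x400000
  // branching to 0x80400000, the page delta is
  //   (Page(0x80400000) - Page(0x400000)) >> 12 == 0x80000 pages,
  // which lies within [MIN_ADRP_IMM, MAX_ADRP_IMM] == [-0x100000, 0xfffff],
  // so an ST_ADRP_BRANCH stub can reach the destination.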
1089
1090 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1091 // needed.
1092 static int
1093 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1094 AArch64_address target);
1095
1096 Reloc_stub(int type)
1097 : Stub_base<size, big_endian>(type)
1098 { }
1099
1100 ~Reloc_stub()
1101 { }
1102
1103 // The key class used to index the stub instance in the stub table's stub map.
1104 class Key
1105 {
1106 public:
1107 Key(int type, const Symbol* symbol, const Relobj* relobj,
1108 unsigned int r_sym, int32_t addend)
1109 : type_(type), addend_(addend)
1110 {
1111 if (symbol != NULL)
1112 {
1113 this->r_sym_ = Reloc_stub::invalid_index;
1114 this->u_.symbol = symbol;
1115 }
1116 else
1117 {
1118 gold_assert(relobj != NULL && r_sym != invalid_index);
1119 this->r_sym_ = r_sym;
1120 this->u_.relobj = relobj;
1121 }
1122 }
1123
1124 ~Key()
1125 { }
1126
1127 // Return stub type.
1128 int
1129 type() const
1130 { return this->type_; }
1131
1132 // Return the local symbol index or invalid_index.
1133 unsigned int
1134 r_sym() const
1135 { return this->r_sym_; }
1136
1137 // Return the symbol if there is one.
1138 const Symbol*
1139 symbol() const
1140 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1141
1142 // Return the relobj if there is one.
1143 const Relobj*
1144 relobj() const
1145 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1146
1147 // Whether this key equals another key k.
1148 bool
1149 eq(const Key& k) const
1150 {
1151 return ((this->type_ == k.type_)
1152 && (this->r_sym_ == k.r_sym_)
1153 && ((this->r_sym_ != Reloc_stub::invalid_index)
1154 ? (this->u_.relobj == k.u_.relobj)
1155 : (this->u_.symbol == k.u_.symbol))
1156 && (this->addend_ == k.addend_));
1157 }
1158
1159 // Return a hash value.
1160 size_t
1161 hash_value() const
1162 {
1163 size_t name_hash_value = gold::string_hash<char>(
1164 (this->r_sym_ != Reloc_stub::invalid_index)
1165 ? this->u_.relobj->name().c_str()
1166 : this->u_.symbol->name());
1167 // We only have 4 reloc stub types.
1168 size_t stub_type_hash_value = 0x03 & this->type_;
1169 return (name_hash_value
1170 ^ stub_type_hash_value
1171 ^ ((this->r_sym_ & 0x3fff) << 2)
1172 ^ ((this->addend_ & 0xffff) << 16));
1173 }
1174
1175 // Functors for STL associative containers.
1176 struct hash
1177 {
1178 size_t
1179 operator()(const Key& k) const
1180 { return k.hash_value(); }
1181 };
1182
1183 struct equal_to
1184 {
1185 bool
1186 operator()(const Key& k1, const Key& k2) const
1187 { return k1.eq(k2); }
1188 };
1189
1190 private:
1191 // Stub type.
1192 const int type_;
1193 // If this is a local symbol, this is the index in the defining object.
1194 // Otherwise, it is invalid_index for a global symbol.
1195 unsigned int r_sym_;
1196 // If r_sym_ is an invalid index, this points to a global symbol.
1197 // Otherwise, it points to a relobj. We used the unsized and target
1198 // independent Symbol and Relobj classes instead of Sized_symbol<size> and
1199 // AArch64_relobj, in order to avoid making the stub class a template
1200 // as most of the stub machinery is endianness-neutral. However, it
1201 // may require a bit of casting done by users of this class.
1202 union
1203 {
1204 const Symbol* symbol;
1205 const Relobj* relobj;
1206 } u_;
1207 // Addend associated with a reloc.
1208 int32_t addend_;
1209 }; // End of inner class Reloc_stub::Key
1210
1211 protected:
1212 // This may be overridden in the child class.
1213 virtual void
1214 do_write(unsigned char*, section_size_type);
1215
1216 private:
1217 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1218 }; // End of Reloc_stub
1219
1220 template<int size, bool big_endian>
1221 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1222
1223 // Write data to output file.
1224
1225 template<int size, bool big_endian>
1226 void
1227 Reloc_stub<size, big_endian>::
1228 do_write(unsigned char* view, section_size_type)
1229 {
1230 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1231 const uint32_t* insns = this->insns();
1232 uint32_t num_insns = this->insn_num();
1233 Insntype* ip = reinterpret_cast<Insntype*>(view);
1234 for (uint32_t i = 0; i < num_insns; ++i)
1235 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1236 }
1237
1238
1239 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1240 // needed.
1241
1242 template<int size, bool big_endian>
1243 inline int
1244 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1245 unsigned int r_type, AArch64_address location, AArch64_address dest)
1246 {
1247 int64_t branch_offset = 0;
1248 switch(r_type)
1249 {
1250 case elfcpp::R_AARCH64_CALL26:
1251 case elfcpp::R_AARCH64_JUMP26:
1252 branch_offset = dest - location;
1253 break;
1254 default:
1255 gold_unreachable();
1256 }
1257
1258 if (aarch64_valid_branch_offset_p(branch_offset))
1259 return ST_NONE;
1260
1261 if (aarch64_valid_for_adrp_p(location, dest))
1262 return ST_ADRP_BRANCH;
1263
1264 if (parameters->options().output_is_position_independent()
1265 && parameters->options().output_is_executable())
1266 return ST_LONG_BRANCH_PCREL;
1267
1268 return ST_LONG_BRANCH_ABS;
1269 }
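// Decision sketch with hypothetical addresses: for an R_AARCH64_CALL26 at
// 0x400000 whose target is 0x30400000 (offset +0x30000000, about 768MB),
// the offset exceeds MAX_BRANCH_OFFSET (about 128MB), so ST_NONE is ruled
// out; the page delta of 0x30000 pages fits the adrp immediate, so
// ST_ADRP_BRANCH is chosen.  Only when even the adrp range is exceeded does
// the code fall back to ST_LONG_BRANCH_PCREL (for PIE output) or
// ST_LONG_BRANCH_ABS.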
1270
1271 // A class to hold stubs for the AArch64 target.
1272
1273 template<int size, bool big_endian>
1274 class Stub_table : public Output_data
1275 {
1276 public:
1277 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1278 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1279 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1280 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1281 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1282 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1283 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1284 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1285 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1286 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1287 typedef Stub_table<size, big_endian> The_stub_table;
1288 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1289 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1290 Reloc_stub_map;
1291 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1292 typedef Relocate_info<size, big_endian> The_relocate_info;
1293
1294 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1295 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1296
1297 Stub_table(The_aarch64_input_section* owner)
1298 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1299 erratum_stubs_size_(0), prev_data_size_(0)
1300 { }
1301
1302 ~Stub_table()
1303 { }
1304
1305 The_aarch64_input_section*
1306 owner() const
1307 { return owner_; }
1308
1309 // Whether this stub table is empty.
1310 bool
1311 empty() const
1312 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1313
1314 // Return the current data size.
1315 off_t
1316 current_data_size() const
1317 { return this->current_data_size_for_child(); }
1318
1319 // Add a STUB using KEY. The caller is responsible for avoiding addition
1320 // if a STUB with the same key has already been added.
1321 void
1322 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1323
1324 // Add an erratum stub into the erratum stub set. The set is ordered by
1325 // (relobj, shndx, sh_offset).
1326 void
1327 add_erratum_stub(The_erratum_stub* stub);
1328
1329 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1330 The_erratum_stub*
1331 find_erratum_stub(The_aarch64_relobj* a64relobj,
1332 unsigned int shndx, unsigned int sh_offset);
1333
1334 // Find all the errata for a given input section. The return value is a pair
1335 // of iterators [begin, end).
1336 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1337 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1338 unsigned int shndx);
1339
1340 // Compute the erratum stub address.
1341 AArch64_address
1342 erratum_stub_address(The_erratum_stub* stub) const
1343 {
1344 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1345 The_erratum_stub::STUB_ADDR_ALIGN);
1346 r += stub->offset();
1347 return r;
1348 }
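  // Layout example: reloc stubs occupy the start of the stub table and
  // erratum stubs follow them.  If, say, reloc_stubs_size_ is 16, an erratum
  // stub whose offset() is 8 is placed at this->address() + 16 + 8.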
1349
1350 // Finalize stubs. No-op here, just for completeness.
1351 void
1352 finalize_stubs()
1353 { }
1354
1355 // Look up a relocation stub using KEY. Return NULL if there is none.
1356 The_reloc_stub*
1357 find_reloc_stub(The_reloc_stub_key& key)
1358 {
1359 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1360 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1361 }
1362
1363 // Relocate stubs in this stub table.
1364 void
1365 relocate_stubs(const The_relocate_info*,
1366 The_target_aarch64*,
1367 Output_section*,
1368 unsigned char*,
1369 AArch64_address,
1370 section_size_type);
1371
1372 // Update data size at the end of a relaxation pass. Return true if data size
1373 // is different from that of the previous relaxation pass.
1374 bool
1375 update_data_size_changed_p()
1376 {
1377 // No addralign changed here.
1378 off_t s = align_address(this->reloc_stubs_size_,
1379 The_erratum_stub::STUB_ADDR_ALIGN)
1380 + this->erratum_stubs_size_;
1381 bool changed = (s != this->prev_data_size_);
1382 this->prev_data_size_ = s;
1383 return changed;
1384 }
1385
1386 protected:
1387 // Write out section contents.
1388 void
1389 do_write(Output_file*);
1390
1391 // Return the required alignment.
1392 uint64_t
1393 do_addralign() const
1394 {
1395 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1396 The_erratum_stub::STUB_ADDR_ALIGN);
1397 }
1398
1399 // Reset address and file offset.
1400 void
1401 do_reset_address_and_file_offset()
1402 { this->set_current_data_size_for_child(this->prev_data_size_); }
1403
1404 // Set final data size.
1405 void
1406 set_final_data_size()
1407 { this->set_data_size(this->current_data_size()); }
1408
1409 private:
1410 // Relocate one stub.
1411 void
1412 relocate_stub(The_reloc_stub*,
1413 const The_relocate_info*,
1414 The_target_aarch64*,
1415 Output_section*,
1416 unsigned char*,
1417 AArch64_address,
1418 section_size_type);
1419
1420 private:
1421 // Owner of this stub table.
1422 The_aarch64_input_section* owner_;
1423 // The relocation stubs.
1424 Reloc_stub_map reloc_stubs_;
1425 // The erratum stubs.
1426 Erratum_stub_set erratum_stubs_;
1427 // Size of reloc stubs.
1428 off_t reloc_stubs_size_;
1429 // Size of erratum stubs.
1430 off_t erratum_stubs_size_;
1431 // Data size of this stub table in the previous relaxation pass.
1432 off_t prev_data_size_;
1433 }; // End of Stub_table
1434
1435
1436 // Add an erratum stub into the erratum stub set. The set is ordered by
1437 // (relobj, shndx, sh_offset).
1438
1439 template<int size, bool big_endian>
1440 void
1441 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1442 {
1443 std::pair<Erratum_stub_set_iter, bool> ret =
1444 this->erratum_stubs_.insert(stub);
1445 gold_assert(ret.second);
1446 this->erratum_stubs_size_ = align_address(
1447 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1448 stub->set_offset(this->erratum_stubs_size_);
1449 this->erratum_stubs_size_ += stub->stub_size();
1450 }
1451
1452
1453 // Find the erratum stub, if any, for a given (obj, shndx, sh_offset).
1454
1455 template<int size, bool big_endian>
1456 Erratum_stub<size, big_endian>*
1457 Stub_table<size, big_endian>::find_erratum_stub(
1458 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1459 {
1460 // A dummy object used as key to search in the set.
1461 The_erratum_stub key(a64relobj, ST_NONE,
1462 shndx, sh_offset);
1463 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1464 if (i != this->erratum_stubs_.end())
1465 {
1466 The_erratum_stub* stub(*i);
1467 gold_assert(stub->erratum_insn() != 0);
1468 return stub;
1469 }
1470 return NULL;
1471 }
1472
1473
1474 // Find all the errata for a given input section. The return value is a pair of
1475 // iterators [begin, end).
1476
1477 template<int size, bool big_endian>
1478 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1479 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1480 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1481 The_aarch64_relobj* a64relobj, unsigned int shndx)
1482 {
1483 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1484 Erratum_stub_set_iter start, end;
1485 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1486 start = this->erratum_stubs_.lower_bound(&low_key);
1487 if (start == this->erratum_stubs_.end())
1488 return Result_pair(this->erratum_stubs_.end(),
1489 this->erratum_stubs_.end());
1490 end = start;
1491 while (end != this->erratum_stubs_.end() &&
1492 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1493 ++end;
1494 return Result_pair(start, end);
1495 }
1496
1497
1498 // Add a STUB using KEY. The caller is responsible for avoiding addition
1499 // if a STUB with the same key has already been added.
1500
1501 template<int size, bool big_endian>
1502 void
1503 Stub_table<size, big_endian>::add_reloc_stub(
1504 The_reloc_stub* stub, const The_reloc_stub_key& key)
1505 {
1506 gold_assert(stub->type() == key.type());
1507 this->reloc_stubs_[key] = stub;
1508
1509 // Assign stub offset early. We can do this because we never remove
1510 // reloc stubs and they are in the beginning of the stub table.
1511 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1512 The_reloc_stub::STUB_ADDR_ALIGN);
1513 stub->set_offset(this->reloc_stubs_size_);
1514 this->reloc_stubs_size_ += stub->stub_size();
1515 }
1516
1517
1518 // Relocate all stubs in this stub table.
1519
1520 template<int size, bool big_endian>
1521 void
1522 Stub_table<size, big_endian>::
1523 relocate_stubs(const The_relocate_info* relinfo,
1524 The_target_aarch64* target_aarch64,
1525 Output_section* output_section,
1526 unsigned char* view,
1527 AArch64_address address,
1528 section_size_type view_size)
1529 {
1530 // "view_size" is the total size of the stub_table.
1531 gold_assert(address == this->address() &&
1532 view_size == static_cast<section_size_type>(this->data_size()));
1533 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1534 p != this->reloc_stubs_.end(); ++p)
1535 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1536 view, address, view_size);
1537
1538 // Just for convenience.
1539 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1540
1541 // Now 'relocate' erratum stubs.
1542 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1543 i != this->erratum_stubs_.end(); ++i)
1544 {
1545 AArch64_address stub_address = this->erratum_stub_address(*i);
1546 // The address of "b" in the stub that is to be "relocated".
1547 AArch64_address stub_b_insn_address;
1548 // Branch offset that is to be filled in "b" insn.
1549 int b_offset = 0;
1550 switch ((*i)->type())
1551 {
1552 case ST_E_843419:
1553 case ST_E_835769:
1554 // The 1st insn of the erratum stub could be a relocation spot;
1555 // in this case we need to fix it with
1556 // "(*i)->erratum_insn()".
1557 elfcpp::Swap<32, big_endian>::writeval(
1558 view + (stub_address - this->address()),
1559 (*i)->erratum_insn());
1560 // For the erratum, the 2nd insn is a b-insn to be patched
1561 // (relocated).
1562 stub_b_insn_address = stub_address + 1 * BPI;
1563 b_offset = (*i)->destination_address() - stub_b_insn_address;
1564 AArch64_relocate_functions<size, big_endian>::construct_b(
1565 view + (stub_b_insn_address - this->address()),
1566 ((unsigned int)(b_offset)) & 0xfffffff);
1567 break;
1568 default:
1569 gold_unreachable();
1570 break;
1571 }
1572 }
1573 }
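// Example with hypothetical addresses: if an ST_E_843419 stub sits at
// 0x500000 and its destination_address() (the insn following the original
// erratum spot) is 0x420004, the "b" insn of the stub is at 0x500004 and
//   b_offset == 0x420004 - 0x500004 == -0xE0000,
// which construct_b() encodes (truncated to 28 bits, as above) into the
// 0x14000000 branch opcode.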
1574
1575
1576 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1577
1578 template<int size, bool big_endian>
1579 void
1580 Stub_table<size, big_endian>::
1581 relocate_stub(The_reloc_stub* stub,
1582 const The_relocate_info* relinfo,
1583 The_target_aarch64* target_aarch64,
1584 Output_section* output_section,
1585 unsigned char* view,
1586 AArch64_address address,
1587 section_size_type view_size)
1588 {
1589 // "offset" is the offset from the beginning of the stub_table.
1590 section_size_type offset = stub->offset();
1591 section_size_type stub_size = stub->stub_size();
1592 // "view_size" is the total size of the stub_table.
1593 gold_assert(offset + stub_size <= view_size);
1594
1595 target_aarch64->relocate_stub(stub, relinfo, output_section,
1596 view + offset, address + offset, view_size);
1597 }
1598
1599
1600 // Write out the stubs to file.
1601
1602 template<int size, bool big_endian>
1603 void
1604 Stub_table<size, big_endian>::do_write(Output_file* of)
1605 {
1606 off_t offset = this->offset();
1607 const section_size_type oview_size =
1608 convert_to_section_size_type(this->data_size());
1609 unsigned char* const oview = of->get_output_view(offset, oview_size);
1610
1611 // Write relocation stubs.
1612 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1613 p != this->reloc_stubs_.end(); ++p)
1614 {
1615 The_reloc_stub* stub = p->second;
1616 AArch64_address address = this->address() + stub->offset();
1617 gold_assert(address ==
1618 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1619 stub->write(oview + stub->offset(), stub->stub_size());
1620 }
1621
1622 // Write erratum stubs.
1623 unsigned int erratum_stub_start_offset =
1624 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1625 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1626 p != this->erratum_stubs_.end(); ++p)
1627 {
1628 The_erratum_stub* stub(*p);
1629 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1630 stub->stub_size());
1631 }
1632
1633 of->write_output_view(this->offset(), oview_size, oview);
1634 }
1635
1636
1637 // AArch64_relobj class.
1638
1639 template<int size, bool big_endian>
1640 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1641 {
1642 public:
1643 typedef AArch64_relobj<size, big_endian> This;
1644 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1645 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1646 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1647 typedef Stub_table<size, big_endian> The_stub_table;
1648 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1649 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1650 typedef std::vector<The_stub_table*> Stub_table_list;
1651 static const AArch64_address invalid_address =
1652 static_cast<AArch64_address>(-1);
1653
1654 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1655 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1656 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1657 stub_tables_()
1658 { }
1659
1660 ~AArch64_relobj()
1661 { }
1662
1663 // Return the stub table of the SHNDX-th section if there is one.
1664 The_stub_table*
1665 stub_table(unsigned int shndx) const
1666 {
1667 gold_assert(shndx < this->stub_tables_.size());
1668 return this->stub_tables_[shndx];
1669 }
1670
1671 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1672 void
1673 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1674 {
1675 gold_assert(shndx < this->stub_tables_.size());
1676 this->stub_tables_[shndx] = stub_table;
1677 }
1678
1679 // Entrance to errata scanning.
1680 void
1681 scan_errata(unsigned int shndx,
1682 const elfcpp::Shdr<size, big_endian>&,
1683 Output_section*, const Symbol_table*,
1684 The_target_aarch64*);
1685
1686 // Scan all relocation sections for stub generation.
1687 void
1688 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1689 const Layout*);
1690
1691 // Whether a section is a scannable text section.
1692 bool
1693 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1694 const Output_section*, const Symbol_table*);
1695
1696 // Convert regular input section with index SHNDX to a relaxed section.
1697 void
1698 convert_input_section_to_relaxed_section(unsigned /* shndx */)
1699 {
1700 // The stubs have relocations and we need to process them after writing
1701 // out the stubs. So relocation now must follow section write.
1702 this->set_relocs_must_follow_section_writes();
1703 }
1704
1705 // Structure for mapping symbol position.
1706 struct Mapping_symbol_position
1707 {
1708 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1709 shndx_(shndx), offset_(offset)
1710 {}
1711
1712 // "<" comparator used in ordered_map container.
1713 bool
1714 operator<(const Mapping_symbol_position& p) const
1715 {
1716 return (this->shndx_ < p.shndx_
1717 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1718 }
1719
1720 // Section index.
1721 unsigned int shndx_;
1722
1723 // Section offset.
1724 AArch64_address offset_;
1725 };
1726
1727 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1728
1729 protected:
1730 // Post constructor setup.
1731 void
1732 do_setup()
1733 {
1734 // Call parent's setup method.
1735 Sized_relobj_file<size, big_endian>::do_setup();
1736
1737 // Initialize look-up tables.
1738 this->stub_tables_.resize(this->shnum());
1739 }
1740
1741 virtual void
1742 do_relocate_sections(
1743 const Symbol_table* symtab, const Layout* layout,
1744 const unsigned char* pshdrs, Output_file* of,
1745 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1746
1747 // Count local symbols and (optionally) record mapping info.
1748 virtual void
1749 do_count_local_symbols(Stringpool_template<char>*,
1750 Stringpool_template<char>*);
1751
1752 private:
1753 // Fix all errata in the object.
1754 void
1755 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1756
1757 // Whether a section needs to be scanned for relocation stubs.
1758 bool
1759 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1760 const Relobj::Output_sections&,
1761 const Symbol_table*, const unsigned char*);
1762
1763 // List of stub tables.
1764 Stub_table_list stub_tables_;
1765
1766 // Mapping symbol information sorted by (section index, section_offset).
1767 Mapping_symbol_info mapping_symbol_info_;
1768 }; // End of AArch64_relobj
1769
1770
1771 // Override to record mapping symbol information.
1772 template<int size, bool big_endian>
1773 void
1774 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1775 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1776 {
1777 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1778
1779 // Only erratum-fixing work needs mapping symbols, so skip this time-consuming
1780 // processing if we are not fixing errata.
1781 if (!parameters->options().fix_cortex_a53_843419()
1782 && !parameters->options().fix_cortex_a53_835769())
1783 return;
1784
1785 const unsigned int loccount = this->local_symbol_count();
1786 if (loccount == 0)
1787 return;
1788
1789 // Read the symbol table section header.
1790 const unsigned int symtab_shndx = this->symtab_shndx();
1791 elfcpp::Shdr<size, big_endian>
1792 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1793 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1794
1795 // Read the local symbols.
1796 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1797 gold_assert(loccount == symtabshdr.get_sh_info());
1798 off_t locsize = loccount * sym_size;
1799 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1800 locsize, true, true);
1801
1802 // For mapping symbol processing, we need to read the symbol names.
1803 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1804 if (strtab_shndx >= this->shnum())
1805 {
1806 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1807 return;
1808 }
1809
1810 elfcpp::Shdr<size, big_endian>
1811 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1812 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1813 {
1814 this->error(_("symbol table name section has wrong type: %u"),
1815 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1816 return;
1817 }
1818
1819 const char* pnames =
1820 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1821 strtabshdr.get_sh_size(),
1822 false, false));
1823
1824 // Skip the first dummy symbol.
1825 psyms += sym_size;
1826 typename Sized_relobj_file<size, big_endian>::Local_values*
1827 plocal_values = this->local_values();
1828 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1829 {
1830 elfcpp::Sym<size, big_endian> sym(psyms);
1831 Symbol_value<size>& lv((*plocal_values)[i]);
1832 AArch64_address input_value = lv.input_value();
1833
1834 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1835 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1836 // symbols.
1837 // A mapping symbol takes one of the following four forms -
1838 // a) $x
1839 // b) $x.<any...>
1840 // c) $d
1841 // d) $d.<any...>
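// For instance, "$x", "$x.text.hot" and "$d.1" are all treated as mapping
// symbols by the test below, while a name such as "$xyz" is not, because
// its third character is neither '\0' nor '.'.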
1842 const char* sym_name = pnames + sym.get_st_name();
1843 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1844 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1845 {
1846 bool is_ordinary;
1847 unsigned int input_shndx =
1848 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1849 gold_assert(is_ordinary);
1850
1851 Mapping_symbol_position msp(input_shndx, input_value);
1852 // Insert mapping_symbol_info into map whose ordering is defined by
1853 // (shndx, offset_within_section).
1854 this->mapping_symbol_info_[msp] = sym_name[1];
1855 }
1856 }
1857 }
1858
1859
1860 // Fix all errata in the object.
1861
1862 template<int size, bool big_endian>
1863 void
1864 AArch64_relobj<size, big_endian>::fix_errata(
1865 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1866 {
1867 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1868 unsigned int shnum = this->shnum();
1869 for (unsigned int i = 1; i < shnum; ++i)
1870 {
1871 The_stub_table* stub_table = this->stub_table(i);
1872 if (!stub_table)
1873 continue;
1874 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1875 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1876 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1877 while (p != end)
1878 {
1879 The_erratum_stub* stub = *p;
1880 typename Sized_relobj_file<size, big_endian>::View_size&
1881 pview((*pviews)[i]);
1882
1883 // Double-check the data before fixing.
1884 gold_assert(pview.address + stub->sh_offset()
1885 == stub->erratum_address());
1886
1887 // Update previously recorded erratum insn with relocated
1888 // version.
1889 Insntype* ip =
1890 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1891 Insntype insn_to_fix = ip[0];
1892 stub->update_erratum_insn(insn_to_fix);
1893
1894 // Replace the erratum insn with a branch-to-stub.
1895 AArch64_address stub_address =
1896 stub_table->erratum_stub_address(stub);
1897 unsigned int b_offset = stub_address - stub->erratum_address();
1898 AArch64_relocate_functions<size, big_endian>::construct_b(
1899 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1900 ++p;
1901 }
1902 }
1903 }
1904
1905
1906 // Relocate sections.
1907
1908 template<int size, bool big_endian>
1909 void
1910 AArch64_relobj<size, big_endian>::do_relocate_sections(
1911 const Symbol_table* symtab, const Layout* layout,
1912 const unsigned char* pshdrs, Output_file* of,
1913 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1914 {
1915 // Call parent to relocate sections.
1916 Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
1917 pshdrs, of, pviews);
1918
1919 // We do not generate stubs if doing a relocatable link.
1920 if (parameters->options().relocatable())
1921 return;
1922
1923 if (parameters->options().fix_cortex_a53_843419()
1924 || parameters->options().fix_cortex_a53_835769())
1925 this->fix_errata(pviews);
1926
1927 Relocate_info<size, big_endian> relinfo;
1928 relinfo.symtab = symtab;
1929 relinfo.layout = layout;
1930 relinfo.object = this;
1931
1932 // Relocate stub tables.
1933 unsigned int shnum = this->shnum();
1934 The_target_aarch64* target = The_target_aarch64::current_target();
1935
1936 for (unsigned int i = 1; i < shnum; ++i)
1937 {
1938 The_aarch64_input_section* aarch64_input_section =
1939 target->find_aarch64_input_section(this, i);
1940 if (aarch64_input_section != NULL
1941 && aarch64_input_section->is_stub_table_owner()
1942 && !aarch64_input_section->stub_table()->empty())
1943 {
1944 Output_section* os = this->output_section(i);
1945 gold_assert(os != NULL);
1946
1947 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
1948 relinfo.reloc_shdr = NULL;
1949 relinfo.data_shndx = i;
1950 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
1951
1952 typename Sized_relobj_file<size, big_endian>::View_size&
1953 view_struct = (*pviews)[i];
1954 gold_assert(view_struct.view != NULL);
1955
1956 The_stub_table* stub_table = aarch64_input_section->stub_table();
1957 off_t offset = stub_table->address() - view_struct.address;
1958 unsigned char* view = view_struct.view + offset;
1959 AArch64_address address = stub_table->address();
1960 section_size_type view_size = stub_table->data_size();
1961 stub_table->relocate_stubs(&relinfo, target, os, view, address,
1962 view_size);
1963 }
1964 }
1965 }
1966
1967
1968 // Determine if an input section is scannable for stub processing. SHDR is
1969 // the header of the section and SHNDX is the section index. OS is the output
1970 // section for the input section and SYMTAB is the global symbol table used to
1971 // look up ICF information.
1972
1973 template<int size, bool big_endian>
1974 bool
1975 AArch64_relobj<size, big_endian>::text_section_is_scannable(
1976 const elfcpp::Shdr<size, big_endian>& text_shdr,
1977 unsigned int text_shndx,
1978 const Output_section* os,
1979 const Symbol_table* symtab)
1980 {
1981 // Skip any empty sections, unallocated sections or sections whose
1982 // type are not SHT_PROGBITS.
1983 if (text_shdr.get_sh_size() == 0
1984 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
1985 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
1986 return false;
1987
1988 // Skip any discarded or ICF'ed sections.
1989 if (os == NULL || symtab->is_section_folded(this, text_shndx))
1990 return false;
1991
1992 // Skip exception frame.
1993 if (strcmp(os->name(), ".eh_frame") == 0)
1994 return false;
1995
1996 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
1997 os->find_relaxed_input_section(this, text_shndx) != NULL);
1998
1999 return true;
2000 }
2001
2002
2003 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2004 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2005
2006 template<int size, bool big_endian>
2007 bool
2008 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2009 const elfcpp::Shdr<size, big_endian>& shdr,
2010 const Relobj::Output_sections& out_sections,
2011 const Symbol_table* symtab,
2012 const unsigned char* pshdrs)
2013 {
2014 unsigned int sh_type = shdr.get_sh_type();
2015 if (sh_type != elfcpp::SHT_RELA)
2016 return false;
2017
2018 // Ignore empty section.
2019 off_t sh_size = shdr.get_sh_size();
2020 if (sh_size == 0)
2021 return false;
2022
2023 // Ignore reloc section with unexpected symbol table. The
2024 // error will be reported in the final link.
2025 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2026 return false;
2027
2028 gold_assert(sh_type == elfcpp::SHT_RELA);
2029 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2030
2031 // Ignore reloc section with unexpected entsize or uneven size.
2032 // The error will be reported in the final link.
2033 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2034 return false;
2035
2036 // Ignore reloc section with bad info. This error will be
2037 // reported in the final link.
2038 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2039 if (text_shndx >= this->shnum())
2040 return false;
2041
2042 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2043 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2044 text_shndx * shdr_size);
2045 return this->text_section_is_scannable(text_shdr, text_shndx,
2046 out_sections[text_shndx], symtab);
2047 }
2048
2049
2050 // Scan section SHNDX for erratum 843419 and 835769.
2051
2052 template<int size, bool big_endian>
2053 void
2054 AArch64_relobj<size, big_endian>::scan_errata(
2055 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2056 Output_section* os, const Symbol_table* symtab,
2057 The_target_aarch64* target)
2058 {
2059 if (shdr.get_sh_size() == 0
2060 || (shdr.get_sh_flags() &
2061 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2062 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2063 return;
2064
2065 if (!os || symtab->is_section_folded(this, shndx)) return;
2066
2067 AArch64_address output_offset = this->get_output_section_offset(shndx);
2068 AArch64_address output_address;
2069 if (output_offset != invalid_address)
2070 output_address = os->address() + output_offset;
2071 else
2072 {
2073 const Output_relaxed_input_section* poris =
2074 os->find_relaxed_input_section(this, shndx);
2075 if (!poris) return;
2076 output_address = poris->address();
2077 }
2078
2079 section_size_type input_view_size = 0;
2080 const unsigned char* input_view =
2081 this->section_contents(shndx, &input_view_size, false);
2082
2083 Mapping_symbol_position section_start(shndx, 0);
2084 // Find the first mapping symbol record within section shndx.
2085 typename Mapping_symbol_info::const_iterator p =
2086 this->mapping_symbol_info_.lower_bound(section_start);
2087 while (p != this->mapping_symbol_info_.end() &&
2088 p->first.shndx_ == shndx)
2089 {
2090 typename Mapping_symbol_info::const_iterator prev = p;
2091 ++p;
2092 if (prev->second == 'x')
2093 {
2094 section_size_type span_start =
2095 convert_to_section_size_type(prev->first.offset_);
2096 section_size_type span_end;
2097 if (p != this->mapping_symbol_info_.end()
2098 && p->first.shndx_ == shndx)
2099 span_end = convert_to_section_size_type(p->first.offset_);
2100 else
2101 span_end = convert_to_section_size_type(shdr.get_sh_size());
2102
2103 // We do not share the scanning code between the two errata. For 843419,
2104 // only the last few insns of each page are examined, which is fast,
2105 // whereas for 835769 every insn pair needs to be checked.
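// Roughly speaking (see is_erratum_843419_sequence() and
// is_erratum_835769_sequence() below for the exact tests):
// - an 843419 sequence begins with an ADRP in one of the last two insn
//   slots of a 4KiB page (page offset 0xff8 or 0xffc), later followed by
//   a load/store that uses the register written by that ADRP;
// - an 835769 sequence is a 64-bit multiply-accumulate (e.g. MADD/MSUB)
//   immediately preceded by a 64-bit load or store.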
2106
2107 if (parameters->options().fix_cortex_a53_843419())
2108 target->scan_erratum_843419_span(
2109 this, shndx, span_start, span_end,
2110 const_cast<unsigned char*>(input_view), output_address);
2111
2112 if (parameters->options().fix_cortex_a53_835769())
2113 target->scan_erratum_835769_span(
2114 this, shndx, span_start, span_end,
2115 const_cast<unsigned char*>(input_view), output_address);
2116 }
2117 }
2118 }
2119
2120
2121 // Scan relocations for stub generation.
2122
2123 template<int size, bool big_endian>
2124 void
2125 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2126 The_target_aarch64* target,
2127 const Symbol_table* symtab,
2128 const Layout* layout)
2129 {
2130 unsigned int shnum = this->shnum();
2131 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2132
2133 // Read the section headers.
2134 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2135 shnum * shdr_size,
2136 true, true);
2137
2138 // To speed up processing, we set up hash tables for fast lookup of
2139 // input offsets to output addresses.
2140 this->initialize_input_to_output_maps();
2141
2142 const Relobj::Output_sections& out_sections(this->output_sections());
2143
2144 Relocate_info<size, big_endian> relinfo;
2145 relinfo.symtab = symtab;
2146 relinfo.layout = layout;
2147 relinfo.object = this;
2148
2149 // Do relocation stub scanning.
2150 const unsigned char* p = pshdrs + shdr_size;
2151 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2152 {
2153 const elfcpp::Shdr<size, big_endian> shdr(p);
2154 if (parameters->options().fix_cortex_a53_843419()
2155 || parameters->options().fix_cortex_a53_835769())
2156 scan_errata(i, shdr, out_sections[i], symtab, target);
2157 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2158 pshdrs))
2159 {
2160 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2161 AArch64_address output_offset =
2162 this->get_output_section_offset(index);
2163 AArch64_address output_address;
2164 if (output_offset != invalid_address)
2165 {
2166 output_address = out_sections[index]->address() + output_offset;
2167 }
2168 else
2169 {
2170 // Currently this only happens for a relaxed section.
2171 const Output_relaxed_input_section* poris =
2172 out_sections[index]->find_relaxed_input_section(this, index);
2173 gold_assert(poris != NULL);
2174 output_address = poris->address();
2175 }
2176
2177 // Get the relocations.
2178 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2179 shdr.get_sh_size(),
2180 true, false);
2181
2182 // Get the section contents.
2183 section_size_type input_view_size = 0;
2184 const unsigned char* input_view =
2185 this->section_contents(index, &input_view_size, false);
2186
2187 relinfo.reloc_shndx = i;
2188 relinfo.data_shndx = index;
2189 unsigned int sh_type = shdr.get_sh_type();
2190 unsigned int reloc_size;
2191 gold_assert(sh_type == elfcpp::SHT_RELA);
2192 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2193
2194 Output_section* os = out_sections[index];
2195 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2196 shdr.get_sh_size() / reloc_size,
2197 os,
2198 output_offset == invalid_address,
2199 input_view, output_address,
2200 input_view_size);
2201 }
2202 }
2203 }
2204
2205
2206 // A class to wrap an ordinary input section containing executable code.
2207
2208 template<int size, bool big_endian>
2209 class AArch64_input_section : public Output_relaxed_input_section
2210 {
2211 public:
2212 typedef Stub_table<size, big_endian> The_stub_table;
2213
2214 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2215 : Output_relaxed_input_section(relobj, shndx, 1),
2216 stub_table_(NULL),
2217 original_contents_(NULL), original_size_(0),
2218 original_addralign_(1)
2219 { }
2220
2221 ~AArch64_input_section()
2222 { delete[] this->original_contents_; }
2223
2224 // Initialize.
2225 void
2226 init();
2227
2228 // Set the stub_table.
2229 void
2230 set_stub_table(The_stub_table* st)
2231 { this->stub_table_ = st; }
2232
2233 // Whether this is a stub table owner.
2234 bool
2235 is_stub_table_owner() const
2236 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2237
2238 // Return the original size of the section.
2239 uint32_t
2240 original_size() const
2241 { return this->original_size_; }
2242
2243 // Return the stub table.
2244 The_stub_table*
2245 stub_table()
2246 { return stub_table_; }
2247
2248 protected:
2249 // Write out this input section.
2250 void
2251 do_write(Output_file*);
2252
2253 // Return required alignment of this.
2254 uint64_t
2255 do_addralign() const
2256 {
2257 if (this->is_stub_table_owner())
2258 return std::max(this->stub_table_->addralign(),
2259 static_cast<uint64_t>(this->original_addralign_));
2260 else
2261 return this->original_addralign_;
2262 }
2263
2264 // Finalize data size.
2265 void
2266 set_final_data_size();
2267
2268 // Reset address and file offset.
2269 void
2270 do_reset_address_and_file_offset();
2271
2272 // Output offset.
2273 bool
2274 do_output_offset(const Relobj* object, unsigned int shndx,
2275 section_offset_type offset,
2276 section_offset_type* poutput) const
2277 {
2278 if ((object == this->relobj())
2279 && (shndx == this->shndx())
2280 && (offset >= 0)
2281 && (offset <=
2282 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2283 {
2284 *poutput = offset;
2285 return true;
2286 }
2287 else
2288 return false;
2289 }
2290
2291 private:
2292 // Copying is not allowed.
2293 AArch64_input_section(const AArch64_input_section&);
2294 AArch64_input_section& operator=(const AArch64_input_section&);
2295
2296 // The relocation stubs.
2297 The_stub_table* stub_table_;
2298 // Original section contents. We have to make a copy here since the file
2299 // containing the original section may not be locked when we need to access
2300 // the contents.
2301 unsigned char* original_contents_;
2302 // Section size of the original input section.
2303 uint32_t original_size_;
2304 // Address alignment of the original input section.
2305 uint32_t original_addralign_;
2306 }; // End of AArch64_input_section
2307
2308
2309 // Finalize data size.
2310
2311 template<int size, bool big_endian>
2312 void
2313 AArch64_input_section<size, big_endian>::set_final_data_size()
2314 {
2315 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2316
2317 if (this->is_stub_table_owner())
2318 {
2319 this->stub_table_->finalize_data_size();
2320 off = align_address(off, this->stub_table_->addralign());
2321 off += this->stub_table_->data_size();
2322 }
2323 this->set_data_size(off);
2324 }
2325
2326
2327 // Reset address and file offset.
2328
2329 template<int size, bool big_endian>
2330 void
2331 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2332 {
2333 // Size of the original input section contents.
2334 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2335
2336 // If this is a stub table owner, account for the stub table size.
2337 if (this->is_stub_table_owner())
2338 {
2339 The_stub_table* stub_table = this->stub_table_;
2340
2341 // Reset the stub table's address and file offset. The
2342 // child's current data size will be updated afterwards.
2343 stub_table_->reset_address_and_file_offset();
2344 off = align_address(off, stub_table_->addralign());
2345 off += stub_table->current_data_size();
2346 }
2347
2348 this->set_current_data_size(off);
2349 }
2350
2351
2352 // Initialize an AArch64_input_section.
2353
2354 template<int size, bool big_endian>
2355 void
2356 AArch64_input_section<size, big_endian>::init()
2357 {
2358 Relobj* relobj = this->relobj();
2359 unsigned int shndx = this->shndx();
2360
2361 // We have to cache original size, alignment and contents to avoid locking
2362 // the original file.
2363 this->original_addralign_ =
2364 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2365
2366 // This is not efficient but we expect only a small number of relaxed
2367 // input sections for stubs.
2368 section_size_type section_size;
2369 const unsigned char* section_contents =
2370 relobj->section_contents(shndx, &section_size, false);
2371 this->original_size_ =
2372 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2373
2374 gold_assert(this->original_contents_ == NULL);
2375 this->original_contents_ = new unsigned char[section_size];
2376 memcpy(this->original_contents_, section_contents, section_size);
2377
2378 // We want to make this look like the original input section after
2379 // output sections are finalized.
2380 Output_section* os = relobj->output_section(shndx);
2381 off_t offset = relobj->output_section_offset(shndx);
2382 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2383 this->set_address(os->address() + offset);
2384 this->set_file_offset(os->offset() + offset);
2385 this->set_current_data_size(this->original_size_);
2386 this->finalize_data_size();
2387 }
2388
2389
2390 // Write data to output file.
2391
2392 template<int size, bool big_endian>
2393 void
2394 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2395 {
2396 // We have to write out the original section content.
2397 gold_assert(this->original_contents_ != NULL);
2398 of->write(this->offset(), this->original_contents_,
2399 this->original_size_);
2400
2401 // If this owns a stub table and it is not empty, write it.
2402 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2403 this->stub_table_->write(of);
2404 }
2405
2406
2407 // AArch64 output section class. This is defined mainly to add a number of
2408 // stub-generation methods.
2409
2410 template<int size, bool big_endian>
2411 class AArch64_output_section : public Output_section
2412 {
2413 public:
2414 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2415 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2416 typedef Stub_table<size, big_endian> The_stub_table;
2417 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2418
2419 public:
2420 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2421 elfcpp::Elf_Xword flags)
2422 : Output_section(name, type, flags)
2423 { }
2424
2425 ~AArch64_output_section() {}
2426
2427 // Group input sections for stub generation.
2428 void
2429 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2430 const Task*);
2431
2432 private:
2433 typedef Output_section::Input_section Input_section;
2434 typedef Output_section::Input_section_list Input_section_list;
2435
2436 // Create a stub group.
2437 void
2438 create_stub_group(Input_section_list::const_iterator,
2439 Input_section_list::const_iterator,
2440 Input_section_list::const_iterator,
2441 The_target_aarch64*,
2442 std::vector<Output_relaxed_input_section*>&,
2443 const Task*);
2444 }; // End of AArch64_output_section
2445
2446
2447 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2448 // the input section that will be the owner of the stub table.
2449
2450 template<int size, bool big_endian> void
2451 AArch64_output_section<size, big_endian>::create_stub_group(
2452 Input_section_list::const_iterator first,
2453 Input_section_list::const_iterator last,
2454 Input_section_list::const_iterator owner,
2455 The_target_aarch64* target,
2456 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2457 const Task* task)
2458 {
2459 // Currently we convert ordinary input sections into relaxed sections only
2460 // at this point.
2461 The_aarch64_input_section* input_section;
2462 if (owner->is_relaxed_input_section())
2463 gold_unreachable();
2464 else
2465 {
2466 gold_assert(owner->is_input_section());
2467 // Create a new relaxed input section. We need to lock the original
2468 // file.
2469 Task_lock_obj<Object> tl(task, owner->relobj());
2470 input_section =
2471 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2472 new_relaxed_sections.push_back(input_section);
2473 }
2474
2475 // Create a stub table.
2476 The_stub_table* stub_table =
2477 target->new_stub_table(input_section);
2478
2479 input_section->set_stub_table(stub_table);
2480
2481 Input_section_list::const_iterator p = first;
2482 // Look for input sections or relaxed input sections in [first ... last].
2483 do
2484 {
2485 if (p->is_input_section() || p->is_relaxed_input_section())
2486 {
2487 // The stub table information for input sections lives
2488 // in their objects.
2489 The_aarch64_relobj* aarch64_relobj =
2490 static_cast<The_aarch64_relobj*>(p->relobj());
2491 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2492 }
2493 }
2494 while (p++ != last);
2495 }
2496
2497
2498 // Group input sections for stub generation. GROUP_SIZE is roughly the size
2499 // limit of a stub group. We grow a stub group by adding input sections until
2500 // the size is just below GROUP_SIZE. The last input section will be converted
2501 // into a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add
2502 // input sections after the stub table, effectively doubling the group size.
2503 //
2504 // This is similar to the group_sections() function in elf32-arm.c but is
2505 // implemented differently.
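// As an illustration, assume GROUP_SIZE is 0x1000, STUBS_ALWAYS_AFTER_BRANCH
// is true, and the output section has three input sections of 0x600 bytes
// each: the first two (0xc00 bytes) form one group whose last member becomes
// the stub table owner, since adding the third would reach GROUP_SIZE; the
// third section then starts (and, at the end of the loop, owns) a new group.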
2506
2507 template<int size, bool big_endian>
2508 void AArch64_output_section<size, big_endian>::group_sections(
2509 section_size_type group_size,
2510 bool stubs_always_after_branch,
2511 Target_aarch64<size, big_endian>* target,
2512 const Task* task)
2513 {
2514 typedef enum
2515 {
2516 NO_GROUP,
2517 FINDING_STUB_SECTION,
2518 HAS_STUB_SECTION
2519 } State;
2520
2521 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2522
2523 State state = NO_GROUP;
2524 section_size_type off = 0;
2525 section_size_type group_begin_offset = 0;
2526 section_size_type group_end_offset = 0;
2527 section_size_type stub_table_end_offset = 0;
2528 Input_section_list::const_iterator group_begin =
2529 this->input_sections().end();
2530 Input_section_list::const_iterator stub_table =
2531 this->input_sections().end();
2532 Input_section_list::const_iterator group_end = this->input_sections().end();
2533 for (Input_section_list::const_iterator p = this->input_sections().begin();
2534 p != this->input_sections().end();
2535 ++p)
2536 {
2537 section_size_type section_begin_offset =
2538 align_address(off, p->addralign());
2539 section_size_type section_end_offset =
2540 section_begin_offset + p->data_size();
2541
2542 // Check to see if we should group the previously seen sections.
2543 switch (state)
2544 {
2545 case NO_GROUP:
2546 break;
2547
2548 case FINDING_STUB_SECTION:
2549 // Adding this section makes the group larger than GROUP_SIZE.
2550 if (section_end_offset - group_begin_offset >= group_size)
2551 {
2552 if (stubs_always_after_branch)
2553 {
2554 gold_assert(group_end != this->input_sections().end());
2555 this->create_stub_group(group_begin, group_end, group_end,
2556 target, new_relaxed_sections,
2557 task);
2558 state = NO_GROUP;
2559 }
2560 else
2561 {
2562 // Input sections up to stub_group_size bytes after the stub
2563 // table can be handled by it too.
2564 state = HAS_STUB_SECTION;
2565 stub_table = group_end;
2566 stub_table_end_offset = group_end_offset;
2567 }
2568 }
2569 break;
2570
2571 case HAS_STUB_SECTION:
2572 // Adding this section makes the post stub-section group larger
2573 // than GROUP_SIZE.
2574 gold_unreachable();
2575 // NOT SUPPORTED YET. For completeness only.
2576 if (section_end_offset - stub_table_end_offset >= group_size)
2577 {
2578 gold_assert(group_end != this->input_sections().end());
2579 this->create_stub_group(group_begin, group_end, stub_table,
2580 target, new_relaxed_sections, task);
2581 state = NO_GROUP;
2582 }
2583 break;
2584
2585 default:
2586 gold_unreachable();
2587 }
2588
2589 // If we see an input section and currently there is no group, start
2590 // a new one. Skip any empty sections. We look at the data size
2591 // instead of calling p->relobj()->section_size() to avoid locking.
2592 if ((p->is_input_section() || p->is_relaxed_input_section())
2593 && (p->data_size() != 0))
2594 {
2595 if (state == NO_GROUP)
2596 {
2597 state = FINDING_STUB_SECTION;
2598 group_begin = p;
2599 group_begin_offset = section_begin_offset;
2600 }
2601
2602 // Keep track of the last input section seen.
2603 group_end = p;
2604 group_end_offset = section_end_offset;
2605 }
2606
2607 off = section_end_offset;
2608 }
2609
2610 // Create a stub group for any ungrouped sections.
2611 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2612 {
2613 gold_assert(group_end != this->input_sections().end());
2614 this->create_stub_group(group_begin, group_end,
2615 (state == FINDING_STUB_SECTION
2616 ? group_end
2617 : stub_table),
2618 target, new_relaxed_sections, task);
2619 }
2620
2621 if (!new_relaxed_sections.empty())
2622 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2623
2624 // Notify the objects whose input sections were converted to relaxed sections.
2625 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2626 {
2627 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2628 new_relaxed_sections[i]->relobj());
2629 unsigned int shndx = new_relaxed_sections[i]->shndx();
2630 // Tell AArch64_relobj that this input section is converted.
2631 relobj->convert_input_section_to_relaxed_section(shndx);
2632 }
2633 } // End of AArch64_output_section::group_sections
2634
2635
2636 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2637
2638
2639 // The aarch64 target class.
2640 // See the ABI at
2641 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2642 template<int size, bool big_endian>
2643 class Target_aarch64 : public Sized_target<size, big_endian>
2644 {
2645 public:
2646 typedef Target_aarch64<size, big_endian> This;
2647 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2648 Reloc_section;
2649 typedef Relocate_info<size, big_endian> The_relocate_info;
2650 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2651 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2652 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2653 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2654 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2655 typedef Stub_table<size, big_endian> The_stub_table;
2656 typedef std::vector<The_stub_table*> Stub_table_list;
2657 typedef typename Stub_table_list::iterator Stub_table_iterator;
2658 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2659 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2660 typedef Unordered_map<Section_id,
2661 AArch64_input_section<size, big_endian>*,
2662 Section_id_hash> AArch64_input_section_map;
2663 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2664 const static int TCB_SIZE = size / 8 * 2;
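// Thus TCB_SIZE is 16 bytes for the 64-bit target and 8 bytes for the 32-bit
// target: two pointer-sized words reserved at the start of the TLS block for
// the thread control block in the AArch64 TLS ABI.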
2665
2666 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2667 : Sized_target<size, big_endian>(info),
2668 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2669 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2670 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2671 got_mod_index_offset_(-1U),
2672 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2673 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2674 { }
2675
2676 // Scan the relocations to determine unreferenced sections for
2677 // garbage collection.
2678 void
2679 gc_process_relocs(Symbol_table* symtab,
2680 Layout* layout,
2681 Sized_relobj_file<size, big_endian>* object,
2682 unsigned int data_shndx,
2683 unsigned int sh_type,
2684 const unsigned char* prelocs,
2685 size_t reloc_count,
2686 Output_section* output_section,
2687 bool needs_special_offset_handling,
2688 size_t local_symbol_count,
2689 const unsigned char* plocal_symbols);
2690
2691 // Scan the relocations to look for symbol adjustments.
2692 void
2693 scan_relocs(Symbol_table* symtab,
2694 Layout* layout,
2695 Sized_relobj_file<size, big_endian>* object,
2696 unsigned int data_shndx,
2697 unsigned int sh_type,
2698 const unsigned char* prelocs,
2699 size_t reloc_count,
2700 Output_section* output_section,
2701 bool needs_special_offset_handling,
2702 size_t local_symbol_count,
2703 const unsigned char* plocal_symbols);
2704
2705 // Finalize the sections.
2706 void
2707 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2708
2709 // Return the value to use for a dynamic symbol which requires special
2710 // treatment.
2711 uint64_t
2712 do_dynsym_value(const Symbol*) const;
2713
2714 // Relocate a section.
2715 void
2716 relocate_section(const Relocate_info<size, big_endian>*,
2717 unsigned int sh_type,
2718 const unsigned char* prelocs,
2719 size_t reloc_count,
2720 Output_section* output_section,
2721 bool needs_special_offset_handling,
2722 unsigned char* view,
2723 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2724 section_size_type view_size,
2725 const Reloc_symbol_changes*);
2726
2727 // Scan the relocs during a relocatable link.
2728 void
2729 scan_relocatable_relocs(Symbol_table* symtab,
2730 Layout* layout,
2731 Sized_relobj_file<size, big_endian>* object,
2732 unsigned int data_shndx,
2733 unsigned int sh_type,
2734 const unsigned char* prelocs,
2735 size_t reloc_count,
2736 Output_section* output_section,
2737 bool needs_special_offset_handling,
2738 size_t local_symbol_count,
2739 const unsigned char* plocal_symbols,
2740 Relocatable_relocs*);
2741
2742 // Relocate a section during a relocatable link.
2743 void
2744 relocate_relocs(
2745 const Relocate_info<size, big_endian>*,
2746 unsigned int sh_type,
2747 const unsigned char* prelocs,
2748 size_t reloc_count,
2749 Output_section* output_section,
2750 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2751 const Relocatable_relocs*,
2752 unsigned char* view,
2753 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2754 section_size_type view_size,
2755 unsigned char* reloc_view,
2756 section_size_type reloc_view_size);
2757
2758 // Return the symbol index to use for a target specific relocation.
2759 // The only target specific relocation is R_AARCH64_TLSDESC for a
2760 // local symbol, which is an absolute reloc.
2761 unsigned int
2762 do_reloc_symbol_index(void*, unsigned int r_type) const
2763 {
2764 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2765 return 0;
2766 }
2767
2768 // Return the addend to use for a target specific relocation.
2769 uint64_t
2770 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2771
2772 // Return the PLT section.
2773 uint64_t
2774 do_plt_address_for_global(const Symbol* gsym) const
2775 { return this->plt_section()->address_for_global(gsym); }
2776
2777 uint64_t
2778 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2779 { return this->plt_section()->address_for_local(relobj, symndx); }
2780
2781 // This function should be defined in targets that can use relocation
2782 // types to determine whether a function's pointer is taken (implemented
2783 // in local_reloc_may_be_function_pointer and
2784 // global_reloc_may_be_function_pointer). ICF uses this in safe mode to
2785 // only fold those functions whose pointer is definitely not taken.
2786 bool
2787 do_can_check_for_function_pointers() const
2788 { return true; }
2789
2790 // Return the number of entries in the PLT.
2791 unsigned int
2792 plt_entry_count() const;
2793
2794 // Return the offset of the first non-reserved PLT entry.
2795 unsigned int
2796 first_plt_entry_offset() const;
2797
2798 // Return the size of each PLT entry.
2799 unsigned int
2800 plt_entry_size() const;
2801
2802 // Create a stub table.
2803 The_stub_table*
2804 new_stub_table(The_aarch64_input_section*);
2805
2806 // Create an aarch64 input section.
2807 The_aarch64_input_section*
2808 new_aarch64_input_section(Relobj*, unsigned int);
2809
2810 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2811 The_aarch64_input_section*
2812 find_aarch64_input_section(Relobj*, unsigned int) const;
2813
2814 // Return the thread control block size.
2815 unsigned int
2816 tcb_size() const { return This::TCB_SIZE; }
2817
2818 // Scan a section for stub generation.
2819 void
2820 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
2821 const unsigned char*, size_t, Output_section*,
2822 bool, const unsigned char*,
2823 Address,
2824 section_size_type);
2825
2826 // Scan a relocation section for stubs.
2827 template<int sh_type>
2828 void
2829 scan_reloc_section_for_stubs(
2830 const The_relocate_info* relinfo,
2831 const unsigned char* prelocs,
2832 size_t reloc_count,
2833 Output_section* output_section,
2834 bool needs_special_offset_handling,
2835 const unsigned char* view,
2836 Address view_address,
2837 section_size_type);
2838
2839 // Relocate a single stub.
2840 void
2841 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
2842 Output_section*, unsigned char*, Address,
2843 section_size_type);
2844
2845 // Get the default AArch64 target.
2846 static This*
2847 current_target()
2848 {
2849 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
2850 && parameters->target().get_size() == size
2851 && parameters->target().is_big_endian() == big_endian);
2852 return static_cast<This*>(parameters->sized_target<size, big_endian>());
2853 }
2854
2855
2856 // Scan erratum 843419 for a part of a section.
2857 void
2858 scan_erratum_843419_span(
2859 AArch64_relobj<size, big_endian>*,
2860 unsigned int,
2861 const section_size_type,
2862 const section_size_type,
2863 unsigned char*,
2864 Address);
2865
2866 // Scan erratum 835769 for a part of a section.
2867 void
2868 scan_erratum_835769_span(
2869 AArch64_relobj<size, big_endian>*,
2870 unsigned int,
2871 const section_size_type,
2872 const section_size_type,
2873 unsigned char*,
2874 Address);
2875
2876 protected:
2877 void
2878 do_select_as_default_target()
2879 {
2880 gold_assert(aarch64_reloc_property_table == NULL);
2881 aarch64_reloc_property_table = new AArch64_reloc_property_table();
2882 }
2883
2884 // Add a new reloc argument, returning the index in the vector.
2885 size_t
2886 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
2887 unsigned int r_sym)
2888 {
2889 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
2890 return this->tlsdesc_reloc_info_.size() - 1;
2891 }
2892
2893 virtual Output_data_plt_aarch64<size, big_endian>*
2894 do_make_data_plt(Layout* layout,
2895 Output_data_got_aarch64<size, big_endian>* got,
2896 Output_data_space* got_plt,
2897 Output_data_space* got_irelative)
2898 {
2899 return new Output_data_plt_aarch64_standard<size, big_endian>(
2900 layout, got, got_plt, got_irelative);
2901 }
2902
2903
2904 // do_make_elf_object to override the same function in the base class.
2905 Object*
2906 do_make_elf_object(const std::string&, Input_file*, off_t,
2907 const elfcpp::Ehdr<size, big_endian>&);
2908
2909 Output_data_plt_aarch64<size, big_endian>*
2910 make_data_plt(Layout* layout,
2911 Output_data_got_aarch64<size, big_endian>* got,
2912 Output_data_space* got_plt,
2913 Output_data_space* got_irelative)
2914 {
2915 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
2916 }
2917
2918 // We only need to generate stubs, and hence perform relaxation, if we are
2919 // not doing a relocatable link.
2920 virtual bool
2921 do_may_relax() const
2922 { return !parameters->options().relocatable(); }
2923
2924 // Relaxation hook. This is where we do stub generation.
2925 virtual bool
2926 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
2927
2928 void
2929 group_sections(Layout* layout,
2930 section_size_type group_size,
2931 bool stubs_always_after_branch,
2932 const Task* task);
2933
2934 void
2935 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
2936 const Sized_symbol<size>*, unsigned int,
2937 const Symbol_value<size>*,
2938 typename elfcpp::Elf_types<size>::Elf_Swxword,
2939 Address Elf_Addr);
2940
2941 // Make an output section.
2942 Output_section*
2943 do_make_output_section(const char* name, elfcpp::Elf_Word type,
2944 elfcpp::Elf_Xword flags)
2945 { return new The_aarch64_output_section(name, type, flags); }
2946
2947 private:
2948 // The class which scans relocations.
2949 class Scan
2950 {
2951 public:
2952 Scan()
2953 : issued_non_pic_error_(false)
2954 { }
2955
2956 inline void
2957 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
2958 Sized_relobj_file<size, big_endian>* object,
2959 unsigned int data_shndx,
2960 Output_section* output_section,
2961 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
2962 const elfcpp::Sym<size, big_endian>& lsym,
2963 bool is_discarded);
2964
2965 inline void
2966 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
2967 Sized_relobj_file<size, big_endian>* object,
2968 unsigned int data_shndx,
2969 Output_section* output_section,
2970 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
2971 Symbol* gsym);
2972
2973 inline bool
2974 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
2975 Target_aarch64<size, big_endian>* ,
2976 Sized_relobj_file<size, big_endian>* ,
2977 unsigned int ,
2978 Output_section* ,
2979 const elfcpp::Rela<size, big_endian>& ,
2980 unsigned int r_type,
2981 const elfcpp::Sym<size, big_endian>&);
2982
2983 inline bool
2984 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
2985 Target_aarch64<size, big_endian>* ,
2986 Sized_relobj_file<size, big_endian>* ,
2987 unsigned int ,
2988 Output_section* ,
2989 const elfcpp::Rela<size, big_endian>& ,
2990 unsigned int r_type,
2991 Symbol* gsym);
2992
2993 private:
2994 static void
2995 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
2996 unsigned int r_type);
2997
2998 static void
2999 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3000 unsigned int r_type, Symbol*);
3001
3002 inline bool
3003 possible_function_pointer_reloc(unsigned int r_type);
3004
3005 void
3006 check_non_pic(Relobj*, unsigned int r_type);
3007
3008 bool
3009 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3010 unsigned int r_type);
3011
3012 // Whether we have issued an error about a non-PIC compilation.
3013 bool issued_non_pic_error_;
3014 };
3015
3016 // The class which implements relocation.
3017 class Relocate
3018 {
3019 public:
3020 Relocate()
3021 : skip_call_tls_get_addr_(false)
3022 { }
3023
3024 ~Relocate()
3025 { }
3026
3027 // Do a relocation. Return false if the caller should not issue
3028 // any warnings about this relocation.
3029 inline bool
3030 relocate(const Relocate_info<size, big_endian>*, Target_aarch64*,
3031 Output_section*,
3032 size_t relnum, const elfcpp::Rela<size, big_endian>&,
3033 unsigned int r_type, const Sized_symbol<size>*,
3034 const Symbol_value<size>*,
3035 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3036 section_size_type);
3037
3038 private:
3039 inline typename AArch64_relocate_functions<size, big_endian>::Status
3040 relocate_tls(const Relocate_info<size, big_endian>*,
3041 Target_aarch64<size, big_endian>*,
3042 size_t,
3043 const elfcpp::Rela<size, big_endian>&,
3044 unsigned int r_type, const Sized_symbol<size>*,
3045 const Symbol_value<size>*,
3046 unsigned char*,
3047 typename elfcpp::Elf_types<size>::Elf_Addr);
3048
3049 inline typename AArch64_relocate_functions<size, big_endian>::Status
3050 tls_gd_to_le(
3051 const Relocate_info<size, big_endian>*,
3052 Target_aarch64<size, big_endian>*,
3053 const elfcpp::Rela<size, big_endian>&,
3054 unsigned int,
3055 unsigned char*,
3056 const Symbol_value<size>*);
3057
3058 inline typename AArch64_relocate_functions<size, big_endian>::Status
3059 tls_ld_to_le(
3060 const Relocate_info<size, big_endian>*,
3061 Target_aarch64<size, big_endian>*,
3062 const elfcpp::Rela<size, big_endian>&,
3063 unsigned int,
3064 unsigned char*,
3065 const Symbol_value<size>*);
3066
3067 inline typename AArch64_relocate_functions<size, big_endian>::Status
3068 tls_ie_to_le(
3069 const Relocate_info<size, big_endian>*,
3070 Target_aarch64<size, big_endian>*,
3071 const elfcpp::Rela<size, big_endian>&,
3072 unsigned int,
3073 unsigned char*,
3074 const Symbol_value<size>*);
3075
3076 inline typename AArch64_relocate_functions<size, big_endian>::Status
3077 tls_desc_gd_to_le(
3078 const Relocate_info<size, big_endian>*,
3079 Target_aarch64<size, big_endian>*,
3080 const elfcpp::Rela<size, big_endian>&,
3081 unsigned int,
3082 unsigned char*,
3083 const Symbol_value<size>*);
3084
3085 inline typename AArch64_relocate_functions<size, big_endian>::Status
3086 tls_desc_gd_to_ie(
3087 const Relocate_info<size, big_endian>*,
3088 Target_aarch64<size, big_endian>*,
3089 const elfcpp::Rela<size, big_endian>&,
3090 unsigned int,
3091 unsigned char*,
3092 const Symbol_value<size>*,
3093 typename elfcpp::Elf_types<size>::Elf_Addr,
3094 typename elfcpp::Elf_types<size>::Elf_Addr);
3095
3096 bool skip_call_tls_get_addr_;
3097
3098 }; // End of class Relocate
3099
3100 // A class which returns the size required for a relocation type,
3101 // used while scanning relocs during a relocatable link.
3102 class Relocatable_size_for_reloc
3103 {
3104 public:
3105 unsigned int
3106 get_size_for_reloc(unsigned int, Relobj*);
3107 };
3108
3109 // Adjust TLS relocation type based on the options and whether this
3110 // is a local symbol.
3111 static tls::Tls_optimization
3112 optimize_tls_reloc(bool is_final, int r_type);
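// For example, when linking an executable, a general-dynamic access to a
// symbol that resolves locally ("is_final") can normally be relaxed to a
// local-exec access; the tls_*_to_le() and tls_desc_gd_to_ie() helpers in
// class Relocate above implement such transitions.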
3113
3114 // Get the GOT section, creating it if necessary.
3115 Output_data_got_aarch64<size, big_endian>*
3116 got_section(Symbol_table*, Layout*);
3117
3118 // Get the GOT PLT section.
3119 Output_data_space*
3120 got_plt_section() const
3121 {
3122 gold_assert(this->got_plt_ != NULL);
3123 return this->got_plt_;
3124 }
3125
3126 // Get the GOT section for TLSDESC entries.
3127 Output_data_got<size, big_endian>*
3128 got_tlsdesc_section() const
3129 {
3130 gold_assert(this->got_tlsdesc_ != NULL);
3131 return this->got_tlsdesc_;
3132 }
3133
3134 // Create the PLT section.
3135 void
3136 make_plt_section(Symbol_table* symtab, Layout* layout);
3137
3138 // Create a PLT entry for a global symbol.
3139 void
3140 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3141
3142 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3143 void
3144 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3145 Sized_relobj_file<size, big_endian>* relobj,
3146 unsigned int local_sym_index);
3147
3148 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3149 void
3150 define_tls_base_symbol(Symbol_table*, Layout*);
3151
3152 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3153 void
3154 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3155
3156 // Create a GOT entry for the TLS module index.
3157 unsigned int
3158 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3159 Sized_relobj_file<size, big_endian>* object);
3160
3161 // Get the PLT section.
3162 Output_data_plt_aarch64<size, big_endian>*
3163 plt_section() const
3164 {
3165 gold_assert(this->plt_ != NULL);
3166 return this->plt_;
3167 }
3168
3169 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769.
3170 void create_erratum_stub(
3171 AArch64_relobj<size, big_endian>* relobj,
3172 unsigned int shndx,
3173 section_size_type erratum_insn_offset,
3174 Address erratum_address,
3175 typename Insn_utilities::Insntype erratum_insn,
3176 int erratum_type);
3177
3178 // Return whether this is a 3-insn erratum sequence.
3179 bool is_erratum_843419_sequence(
3180 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3181 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3182 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3183
3184 // Return whether this is a 835769 sequence.
3185 // (Similarly implemented as in elfnn-aarch64.c.)
3186 bool is_erratum_835769_sequence(
3187 typename elfcpp::Swap<32,big_endian>::Valtype,
3188 typename elfcpp::Swap<32,big_endian>::Valtype);
3189
3190 // Get the dynamic reloc section, creating it if necessary.
3191 Reloc_section*
3192 rela_dyn_section(Layout*);
3193
3194 // Get the section to use for TLSDESC relocations.
3195 Reloc_section*
3196 rela_tlsdesc_section(Layout*) const;
3197
3198 // Get the section to use for IRELATIVE relocations.
3199 Reloc_section*
3200 rela_irelative_section(Layout*);
3201
3202 // Add a potential copy relocation.
3203 void
3204 copy_reloc(Symbol_table* symtab, Layout* layout,
3205 Sized_relobj_file<size, big_endian>* object,
3206 unsigned int shndx, Output_section* output_section,
3207 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3208 {
3209 this->copy_relocs_.copy_reloc(symtab, layout,
3210 symtab->get_sized_symbol<size>(sym),
3211 object, shndx, output_section,
3212 reloc, this->rela_dyn_section(layout));
3213 }
3214
3215 // Information about this specific target which we pass to the
3216 // general Target structure.
3217 static const Target::Target_info aarch64_info;
3218
3219 // The types of GOT entries needed for this platform.
3220 // These values are exposed to the ABI in an incremental link.
3221 // Do not renumber existing values without changing the version
3222 // number of the .gnu_incremental_inputs section.
3223 enum Got_type
3224 {
3225 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3226 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3227 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3228 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3229 };
3230
3231 // This type is used as the argument to the target specific
3232 // relocation routines. The only target specific reloc is
3233 // R_AARCH64_TLSDESC against a local symbol.
3234 struct Tlsdesc_info
3235 {
3236 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3237 unsigned int a_r_sym)
3238 : object(a_object), r_sym(a_r_sym)
3239 { }
3240
3241 // The object in which the local symbol is defined.
3242 Sized_relobj_file<size, big_endian>* object;
3243 // The local symbol index in the object.
3244 unsigned int r_sym;
3245 };
3246
3247 // The GOT section.
3248 Output_data_got_aarch64<size, big_endian>* got_;
3249 // The PLT section.
3250 Output_data_plt_aarch64<size, big_endian>* plt_;
3251 // The GOT PLT section.
3252 Output_data_space* got_plt_;
3253 // The GOT section for IRELATIVE relocations.
3254 Output_data_space* got_irelative_;
3255 // The GOT section for TLSDESC relocations.
3256 Output_data_got<size, big_endian>* got_tlsdesc_;
3257 // The _GLOBAL_OFFSET_TABLE_ symbol.
3258 Symbol* global_offset_table_;
3259 // The dynamic reloc section.
3260 Reloc_section* rela_dyn_;
3261 // The section to use for IRELATIVE relocs.
3262 Reloc_section* rela_irelative_;
3263 // Relocs saved to avoid a COPY reloc.
3264 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3265 // Offset of the GOT entry for the TLS module index.
3266 unsigned int got_mod_index_offset_;
3267 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3268 // specific relocation. Here we store the object and local symbol
3269 // index for the relocation.
3270 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3271 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3272 bool tls_base_symbol_defined_;
3273 // List of stub_tables
3274 Stub_table_list stub_tables_;
3275 // Actual stub group size
3276 section_size_type stub_group_size_;
3277 AArch64_input_section_map aarch64_input_section_map_;
3278 }; // End of Target_aarch64
3279
3280
3281 template<>
3282 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3283 {
3284 64, // size
3285 false, // is_big_endian
3286 elfcpp::EM_AARCH64, // machine_code
3287 false, // has_make_symbol
3288 false, // has_resolve
3289 false, // has_code_fill
3290 true, // is_default_stack_executable
3291 true, // can_icf_inline_merge_sections
3292 '\0', // wrap_char
3293 "/lib/ld.so.1", // program interpreter
3294 0x400000, // default_text_segment_address
3295 0x1000, // abi_pagesize (overridable by -z max-page-size)
3296 0x1000, // common_pagesize (overridable by -z common-page-size)
3297 false, // isolate_execinstr
3298 0, // rosegment_gap
3299 elfcpp::SHN_UNDEF, // small_common_shndx
3300 elfcpp::SHN_UNDEF, // large_common_shndx
3301 0, // small_common_section_flags
3302 0, // large_common_section_flags
3303 NULL, // attributes_section
3304 NULL, // attributes_vendor
3305 "_start" // entry_symbol_name
3306 };
3307
3308 template<>
3309 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3310 {
3311 32, // size
3312 false, // is_big_endian
3313 elfcpp::EM_AARCH64, // machine_code
3314 false, // has_make_symbol
3315 false, // has_resolve
3316 false, // has_code_fill
3317 true, // is_default_stack_executable
3318 false, // can_icf_inline_merge_sections
3319 '\0', // wrap_char
3320 "/lib/ld.so.1", // program interpreter
3321 0x400000, // default_text_segment_address
3322 0x1000, // abi_pagesize (overridable by -z max-page-size)
3323 0x1000, // common_pagesize (overridable by -z common-page-size)
3324 false, // isolate_execinstr
3325 0, // rosegment_gap
3326 elfcpp::SHN_UNDEF, // small_common_shndx
3327 elfcpp::SHN_UNDEF, // large_common_shndx
3328 0, // small_common_section_flags
3329 0, // large_common_section_flags
3330 NULL, // attributes_section
3331 NULL, // attributes_vendor
3332 "_start" // entry_symbol_name
3333 };
3334
3335 template<>
3336 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3337 {
3338 64, // size
3339 true, // is_big_endian
3340 elfcpp::EM_AARCH64, // machine_code
3341 false, // has_make_symbol
3342 false, // has_resolve
3343 false, // has_code_fill
3344 true, // is_default_stack_executable
3345 true, // can_icf_inline_merge_sections
3346 '\0', // wrap_char
3347 "/lib/ld.so.1", // program interpreter
3348 0x400000, // default_text_segment_address
3349 0x1000, // abi_pagesize (overridable by -z max-page-size)
3350 0x1000, // common_pagesize (overridable by -z common-page-size)
3351 false, // isolate_execinstr
3352 0, // rosegment_gap
3353 elfcpp::SHN_UNDEF, // small_common_shndx
3354 elfcpp::SHN_UNDEF, // large_common_shndx
3355 0, // small_common_section_flags
3356 0, // large_common_section_flags
3357 NULL, // attributes_section
3358 NULL, // attributes_vendor
3359 "_start" // entry_symbol_name
3360 };
3361
3362 template<>
3363 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3364 {
3365 32, // size
3366 true, // is_big_endian
3367 elfcpp::EM_AARCH64, // machine_code
3368 false, // has_make_symbol
3369 false, // has_resolve
3370 false, // has_code_fill
3371 true, // is_default_stack_executable
3372 false, // can_icf_inline_merge_sections
3373 '\0', // wrap_char
3374 "/lib/ld.so.1", // program interpreter
3375 0x400000, // default_text_segment_address
3376 0x1000, // abi_pagesize (overridable by -z max-page-size)
3377 0x1000, // common_pagesize (overridable by -z common-page-size)
3378 false, // isolate_execinstr
3379 0, // rosegment_gap
3380 elfcpp::SHN_UNDEF, // small_common_shndx
3381 elfcpp::SHN_UNDEF, // large_common_shndx
3382 0, // small_common_section_flags
3383 0, // large_common_section_flags
3384 NULL, // attributes_section
3385 NULL, // attributes_vendor
3386 "_start" // entry_symbol_name
3387 };
3388
3389 // Get the GOT section, creating it if necessary.
3390
3391 template<int size, bool big_endian>
3392 Output_data_got_aarch64<size, big_endian>*
3393 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3394 Layout* layout)
3395 {
3396 if (this->got_ == NULL)
3397 {
3398 gold_assert(symtab != NULL && layout != NULL);
3399
3400 // When using -z now, we can treat .got.plt as a relro section.
3401 // Without -z now, it is modified after program startup by lazy
3402 // PLT relocations.
3403 bool is_got_plt_relro = parameters->options().now();
3404 Output_section_order got_order = (is_got_plt_relro
3405 ? ORDER_RELRO
3406 : ORDER_RELRO_LAST);
3407 Output_section_order got_plt_order = (is_got_plt_relro
3408 ? ORDER_RELRO
3409 : ORDER_NON_RELRO_FIRST);
3410
3411 // Layout of .got and .got.plt sections.
3412 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3413 // ...
3414 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3415 // .gotplt[1] reserved for ld.so (resolver)
3416 // .gotplt[2] reserved
3417
3418 // Generate .got section.
3419 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3420 layout);
3421 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3422 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3423 this->got_, got_order, true);
3424 // The first word of GOT is reserved for the address of .dynamic.
3425 // We put 0 here now. The value will be replaced later in
3426 // Output_data_got_aarch64::do_write.
3427 this->got_->add_constant(0);
3428
3429 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3430 // Its value points to the start of the .got section,
3431 // even if there is a .got.plt section.
3432 this->global_offset_table_ =
3433 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3434 Symbol_table::PREDEFINED,
3435 this->got_,
3436 0, 0, elfcpp::STT_OBJECT,
3437 elfcpp::STB_LOCAL,
3438 elfcpp::STV_HIDDEN, 0,
3439 false, false);
3440
3441 // Generate .got.plt section.
3442 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3443 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3444 (elfcpp::SHF_ALLOC
3445 | elfcpp::SHF_WRITE),
3446 this->got_plt_, got_plt_order,
3447 is_got_plt_relro);
3448
3449 // The first three entries are reserved.
3450 this->got_plt_->set_current_data_size(
3451 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3452
3453 // If there are any IRELATIVE relocations, they get GOT entries
3454 // in .got.plt after the jump slot entries.
3455 this->got_irelative_ = new Output_data_space(size / 8,
3456 "** GOT IRELATIVE PLT");
3457 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3458 (elfcpp::SHF_ALLOC
3459 | elfcpp::SHF_WRITE),
3460 this->got_irelative_,
3461 got_plt_order,
3462 is_got_plt_relro);
3463
3464 // If there are any TLSDESC relocations, they get GOT entries in
3465 // .got.plt after the jump slot and IRELATIVE entries.
3466 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3467 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3468 (elfcpp::SHF_ALLOC
3469 | elfcpp::SHF_WRITE),
3470 this->got_tlsdesc_,
3471 got_plt_order,
3472 is_got_plt_relro);
3473
3474 if (!is_got_plt_relro)
3475 {
3476 // Those bytes can go into the relro segment.
3477 layout->increase_relro(
3478 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3479 }
3480
3481 }
3482 return this->got_;
3483 }
3484
3485 // Get the dynamic reloc section, creating it if necessary.
3486
3487 template<int size, bool big_endian>
3488 typename Target_aarch64<size, big_endian>::Reloc_section*
3489 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3490 {
3491 if (this->rela_dyn_ == NULL)
3492 {
3493 gold_assert(layout != NULL);
3494 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3495 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3496 elfcpp::SHF_ALLOC, this->rela_dyn_,
3497 ORDER_DYNAMIC_RELOCS, false);
3498 }
3499 return this->rela_dyn_;
3500 }
3501
3502 // Get the section to use for IRELATIVE relocs, creating it if
3503 // necessary. These go in .rela.dyn, but only after all other dynamic
3504 // relocations. They need to follow the other dynamic relocations so
3505 // that they can refer to global variables initialized by those
3506 // relocs.
3507
3508 template<int size, bool big_endian>
3509 typename Target_aarch64<size, big_endian>::Reloc_section*
3510 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3511 {
3512 if (this->rela_irelative_ == NULL)
3513 {
3514 // Make sure we have already created the dynamic reloc section.
3515 this->rela_dyn_section(layout);
3516 this->rela_irelative_ = new Reloc_section(false);
3517 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3518 elfcpp::SHF_ALLOC, this->rela_irelative_,
3519 ORDER_DYNAMIC_RELOCS, false);
3520 gold_assert(this->rela_dyn_->output_section()
3521 == this->rela_irelative_->output_section());
3522 }
3523 return this->rela_irelative_;
3524 }
3525
3526
3527 // do_make_elf_object overrides the same function in the base class. We need
3528 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3529 // store backend-specific information, hence we need our own ELF object
3530 // creation.
3531
3532 template<int size, bool big_endian>
3533 Object*
3534 Target_aarch64<size, big_endian>::do_make_elf_object(
3535 const std::string& name,
3536 Input_file* input_file,
3537 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3538 {
3539 int et = ehdr.get_e_type();
3540 // ET_EXEC files are valid input for --just-symbols/-R,
3541 // and we treat them as relocatable objects.
3542 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3543 return Sized_target<size, big_endian>::do_make_elf_object(
3544 name, input_file, offset, ehdr);
3545 else if (et == elfcpp::ET_REL)
3546 {
3547 AArch64_relobj<size, big_endian>* obj =
3548 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3549 obj->setup();
3550 return obj;
3551 }
3552 else if (et == elfcpp::ET_DYN)
3553 {
3554 // Keep base implementation.
3555 Sized_dynobj<size, big_endian>* obj =
3556 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3557 obj->setup();
3558 return obj;
3559 }
3560 else
3561 {
3562 gold_error(_("%s: unsupported ELF file type %d"),
3563 name.c_str(), et);
3564 return NULL;
3565 }
3566 }
3567
3568
3569 // Scan a relocation for stub generation.
3570
3571 template<int size, bool big_endian>
3572 void
3573 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3574 const Relocate_info<size, big_endian>* relinfo,
3575 unsigned int r_type,
3576 const Sized_symbol<size>* gsym,
3577 unsigned int r_sym,
3578 const Symbol_value<size>* psymval,
3579 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3580 Address address)
3581 {
3582 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3583 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3584
3585 Symbol_value<size> symval;
3586 if (gsym != NULL)
3587 {
3588 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3589 get_reloc_property(r_type);
3590 if (gsym->use_plt_offset(arp->reference_flags()))
3591 {
3592 // This uses a PLT, change the symbol value.
3593 symval.set_output_value(this->plt_section()->address()
3594 + gsym->plt_offset());
3595 psymval = &symval;
3596 }
3597 else if (gsym->is_undefined())
3598 // There is no need to generate a stub if the symbol is undefined.
3599 return;
3600 }
3601
3602 // Get the symbol value.
3603 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3604
3605 // Compute the branch destination. For CALL26/JUMP26 the destination is
3606 // simply S + A; AArch64 branch offsets have no extra pipeline adjustment.
3607 Address destination = static_cast<Address>(-1);
3608 switch (r_type)
3609 {
3610 case elfcpp::R_AARCH64_CALL26:
3611 case elfcpp::R_AARCH64_JUMP26:
3612 destination = value + addend;
3613 break;
3614 default:
3615 gold_unreachable();
3616 }
3617
3618 int stub_type = The_reloc_stub::
3619 stub_type_for_reloc(r_type, address, destination);
3620 if (stub_type == ST_NONE)
3621 return;
3622
3623 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3624 gold_assert(stub_table != NULL);
3625
3626 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3627 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3628 if (stub == NULL)
3629 {
3630 stub = new The_reloc_stub(stub_type);
3631 stub_table->add_reloc_stub(stub, key);
3632 }
3633 stub->set_destination_address(destination);
3634 } // End of Target_aarch64::scan_reloc_for_stub
3635
3636
3637 // This function scans a relocation section for stub generation.
3638 // Only R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations are considered;
3639 // for each one we decide whether the branch target needs a stub and, if
3640 // so, record the stub in the object's stub table.
3641
3642 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3643 // SHT_REL or SHT_RELA.
3644
3645 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3646 // of relocs. OUTPUT_SECTION is the output section.
3647 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3648 // mapped to output offsets.
3649
3650 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3651 // VIEW_SIZE is the size. These refer to the input section, unless
3652 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3653 // the output section.
3654
3655 template<int size, bool big_endian>
3656 template<int sh_type>
3657 void inline
3658 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3659 const Relocate_info<size, big_endian>* relinfo,
3660 const unsigned char* prelocs,
3661 size_t reloc_count,
3662 Output_section* /*output_section*/,
3663 bool /*needs_special_offset_handling*/,
3664 const unsigned char* /*view*/,
3665 Address view_address,
3666 section_size_type)
3667 {
3668 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3669
3670 const int reloc_size =
3671 Reloc_types<sh_type,size,big_endian>::reloc_size;
3672 AArch64_relobj<size, big_endian>* object =
3673 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3674 unsigned int local_count = object->local_symbol_count();
3675
3676 gold::Default_comdat_behavior default_comdat_behavior;
3677 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3678
3679 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3680 {
3681 Reltype reloc(prelocs);
3682 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3683 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3684 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3685 if (r_type != elfcpp::R_AARCH64_CALL26
3686 && r_type != elfcpp::R_AARCH64_JUMP26)
3687 continue;
3688
3689 section_offset_type offset =
3690 convert_to_section_size_type(reloc.get_r_offset());
3691
3692 // Get the addend.
3693 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3694 reloc.get_r_addend();
3695
3696 const Sized_symbol<size>* sym;
3697 Symbol_value<size> symval;
3698 const Symbol_value<size> *psymval;
3699 bool is_defined_in_discarded_section;
3700 unsigned int shndx;
3701 if (r_sym < local_count)
3702 {
3703 sym = NULL;
3704 psymval = object->local_symbol(r_sym);
3705
3706 // If the local symbol belongs to a section we are discarding,
3707 // and that section is a debug section, try to find the
3708 // corresponding kept section and map this symbol to its
3709 // counterpart in the kept section. The symbol must not
3710 // correspond to a section we are folding.
3711 bool is_ordinary;
3712 shndx = psymval->input_shndx(&is_ordinary);
3713 is_defined_in_discarded_section =
3714 (is_ordinary
3715 && shndx != elfcpp::SHN_UNDEF
3716 && !object->is_section_included(shndx)
3717 && !relinfo->symtab->is_section_folded(object, shndx));
3718
3719 // We need to compute the would-be final value of this local
3720 // symbol.
3721 if (!is_defined_in_discarded_section)
3722 {
3723 typedef Sized_relobj_file<size, big_endian> ObjType;
3724 typename ObjType::Compute_final_local_value_status status =
3725 object->compute_final_local_value(r_sym, psymval, &symval,
3726 relinfo->symtab);
3727 if (status == ObjType::CFLV_OK)
3728 {
3729 // Currently we cannot handle a branch to a target in
3730 // a merged section. If this is the case, issue an error
3731 // and also free the merge symbol value.
3732 if (!symval.has_output_value())
3733 {
3734 const std::string& section_name =
3735 object->section_name(shndx);
3736 object->error(_("cannot handle branch to local %u "
3737 "in a merged section %s"),
3738 r_sym, section_name.c_str());
3739 }
3740 psymval = &symval;
3741 }
3742 else
3743 {
3744 // We cannot determine the final value.
3745 continue;
3746 }
3747 }
3748 }
3749 else
3750 {
3751 const Symbol* gsym;
3752 gsym = object->global_symbol(r_sym);
3753 gold_assert(gsym != NULL);
3754 if (gsym->is_forwarder())
3755 gsym = relinfo->symtab->resolve_forwards(gsym);
3756
3757 sym = static_cast<const Sized_symbol<size>*>(gsym);
3758 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3759 symval.set_output_symtab_index(sym->symtab_index());
3760 else
3761 symval.set_no_output_symtab_entry();
3762
3763 // We need to compute the would-be final value of this global
3764 // symbol.
3765 const Symbol_table* symtab = relinfo->symtab;
3766 const Sized_symbol<size>* sized_symbol =
3767 symtab->get_sized_symbol<size>(gsym);
3768 Symbol_table::Compute_final_value_status status;
3769 typename elfcpp::Elf_types<size>::Elf_Addr value =
3770 symtab->compute_final_value<size>(sized_symbol, &status);
3771
3772 // Skip this if the symbol has no output section.
3773 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3774 continue;
3775 symval.set_output_value(value);
3776
3777 if (gsym->type() == elfcpp::STT_TLS)
3778 symval.set_is_tls_symbol();
3779 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3780 symval.set_is_ifunc_symbol();
3781 psymval = &symval;
3782
3783 is_defined_in_discarded_section =
3784 (gsym->is_defined_in_discarded_section()
3785 && gsym->is_undefined());
3786 shndx = 0;
3787 }
3788
3789 Symbol_value<size> symval2;
3790 if (is_defined_in_discarded_section)
3791 {
3792 if (comdat_behavior == CB_UNDETERMINED)
3793 {
3794 std::string name = object->section_name(relinfo->data_shndx);
3795 comdat_behavior = default_comdat_behavior.get(name.c_str());
3796 }
3797 if (comdat_behavior == CB_PRETEND)
3798 {
3799 bool found;
3800 typename elfcpp::Elf_types<size>::Elf_Addr value =
3801 object->map_to_kept_section(shndx, &found);
3802 if (found)
3803 symval2.set_output_value(value + psymval->input_value());
3804 else
3805 symval2.set_output_value(0);
3806 }
3807 else
3808 {
3809 if (comdat_behavior == CB_WARNING)
3810 gold_warning_at_location(relinfo, i, offset,
3811 _("relocation refers to discarded "
3812 "section"));
3813 symval2.set_output_value(0);
3814 }
3815 symval2.set_no_output_symtab_entry();
3816 psymval = &symval2;
3817 }
3818
3819 // If the symbol is a section symbol, we don't know the actual type of
3820 // the destination. Give up.
3821 if (psymval->is_section_symbol())
3822 continue;
3823
3824 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
3825 addend, view_address + offset);
3826 } // End of iterating relocs in a section
3827 } // End of Target_aarch64::scan_reloc_section_for_stubs
3828
3829
3830 // Scan an input section for stub generation.
3831
3832 template<int size, bool big_endian>
3833 void
3834 Target_aarch64<size, big_endian>::scan_section_for_stubs(
3835 const Relocate_info<size, big_endian>* relinfo,
3836 unsigned int sh_type,
3837 const unsigned char* prelocs,
3838 size_t reloc_count,
3839 Output_section* output_section,
3840 bool needs_special_offset_handling,
3841 const unsigned char* view,
3842 Address view_address,
3843 section_size_type view_size)
3844 {
3845 gold_assert(sh_type == elfcpp::SHT_RELA);
3846 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
3847 relinfo,
3848 prelocs,
3849 reloc_count,
3850 output_section,
3851 needs_special_offset_handling,
3852 view,
3853 view_address,
3854 view_size);
3855 }
3856
3857
3858 // Relocate a single stub.
3859
3860 template<int size, bool big_endian>
3861 void Target_aarch64<size, big_endian>::
3862 relocate_stub(The_reloc_stub* stub,
3863 const The_relocate_info*,
3864 Output_section*,
3865 unsigned char* view,
3866 Address address,
3867 section_size_type)
3868 {
3869 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
3870 typedef typename The_reloc_functions::Status The_reloc_functions_status;
3871 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
3872
3873 Insntype* ip = reinterpret_cast<Insntype*>(view);
3874 int insn_number = stub->insn_num();
3875 const uint32_t* insns = stub->insns();
3876 // Check the insns are really those stub insns.
3877 for (int i = 0; i < insn_number; ++i)
3878 {
3879 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
3880 gold_assert(((uint32_t)insn == insns[i]));
3881 }
3882
3883 Address dest = stub->destination_address();
3884
3885 switch(stub->type())
3886 {
3887 case ST_ADRP_BRANCH:
3888 {
3889 // 1st reloc is ADR_PREL_PG_HI21
3890 The_reloc_functions_status status =
3891 The_reloc_functions::adrp(view, dest, address);
3892 // An error should never arise in the above step. If one does, please
3893 // check 'aarch64_valid_for_adrp_p'.
3894 gold_assert(status == The_reloc_functions::STATUS_OKAY);
3895
3896 // 2nd reloc is ADD_ABS_LO12_NC
3897 const AArch64_reloc_property* arp =
3898 aarch64_reloc_property_table->get_reloc_property(
3899 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
3900 gold_assert(arp != NULL);
3901 status = The_reloc_functions::template
3902 rela_general<32>(view + 4, dest, 0, arp);
3903 // An error should never arise; this is an "_NC" (no check) relocation.
3904 gold_assert(status == The_reloc_functions::STATUS_OKAY);
3905 }
3906 break;
3907
3908 case ST_LONG_BRANCH_ABS:
3909 // 1st reloc is R_AARCH64_PREL64, at offset 8
3910 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
3911 break;
3912
3913 case ST_LONG_BRANCH_PCREL:
3914 {
3915 // "PC" calculation is the 2nd insn in the stub.
3916 uint64_t offset = dest - (address + 4);
3917 // The offset is stored in insn slots 4 and 5 (byte offset 16).
3918 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
3919 }
3920 break;
3921
3922 default:
3923 gold_unreachable();
3924 }
3925 }
3926
3927
3928 // A class to handle the PLT data.
3929 // This is an abstract base class that handles most of the linker details
3930 // but does not know the actual contents of PLT entries. The derived
3931 // classes below fill in those details.
3932
3933 template<int size, bool big_endian>
3934 class Output_data_plt_aarch64 : public Output_section_data
3935 {
3936 public:
3937 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
3938 Reloc_section;
3939 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
3940
3941 Output_data_plt_aarch64(Layout* layout,
3942 uint64_t addralign,
3943 Output_data_got_aarch64<size, big_endian>* got,
3944 Output_data_space* got_plt,
3945 Output_data_space* got_irelative)
3946 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
3947 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
3948 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
3949 { this->init(layout); }
3950
3951 // Initialize the PLT section.
3952 void
3953 init(Layout* layout);
3954
3955 // Add an entry to the PLT.
3956 void
3957 add_entry(Symbol_table*, Layout*, Symbol* gsym);
3958
3959 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
3960 unsigned int
3961 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
3962 Sized_relobj_file<size, big_endian>* relobj,
3963 unsigned int local_sym_index);
3964
3965 // Add the relocation for a PLT entry.
3966 void
3967 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
3968 unsigned int got_offset);
3969
3970 // Add the reserved TLSDESC_PLT entry to the PLT.
3971 void
3972 reserve_tlsdesc_entry(unsigned int got_offset)
3973 { this->tlsdesc_got_offset_ = got_offset; }
3974
3975 // Return true if a TLSDESC_PLT entry has been reserved.
3976 bool
3977 has_tlsdesc_entry() const
3978 { return this->tlsdesc_got_offset_ != -1U; }
3979
3980 // Return the GOT offset for the reserved TLSDESC_PLT entry.
3981 unsigned int
3982 get_tlsdesc_got_offset() const
3983 { return this->tlsdesc_got_offset_; }
3984
3985 // Return the PLT offset of the reserved TLSDESC_PLT entry.
3986 unsigned int
3987 get_tlsdesc_plt_offset() const
3988 {
3989 return (this->first_plt_entry_offset() +
3990 (this->count_ + this->irelative_count_)
3991 * this->get_plt_entry_size());
3992 }
3993
3994 // Return the .rela.plt section data.
3995 Reloc_section*
3996 rela_plt()
3997 { return this->rel_; }
3998
3999 // Return where the TLSDESC relocations should go.
4000 Reloc_section*
4001 rela_tlsdesc(Layout*);
4002
4003 // Return where the IRELATIVE relocations should go in the PLT
4004 // relocations.
4005 Reloc_section*
4006 rela_irelative(Symbol_table*, Layout*);
4007
4008 // Return whether we created a section for IRELATIVE relocations.
4009 bool
4010 has_irelative_section() const
4011 { return this->irelative_rel_ != NULL; }
4012
4013 // Return the number of PLT entries.
4014 unsigned int
4015 entry_count() const
4016 { return this->count_ + this->irelative_count_; }
4017
4018 // Return the offset of the first non-reserved PLT entry.
4019 unsigned int
4020 first_plt_entry_offset() const
4021 { return this->do_first_plt_entry_offset(); }
4022
4023 // Return the size of a PLT entry.
4024 unsigned int
4025 get_plt_entry_size() const
4026 { return this->do_get_plt_entry_size(); }
4027
4028 // Return the reserved tlsdesc entry size.
4029 unsigned int
4030 get_plt_tlsdesc_entry_size() const
4031 { return this->do_get_plt_tlsdesc_entry_size(); }
4032
4033 // Return the PLT address to use for a global symbol.
4034 uint64_t
4035 address_for_global(const Symbol*);
4036
4037 // Return the PLT address to use for a local symbol.
4038 uint64_t
4039 address_for_local(const Relobj*, unsigned int symndx);
4040
4041 protected:
4042 // Fill in the first PLT entry.
4043 void
4044 fill_first_plt_entry(unsigned char* pov,
4045 Address got_address,
4046 Address plt_address)
4047 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4048
4049 // Fill in a normal PLT entry.
4050 void
4051 fill_plt_entry(unsigned char* pov,
4052 Address got_address,
4053 Address plt_address,
4054 unsigned int got_offset,
4055 unsigned int plt_offset)
4056 {
4057 this->do_fill_plt_entry(pov, got_address, plt_address,
4058 got_offset, plt_offset);
4059 }
4060
4061 // Fill in the reserved TLSDESC PLT entry.
4062 void
4063 fill_tlsdesc_entry(unsigned char* pov,
4064 Address gotplt_address,
4065 Address plt_address,
4066 Address got_base,
4067 unsigned int tlsdesc_got_offset,
4068 unsigned int plt_offset)
4069 {
4070 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4071 tlsdesc_got_offset, plt_offset);
4072 }
4073
4074 virtual unsigned int
4075 do_first_plt_entry_offset() const = 0;
4076
4077 virtual unsigned int
4078 do_get_plt_entry_size() const = 0;
4079
4080 virtual unsigned int
4081 do_get_plt_tlsdesc_entry_size() const = 0;
4082
4083 virtual void
4084 do_fill_first_plt_entry(unsigned char* pov,
4085 Address got_addr,
4086 Address plt_addr) = 0;
4087
4088 virtual void
4089 do_fill_plt_entry(unsigned char* pov,
4090 Address got_address,
4091 Address plt_address,
4092 unsigned int got_offset,
4093 unsigned int plt_offset) = 0;
4094
4095 virtual void
4096 do_fill_tlsdesc_entry(unsigned char* pov,
4097 Address gotplt_address,
4098 Address plt_address,
4099 Address got_base,
4100 unsigned int tlsdesc_got_offset,
4101 unsigned int plt_offset) = 0;
4102
4103 void
4104 do_adjust_output_section(Output_section* os);
4105
4106 // Write to a map file.
4107 void
4108 do_print_to_mapfile(Mapfile* mapfile) const
4109 { mapfile->print_output_data(this, _("** PLT")); }
4110
4111 private:
4112 // Set the final size.
4113 void
4114 set_final_data_size();
4115
4116 // Write out the PLT data.
4117 void
4118 do_write(Output_file*);
4119
4120 // The reloc section.
4121 Reloc_section* rel_;
4122
4123 // The TLSDESC relocs, if necessary. These must follow the regular
4124 // PLT relocs.
4125 Reloc_section* tlsdesc_rel_;
4126
4127 // The IRELATIVE relocs, if necessary. These must follow the
4128 // regular PLT relocations.
4129 Reloc_section* irelative_rel_;
4130
4131 // The .got section.
4132 Output_data_got_aarch64<size, big_endian>* got_;
4133
4134 // The .got.plt section.
4135 Output_data_space* got_plt_;
4136
4137 // The part of the .got.plt section used for IRELATIVE relocs.
4138 Output_data_space* got_irelative_;
4139
4140 // The number of PLT entries.
4141 unsigned int count_;
4142
4143 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4144 // follow the regular PLT entries.
4145 unsigned int irelative_count_;
4146
4147 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4148 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4149 // indicates an offset is not allocated.
4150 unsigned int tlsdesc_got_offset_;
4151 };
4152
4153 // Initialize the PLT section.
4154
4155 template<int size, bool big_endian>
4156 void
4157 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4158 {
4159 this->rel_ = new Reloc_section(false);
4160 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4161 elfcpp::SHF_ALLOC, this->rel_,
4162 ORDER_DYNAMIC_PLT_RELOCS, false);
4163 }
4164
4165 template<int size, bool big_endian>
4166 void
4167 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4168 Output_section* os)
4169 {
4170 os->set_entsize(this->get_plt_entry_size());
4171 }
4172
4173 // Add an entry to the PLT.
4174
4175 template<int size, bool big_endian>
4176 void
4177 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4178 Layout* layout, Symbol* gsym)
4179 {
4180 gold_assert(!gsym->has_plt_offset());
4181
4182 unsigned int* pcount;
4183 unsigned int plt_reserved;
4184 Output_section_data_build* got;
4185
4186 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4187 && gsym->can_use_relative_reloc(false))
4188 {
4189 pcount = &this->irelative_count_;
4190 plt_reserved = 0;
4191 got = this->got_irelative_;
4192 }
4193 else
4194 {
4195 pcount = &this->count_;
4196 plt_reserved = this->first_plt_entry_offset();
4197 got = this->got_plt_;
4198 }
4199
4200 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4201 + plt_reserved);
4202
4203 ++*pcount;
4204
4205 section_offset_type got_offset = got->current_data_size();
4206
4207 // Every PLT entry needs a GOT entry which points back to the PLT
4208 // entry (this will be changed by the dynamic linker, normally
4209 // lazily when the function is called).
4210 got->set_current_data_size(got_offset + size / 8);
4211
4212 // Every PLT entry needs a reloc.
4213 this->add_relocation(symtab, layout, gsym, got_offset);
4214
4215 // Note that we don't need to save the symbol. The contents of the
4216 // PLT are independent of which symbols are used. The symbols only
4217 // appear in the relocations.
4218 }
4219
4220 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4221 // the PLT offset.
4222
4223 template<int size, bool big_endian>
4224 unsigned int
4225 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4226 Symbol_table* symtab,
4227 Layout* layout,
4228 Sized_relobj_file<size, big_endian>* relobj,
4229 unsigned int local_sym_index)
4230 {
4231 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4232 ++this->irelative_count_;
4233
4234 section_offset_type got_offset = this->got_irelative_->current_data_size();
4235
4236 // Every PLT entry needs a GOT entry which points back to the PLT
4237 // entry.
4238 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4239
4240 // Every PLT entry needs a reloc.
4241 Reloc_section* rela = this->rela_irelative(symtab, layout);
4242 rela->add_symbolless_local_addend(relobj, local_sym_index,
4243 elfcpp::R_AARCH64_IRELATIVE,
4244 this->got_irelative_, got_offset, 0);
4245
4246 return plt_offset;
4247 }
4248
4249 // Add the relocation for a PLT entry.
4250
4251 template<int size, bool big_endian>
4252 void
4253 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4254 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4255 {
4256 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4257 && gsym->can_use_relative_reloc(false))
4258 {
4259 Reloc_section* rela = this->rela_irelative(symtab, layout);
4260 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4261 this->got_irelative_, got_offset, 0);
4262 }
4263 else
4264 {
4265 gsym->set_needs_dynsym_entry();
4266 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4267 got_offset, 0);
4268 }
4269 }
4270
4271 // Return where the TLSDESC relocations should go, creating it if
4272 // necessary. These follow the JUMP_SLOT relocations.
4273
4274 template<int size, bool big_endian>
4275 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4276 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4277 {
4278 if (this->tlsdesc_rel_ == NULL)
4279 {
4280 this->tlsdesc_rel_ = new Reloc_section(false);
4281 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4282 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4283 ORDER_DYNAMIC_PLT_RELOCS, false);
4284 gold_assert(this->tlsdesc_rel_->output_section()
4285 == this->rel_->output_section());
4286 }
4287 return this->tlsdesc_rel_;
4288 }
4289
4290 // Return where the IRELATIVE relocations should go in the PLT. These
4291 // follow the JUMP_SLOT and the TLSDESC relocations.
4292
4293 template<int size, bool big_endian>
4294 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4295 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4296 Layout* layout)
4297 {
4298 if (this->irelative_rel_ == NULL)
4299 {
4300 // Make sure we have a place for the TLSDESC relocations, in
4301 // case we see any later on.
4302 this->rela_tlsdesc(layout);
4303 this->irelative_rel_ = new Reloc_section(false);
4304 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4305 elfcpp::SHF_ALLOC, this->irelative_rel_,
4306 ORDER_DYNAMIC_PLT_RELOCS, false);
4307 gold_assert(this->irelative_rel_->output_section()
4308 == this->rel_->output_section());
4309
4310 if (parameters->doing_static_link())
4311 {
4312 // A statically linked executable will only have a .rela.plt
4313 // section to hold R_AARCH64_IRELATIVE relocs for
4314 // STT_GNU_IFUNC symbols. The library will use these
4315 // symbols to locate the IRELATIVE relocs at program startup
4316 // time.
4317 symtab->define_in_output_data("__rela_iplt_start", NULL,
4318 Symbol_table::PREDEFINED,
4319 this->irelative_rel_, 0, 0,
4320 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4321 elfcpp::STV_HIDDEN, 0, false, true);
4322 symtab->define_in_output_data("__rela_iplt_end", NULL,
4323 Symbol_table::PREDEFINED,
4324 this->irelative_rel_, 0, 0,
4325 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4326 elfcpp::STV_HIDDEN, 0, true, true);
4327 }
4328 }
4329 return this->irelative_rel_;
4330 }
4331
4332 // Return the PLT address to use for a global symbol.
4333
4334 template<int size, bool big_endian>
4335 uint64_t
4336 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4337 const Symbol* gsym)
4338 {
4339 uint64_t offset = 0;
4340 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4341 && gsym->can_use_relative_reloc(false))
4342 offset = (this->first_plt_entry_offset() +
4343 this->count_ * this->get_plt_entry_size());
4344 return this->address() + offset + gsym->plt_offset();
4345 }
4346
4347 // Return the PLT address to use for a local symbol. These are always
4348 // IRELATIVE relocs.
4349
4350 template<int size, bool big_endian>
4351 uint64_t
4352 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4353 const Relobj* object,
4354 unsigned int r_sym)
4355 {
4356 return (this->address()
4357 + this->first_plt_entry_offset()
4358 + this->count_ * this->get_plt_entry_size()
4359 + object->local_plt_offset(r_sym));
4360 }
4361
4362 // Set the final size.
4363
4364 template<int size, bool big_endian>
4365 void
4366 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4367 {
4368 unsigned int count = this->count_ + this->irelative_count_;
4369 unsigned int extra_size = 0;
4370 if (this->has_tlsdesc_entry())
4371 extra_size += this->get_plt_tlsdesc_entry_size();
4372 this->set_data_size(this->first_plt_entry_offset()
4373 + count * this->get_plt_entry_size()
4374 + extra_size);
4375 }
4376
4377 template<int size, bool big_endian>
4378 class Output_data_plt_aarch64_standard :
4379 public Output_data_plt_aarch64<size, big_endian>
4380 {
4381 public:
4382 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4383 Output_data_plt_aarch64_standard(
4384 Layout* layout,
4385 Output_data_got_aarch64<size, big_endian>* got,
4386 Output_data_space* got_plt,
4387 Output_data_space* got_irelative)
4388 : Output_data_plt_aarch64<size, big_endian>(layout,
4389 size == 32 ? 4 : 8,
4390 got, got_plt,
4391 got_irelative)
4392 { }
4393
4394 protected:
4395 // Return the offset of the first non-reserved PLT entry.
4396 virtual unsigned int
4397 do_first_plt_entry_offset() const
4398 { return this->first_plt_entry_size; }
4399
4400 // Return the size of a PLT entry
4401 virtual unsigned int
4402 do_get_plt_entry_size() const
4403 { return this->plt_entry_size; }
4404
4405 // Return the size of a tlsdesc entry
4406 virtual unsigned int
4407 do_get_plt_tlsdesc_entry_size() const
4408 { return this->plt_tlsdesc_entry_size; }
4409
4410 virtual void
4411 do_fill_first_plt_entry(unsigned char* pov,
4412 Address got_address,
4413 Address plt_address);
4414
4415 virtual void
4416 do_fill_plt_entry(unsigned char* pov,
4417 Address got_address,
4418 Address plt_address,
4419 unsigned int got_offset,
4420 unsigned int plt_offset);
4421
4422 virtual void
4423 do_fill_tlsdesc_entry(unsigned char* pov,
4424 Address gotplt_address,
4425 Address plt_address,
4426 Address got_base,
4427 unsigned int tlsdesc_got_offset,
4428 unsigned int plt_offset);
4429
4430 private:
4431 // The size of the first PLT entry.
4432 static const int first_plt_entry_size = 32;
4433 // The size of a regular PLT entry.
4434 static const int plt_entry_size = 16;
4435 // The size of the reserved PLT tlsdesc entry.
4436 static const int plt_tlsdesc_entry_size = 32;
4437 // Template for the first PLT entry.
4438 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4439 // Template for subsequent PLT entries.
4440 static const uint32_t plt_entry[plt_entry_size / 4];
4441 // The reserved TLSDESC entry in the PLT for an executable.
4442 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4443 };
4444
4445 // The first entry in the PLT for an executable.
4446
4447 template<>
4448 const uint32_t
4449 Output_data_plt_aarch64_standard<32, false>::
4450 first_plt_entry[first_plt_entry_size / 4] =
4451 {
4452 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4453 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4454 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4455 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4456 0xd61f0220, /* br x17 */
4457 0xd503201f, /* nop */
4458 0xd503201f, /* nop */
4459 0xd503201f, /* nop */
4460 };
4461
4462
4463 template<>
4464 const uint32_t
4465 Output_data_plt_aarch64_standard<32, true>::
4466 first_plt_entry[first_plt_entry_size / 4] =
4467 {
4468 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4469 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4470 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4471 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4472 0xd61f0220, /* br x17 */
4473 0xd503201f, /* nop */
4474 0xd503201f, /* nop */
4475 0xd503201f, /* nop */
4476 };
4477
4478
4479 template<>
4480 const uint32_t
4481 Output_data_plt_aarch64_standard<64, false>::
4482 first_plt_entry[first_plt_entry_size / 4] =
4483 {
4484 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4485 0x90000010, /* adrp x16, PLT_GOT+16 */
4486 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4487 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4488 0xd61f0220, /* br x17 */
4489 0xd503201f, /* nop */
4490 0xd503201f, /* nop */
4491 0xd503201f, /* nop */
4492 };
4493
4494
4495 template<>
4496 const uint32_t
4497 Output_data_plt_aarch64_standard<64, true>::
4498 first_plt_entry[first_plt_entry_size / 4] =
4499 {
4500 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4501 0x90000010, /* adrp x16, PLT_GOT+16 */
4502 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4503 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4504 0xd61f0220, /* br x17 */
4505 0xd503201f, /* nop */
4506 0xd503201f, /* nop */
4507 0xd503201f, /* nop */
4508 };
4509
4510
4511 template<>
4512 const uint32_t
4513 Output_data_plt_aarch64_standard<32, false>::
4514 plt_entry[plt_entry_size / 4] =
4515 {
4516 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4517 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4518 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4519 0xd61f0220, /* br x17. */
4520 };
4521
4522
4523 template<>
4524 const uint32_t
4525 Output_data_plt_aarch64_standard<32, true>::
4526 plt_entry[plt_entry_size / 4] =
4527 {
4528 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4529 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4530 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4531 0xd61f0220, /* br x17. */
4532 };
4533
4534
4535 template<>
4536 const uint32_t
4537 Output_data_plt_aarch64_standard<64, false>::
4538 plt_entry[plt_entry_size / 4] =
4539 {
4540 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4541 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4542 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4543 0xd61f0220, /* br x17. */
4544 };
4545
4546
4547 template<>
4548 const uint32_t
4549 Output_data_plt_aarch64_standard<64, true>::
4550 plt_entry[plt_entry_size / 4] =
4551 {
4552 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4553 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4554 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4555 0xd61f0220, /* br x17. */
4556 };
4557
4558
4559 template<int size, bool big_endian>
4560 void
4561 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4562 unsigned char* pov,
4563 Address got_address,
4564 Address plt_address)
4565 {
4566 // PLT0 of the small PLT looks like this in ELF64 -
4567 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4568 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4569 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4570 // symbol resolver
4571 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4572 // GOTPLT entry for this.
4573 // br x17
4574 // PLT0 will be slightly different in ELF32 due to different got entry
4575 // size.
4576 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4577 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4578
4579 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4580 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4581 // FIXME: This only works for 64bit
4582 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4583 gotplt_2nd_ent, plt_address + 4);
4584
4585 // Fill in R_AARCH64_LDST64_ABS_LO12 (the imm12 field holds the byte offset / 8)
4586 elfcpp::Swap<32, big_endian>::writeval(
4587 pov + 8,
4588 ((this->first_plt_entry[2] & 0xffc003ff)
4589 | ((gotplt_2nd_ent & 0xff8) << 7)));
4590
4591 // Fill in R_AARCH64_ADD_ABS_LO12
4592 elfcpp::Swap<32, big_endian>::writeval(
4593 pov + 12,
4594 ((this->first_plt_entry[3] & 0xffc003ff)
4595 | ((gotplt_2nd_ent & 0xfff) << 10)));
4596 }
4597
4598
4599 // Subsequent entries in the PLT for an executable.
4600 // FIXME: This only works for 64bit
4601
4602 template<int size, bool big_endian>
4603 void
4604 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4605 unsigned char* pov,
4606 Address got_address,
4607 Address plt_address,
4608 unsigned int got_offset,
4609 unsigned int plt_offset)
4610 {
4611 memcpy(pov, this->plt_entry, this->plt_entry_size);
4612
4613 Address gotplt_entry_address = got_address + got_offset;
4614 Address plt_entry_address = plt_address + plt_offset;
4615
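// A sketch of the field patching below: the ADRP immediate receives the
// 21-bit page delta; the LDR imm12 field at bits [21:10] holds the byte
// offset divided by 8, which is what ((addr & 0xff8) << 7) produces; and
// the ADD imm12 field at bits [21:10] holds the raw low 12 bits, i.e.
// ((addr & 0xfff) << 10).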
4616 // Fill in R_AARCH64_PCREL_ADR_HI21
4617 AArch64_relocate_functions<size, big_endian>::adrp(
4618 pov,
4619 gotplt_entry_address,
4620 plt_entry_address);
4621
4622 // Fill in R_AARCH64_LDST64_ABS_LO12
4623 elfcpp::Swap<32, big_endian>::writeval(
4624 pov + 4,
4625 ((this->plt_entry[1] & 0xffc003ff)
4626 | ((gotplt_entry_address & 0xff8) << 7)));
4627
4628 // Fill in R_AARCH64_ADD_ABS_LO12
4629 elfcpp::Swap<32, big_endian>::writeval(
4630 pov + 8,
4631 ((this->plt_entry[2] & 0xffc003ff)
4632 | ((gotplt_entry_address & 0xfff) <<10)));
4633
4634 }
4635
4636
4637 template<>
4638 const uint32_t
4639 Output_data_plt_aarch64_standard<32, false>::
4640 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4641 {
4642 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4643 0x90000002, /* adrp x2, 0 */
4644 0x90000003, /* adrp x3, 0 */
4645 0xb9400042, /* ldr w2, [w2, #0] */
4646 0x11000063, /* add w3, w3, 0 */
4647 0xd61f0040, /* br x2 */
4648 0xd503201f, /* nop */
4649 0xd503201f, /* nop */
4650 };
4651
4652 template<>
4653 const uint32_t
4654 Output_data_plt_aarch64_standard<32, true>::
4655 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4656 {
4657 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4658 0x90000002, /* adrp x2, 0 */
4659 0x90000003, /* adrp x3, 0 */
4660 0xb9400042, /* ldr w2, [w2, #0] */
4661 0x11000063, /* add w3, w3, 0 */
4662 0xd61f0040, /* br x2 */
4663 0xd503201f, /* nop */
4664 0xd503201f, /* nop */
4665 };
4666
4667 template<>
4668 const uint32_t
4669 Output_data_plt_aarch64_standard<64, false>::
4670 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4671 {
4672 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4673 0x90000002, /* adrp x2, 0 */
4674 0x90000003, /* adrp x3, 0 */
4675 0xf9400042, /* ldr x2, [x2, #0] */
4676 0x91000063, /* add x3, x3, 0 */
4677 0xd61f0040, /* br x2 */
4678 0xd503201f, /* nop */
4679 0xd503201f, /* nop */
4680 };
4681
4682 template<>
4683 const uint32_t
4684 Output_data_plt_aarch64_standard<64, true>::
4685 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4686 {
4687 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4688 0x90000002, /* adrp x2, 0 */
4689 0x90000003, /* adrp x3, 0 */
4690 0xf9400042, /* ldr x2, [x2, #0] */
4691 0x91000063, /* add x3, x3, 0 */
4692 0xd61f0040, /* br x2 */
4693 0xd503201f, /* nop */
4694 0xd503201f, /* nop */
4695 };
4696
4697 template<int size, bool big_endian>
4698 void
4699 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4700 unsigned char* pov,
4701 Address gotplt_address,
4702 Address plt_address,
4703 Address got_base,
4704 unsigned int tlsdesc_got_offset,
4705 unsigned int plt_offset)
4706 {
4707 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4708
4709 // move DT_TLSDESC_GOT address into x2
4710 // move .got.plt address into x3
4711 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4712 Address plt_entry_address = plt_address + plt_offset;
4713
4714 // R_AARCH64_ADR_PREL_PG_HI21
4715 AArch64_relocate_functions<size, big_endian>::adrp(
4716 pov + 4,
4717 tlsdesc_got_entry,
4718 plt_entry_address + 4);
4719
4720 // R_AARCH64_ADR_PREL_PG_HI21
4721 AArch64_relocate_functions<size, big_endian>::adrp(
4722 pov + 8,
4723 gotplt_address,
4724 plt_entry_address + 8);
4725
4726 // R_AARCH64_LDST64_ABS_LO12
4727 elfcpp::Swap<32, big_endian>::writeval(
4728 pov + 12,
4729 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4730 | ((tlsdesc_got_entry & 0xff8) << 7)));
4731
4732 // R_AARCH64_ADD_ABS_LO12
4733 elfcpp::Swap<32, big_endian>::writeval(
4734 pov + 16,
4735 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4736 | ((gotplt_address & 0xfff) << 10)));
4737 }
4738
4739 // Write out the PLT. This uses the hand-coded instructions above,
4740 // and adjusts them as needed. This is specified by the AArch64 ELF ABI.
4741
4742 template<int size, bool big_endian>
4743 void
4744 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4745 {
4746 const off_t offset = this->offset();
4747 const section_size_type oview_size =
4748 convert_to_section_size_type(this->data_size());
4749 unsigned char* const oview = of->get_output_view(offset, oview_size);
4750
4751 const off_t got_file_offset = this->got_plt_->offset();
4752 gold_assert(got_file_offset + this->got_plt_->data_size()
4753 == this->got_irelative_->offset());
4754
4755 const section_size_type got_size =
4756 convert_to_section_size_type(this->got_plt_->data_size()
4757 + this->got_irelative_->data_size());
4758 unsigned char* const got_view = of->get_output_view(got_file_offset,
4759 got_size);
4760
4761 unsigned char* pov = oview;
4762
4763 // The base address of the .plt section.
4764 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4765 // The base address of the PLT portion of the .got section.
4766 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4767 = this->got_plt_->address();
4768
4769 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4770 pov += this->first_plt_entry_offset();
4771
4772 // The first three entries in .got.plt are reserved.
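// They are zero-filled here and reserved for use by the dynamic linker at
// run time (see the layout comment in Target_aarch64::got_section).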
4773 unsigned char* got_pov = got_view;
4774 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4775 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4776
4777 unsigned int plt_offset = this->first_plt_entry_offset();
4778 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4779 const unsigned int count = this->count_ + this->irelative_count_;
4780 for (unsigned int plt_index = 0;
4781 plt_index < count;
4782 ++plt_index,
4783 pov += this->get_plt_entry_size(),
4784 got_pov += size / 8,
4785 plt_offset += this->get_plt_entry_size(),
4786 got_offset += size / 8)
4787 {
4788 // Set and adjust the PLT entry itself.
4789 this->fill_plt_entry(pov, gotplt_address, plt_address,
4790 got_offset, plt_offset);
4791
4792 // Set the entry in the GOT, which points to plt0.
4793 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4794 }
4795
4796 if (this->has_tlsdesc_entry())
4797 {
4798 // Set and adjust the reserved TLSDESC PLT entry.
4799 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4800 // The base address of the .got section.
4801 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4802 this->got_->address();
4803 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4804 tlsdesc_got_offset, plt_offset);
4805 pov += this->get_plt_tlsdesc_entry_size();
4806 }
4807
4808 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4809 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4810
4811 of->write_output_view(offset, oview_size, oview);
4812 of->write_output_view(got_file_offset, got_size, got_view);
4813 }
4814
4815 // Telling how to update the immediate field of an instruction.
4816 struct AArch64_howto
4817 {
4818 // The immediate field mask.
4819 elfcpp::Elf_Xword dst_mask;
4820
4821 // The bit offset at which to apply the relocation immediate.
4822 int doffset;
4823
4824 // The second part offset, if the immediate field has two parts.
4825 // -1 if the immediate field has only one part.
4826 int doffset2;
4827 };
4828
4829 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
4830 {
4831 {0, -1, -1}, // DATA
4832 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
4833 {0xffffe0, 5, -1}, // LD [23:5]-imm19
4834 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
4835 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
4836 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
4837 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
4838 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
4839 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
4840 {0x3ffffff, 0, -1}, // B [25:0]-imm26
4841 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
4842 };
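// For example, applying a 12-bit immediate 0x123 with the ADD entry above
// (dst_mask 0x3ffc00, doffset 10) rewrites an instruction as
//   insn = (insn & ~0x3ffc00) | (0x123 << 10);
// which is exactly what update_view() below does.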
4843
4844 // AArch64 relocate function class
4845
4846 template<int size, bool big_endian>
4847 class AArch64_relocate_functions
4848 {
4849 public:
4850 typedef enum
4851 {
4852 STATUS_OKAY, // No error during relocation.
4853 STATUS_OVERFLOW, // Relocation overflow.
4854 STATUS_BAD_RELOC, // Relocation cannot be applied.
4855 } Status;
4856
4857 typedef AArch64_relocate_functions<size, big_endian> This;
4858 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4859 typedef Relocate_info<size, big_endian> The_relocate_info;
4860 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
4861 typedef Reloc_stub<size, big_endian> The_reloc_stub;
4862 typedef Stub_table<size, big_endian> The_stub_table;
4863 typedef elfcpp::Rela<size, big_endian> The_rela;
4864 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
4865
4866 // Return the page address of the address.
4867 // Page(address) = address & ~0xFFF
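// For example, Page(0x41f123) == 0x41f000.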
4868
4869 static inline AArch64_valtype
4870 Page(Address address)
4871 {
4872 return (address & (~static_cast<Address>(0xFFF)));
4873 }
4874
4875 private:
4876 // Update instruction (pointed by view) with selected bits (immed).
4877 // val = (val & ~dst_mask) | (immed << doffset)
4878
4879 template<int valsize>
4880 static inline void
4881 update_view(unsigned char* view,
4882 AArch64_valtype immed,
4883 elfcpp::Elf_Xword doffset,
4884 elfcpp::Elf_Xword dst_mask)
4885 {
4886 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
4887 Valtype* wv = reinterpret_cast<Valtype*>(view);
4888 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
4889
4890 // Clear immediate fields.
4891 val &= ~dst_mask;
4892 elfcpp::Swap<valsize, big_endian>::writeval(wv,
4893 static_cast<Valtype>(val | (immed << doffset)));
4894 }
4895
4896 // Update two parts of an instruction (pointed by view) with selected
4897 // bits (immed1 and immed2).
4898 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
4899
4900 template<int valsize>
4901 static inline void
4902 update_view_two_parts(
4903 unsigned char* view,
4904 AArch64_valtype immed1,
4905 AArch64_valtype immed2,
4906 elfcpp::Elf_Xword doffset1,
4907 elfcpp::Elf_Xword doffset2,
4908 elfcpp::Elf_Xword dst_mask)
4909 {
4910 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
4911 Valtype* wv = reinterpret_cast<Valtype*>(view);
4912 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
4913 val &= ~dst_mask;
4914 elfcpp::Swap<valsize, big_endian>::writeval(wv,
4915 static_cast<Valtype>(val | (immed1 << doffset1) |
4916 (immed2 << doffset2)));
4917 }
4918
4919 // Update adr or adrp instruction with immed.
4920 // In adr and adrp: [30:29] immlo [23:5] immhi
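// For example, with immed = 0x12345 the two parts are immlo = immed & 0x3
// = 0x1 (placed at bits [30:29]) and immhi = immed >> 2 = 0x48d1 (placed
// at bits [23:5]), matching the call to update_view_two_parts() below.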
4921
4922 static inline void
4923 update_adr(unsigned char* view, AArch64_valtype immed)
4924 {
4925 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
4926 This::template update_view_two_parts<32>(
4927 view,
4928 immed & 0x3,
4929 (immed & 0x1ffffc) >> 2,
4930 29,
4931 5,
4932 dst_mask);
4933 }
4934
4935 // Update movz/movn instruction with bits immed.
4936 // Set instruction to movz if is_movz is true, otherwise set instruction
4937 // to movn.
4938
4939 static inline void
4940 update_movnz(unsigned char* view,
4941 AArch64_valtype immed,
4942 bool is_movz)
4943 {
4944 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
4945 Valtype* wv = reinterpret_cast<Valtype*>(view);
4946 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
4947
4948 const elfcpp::Elf_Xword doffset =
4949 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
4950 const elfcpp::Elf_Xword dst_mask =
4951 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
4952
4953 // Clear immediate fields and opc code.
4954 val &= ~(dst_mask | (0x3 << 29));
4955
4956 // Set instruction to movz or movn.
4957 // movz: [30:29] is 10 movn: [30:29] is 00
4958 if (is_movz)
4959 val |= (0x2 << 29);
4960
4961 elfcpp::Swap<32, big_endian>::writeval(wv,
4962 static_cast<Valtype>(val | (immed << doffset)));
4963 }
4964
4965 // Update selected bits in text.
4966
4967 template<int valsize>
4968 static inline typename This::Status
4969 reloc_common(unsigned char* view, Address x,
4970 const AArch64_reloc_property* reloc_property)
4971 {
4972 // Select bits from X.
4973 Address immed = reloc_property->select_x_value(x);
4974
4975 // Update view.
4976 const AArch64_reloc_property::Reloc_inst inst =
4977 reloc_property->reloc_inst();
4978 // This helper must not be used for a data relocation, or for an
4979 // instruction whose immediate field has two parts.
4980 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
4981 aarch64_howto[inst].doffset != -1);
4982 This::template update_view<valsize>(view, immed,
4983 aarch64_howto[inst].doffset,
4984 aarch64_howto[inst].dst_mask);
4985
4986 // Do check overflow or alignment if needed.
4987 return (reloc_property->checkup_x_value(x)
4988 ? This::STATUS_OKAY
4989 : This::STATUS_OVERFLOW);
4990 }
4991
4992 public:
4993
4994 // Construct a B insn. Note that although we group it here with the other
4995 // relocation operations, no actual 'relocation' is involved here.
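// For example, construct_b(view, 0x1000) encodes "b .+0x1000" as
// 0x14000400: opcode 0b000101 in bits [31:26] and imm26 = 0x1000 >> 2.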
4996 static inline void
4997 construct_b(unsigned char* view, unsigned int branch_offset)
4998 {
4999 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5000 26, 0, 0xffffffff);
5001 }
5002
5003 // Do a simple rela relocation at unaligned addresses.
5004
5005 template<int valsize>
5006 static inline typename This::Status
5007 rela_ua(unsigned char* view,
5008 const Sized_relobj_file<size, big_endian>* object,
5009 const Symbol_value<size>* psymval,
5010 AArch64_valtype addend,
5011 const AArch64_reloc_property* reloc_property)
5012 {
5013 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5014 Valtype;
5015 typename elfcpp::Elf_types<size>::Elf_Addr x =
5016 psymval->value(object, addend);
5017 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5018 static_cast<Valtype>(x));
5019 return (reloc_property->checkup_x_value(x)
5020 ? This::STATUS_OKAY
5021 : This::STATUS_OVERFLOW);
5022 }
5023
5024 // Do a simple pc-relative relocation at unaligned addresses.
5025
5026 template<int valsize>
5027 static inline typename This::Status
5028 pcrela_ua(unsigned char* view,
5029 const Sized_relobj_file<size, big_endian>* object,
5030 const Symbol_value<size>* psymval,
5031 AArch64_valtype addend,
5032 Address address,
5033 const AArch64_reloc_property* reloc_property)
5034 {
5035 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5036 Valtype;
5037 Address x = psymval->value(object, addend) - address;
5038 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5039 static_cast<Valtype>(x));
5040 return (reloc_property->checkup_x_value(x)
5041 ? This::STATUS_OKAY
5042 : This::STATUS_OVERFLOW);
5043 }
5044
5045 // Do a simple rela relocation at aligned addresses.
5046
5047 template<int valsize>
5048 static inline typename This::Status
5049 rela(
5050 unsigned char* view,
5051 const Sized_relobj_file<size, big_endian>* object,
5052 const Symbol_value<size>* psymval,
5053 AArch64_valtype addend,
5054 const AArch64_reloc_property* reloc_property)
5055 {
5056 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5057 Valtype* wv = reinterpret_cast<Valtype*>(view);
5058 Address x = psymval->value(object, addend);
5059 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x));
5060 return (reloc_property->checkup_x_value(x)
5061 ? This::STATUS_OKAY
5062 : This::STATUS_OVERFLOW);
5063 }
5064
5065 // Do relocate. Update selected bits in text.
5066 // new_val = (val & ~dst_mask) | (immed << doffset)
5067
5068 template<int valsize>
5069 static inline typename This::Status
5070 rela_general(unsigned char* view,
5071 const Sized_relobj_file<size, big_endian>* object,
5072 const Symbol_value<size>* psymval,
5073 AArch64_valtype addend,
5074 const AArch64_reloc_property* reloc_property)
5075 {
5076 // Calculate relocation.
5077 Address x = psymval->value(object, addend);
5078 return This::template reloc_common<valsize>(view, x, reloc_property);
5079 }
5080
5081 // Do relocate. Update selected bits in text.
5082 // new val = (val & ~dst_mask) | (immed << doffset)
5083
5084 template<int valsize>
5085 static inline typename This::Status
5086 rela_general(
5087 unsigned char* view,
5088 AArch64_valtype s,
5089 AArch64_valtype addend,
5090 const AArch64_reloc_property* reloc_property)
5091 {
5092 // Calculate relocation.
5093 Address x = s + addend;
5094 return This::template reloc_common<valsize>(view, x, reloc_property);
5095 }
5096
5097 // Do address relative relocate. Update selected bits in text.
5098 // new val = (val & ~dst_mask) | (immed << doffset)
5099
5100 template<int valsize>
5101 static inline typename This::Status
5102 pcrela_general(
5103 unsigned char* view,
5104 const Sized_relobj_file<size, big_endian>* object,
5105 const Symbol_value<size>* psymval,
5106 AArch64_valtype addend,
5107 Address address,
5108 const AArch64_reloc_property* reloc_property)
5109 {
5110 // Calculate relocation.
5111 Address x = psymval->value(object, addend) - address;
5112 return This::template reloc_common<valsize>(view, x, reloc_property);
5113 }
5114
5115
5116 // Calculate (S + A) - address, update adr instruction.
5117
5118 static inline typename This::Status
5119 adr(unsigned char* view,
5120 const Sized_relobj_file<size, big_endian>* object,
5121 const Symbol_value<size>* psymval,
5122 Address addend,
5123 Address address,
5124 const AArch64_reloc_property* /* reloc_property */)
5125 {
5126 AArch64_valtype x = psymval->value(object, addend) - address;
5127 // Pick bits [20:0] of X.
5128 AArch64_valtype immed = x & 0x1fffff;
5129 update_adr(view, immed);
5130 // Check -2^20 <= X < 2^20
5131 return (size == 64 && Bits<21>::has_overflow((x))
5132 ? This::STATUS_OVERFLOW
5133 : This::STATUS_OKAY);
5134 }
5135
5136 // Calculate PG(S+A) - PG(address), update adrp instruction.
5137 // R_AARCH64_ADR_PREL_PG_HI21
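// For example, with S+A = 0x412345 and address = 0x400010, the page delta
// is 0x412000 - 0x400000 = 0x12000 and the ADRP immediate written is
// 0x12000 >> 12 = 0x12.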
5138
5139 static inline typename This::Status
5140 adrp(
5141 unsigned char* view,
5142 Address sa,
5143 Address address)
5144 {
5145 AArch64_valtype x = This::Page(sa) - This::Page(address);
5146 // Pick [32:12] of X.
5147 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5148 update_adr(view, immed);
5149 // Check -2^32 <= X < 2^32
5150 return (size == 64 && Bits<33>::has_overflow((x))
5151 ? This::STATUS_OVERFLOW
5152 : This::STATUS_OKAY);
5153 }
5154
5155 // Calculate PG(S+A) - PG(address), update adrp instruction.
5156 // R_AARCH64_ADR_PREL_PG_HI21
5157
5158 static inline typename This::Status
5159 adrp(unsigned char* view,
5160 const Sized_relobj_file<size, big_endian>* object,
5161 const Symbol_value<size>* psymval,
5162 Address addend,
5163 Address address,
5164 const AArch64_reloc_property* reloc_property)
5165 {
5166 Address sa = psymval->value(object, addend);
5167 AArch64_valtype x = This::Page(sa) - This::Page(address);
5168 // Pick [32:12] of X.
5169 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5170 update_adr(view, immed);
5171 return (reloc_property->checkup_x_value(x)
5172 ? This::STATUS_OKAY
5173 : This::STATUS_OVERFLOW);
5174 }
5175
5176 // Update mov[n/z] instruction. Check overflow if needed.
5177 // If X >= 0, set the instruction to movz and its immediate value to the
5178 // selected bits of X.
5179 // If X < 0, set the instruction to movn and its immediate value to the
5180 // selected bits of NOT (X).
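// For example, assuming the relocation selects bits [15:0]: x = 0x1234
// yields MOVZ #0x1234, while x = -0x1235 (so ~x = 0x1234) yields
// MOVN #0x1234, which also materializes -0x1235.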
5181
5182 static inline typename This::Status
5183 movnz(unsigned char* view,
5184 AArch64_valtype x,
5185 const AArch64_reloc_property* reloc_property)
5186 {
5187 // Select bits from X.
5188 Address immed;
5189 bool is_movz;
5190 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5191 if (static_cast<SignedW>(x) >= 0)
5192 {
5193 immed = reloc_property->select_x_value(x);
5194 is_movz = true;
5195 }
5196 else
5197 {
5198 immed = reloc_property->select_x_value(~x);
5199 is_movz = false;
5200 }
5201
5202 // Update movnz instruction.
5203 update_movnz(view, immed, is_movz);
5204
5205 // Do check overflow or alignment if needed.
5206 return (reloc_property->checkup_x_value(x)
5207 ? This::STATUS_OKAY
5208 : This::STATUS_OVERFLOW);
5209 }
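// For example (illustrative): if X = -1 for a MOVW-class relocation, the
// else branch selects bits of ~X = 0, so the instruction becomes MOVN with a
// zero immediate, which reconstructs the all-ones value -1 at run time.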
5210
5211 static inline bool
5212 maybe_apply_stub(unsigned int,
5213 const The_relocate_info*,
5214 const The_rela&,
5215 unsigned char*,
5216 Address,
5217 const Sized_symbol<size>*,
5218 const Symbol_value<size>*,
5219 const Sized_relobj_file<size, big_endian>*,
5220 section_size_type);
5221
5222 }; // End of AArch64_relocate_functions
5223
5224
5225 // For a certain relocation type (usually a jump or branch), test whether the
5226 // destination needs a stub to be reached. If so, re-route the destination of
5227 // the original instruction to the stub. Note that at this point the stub has
5228 // already been generated.
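// For example (illustrative): a BL whose target lies outside the +/-128MB
// reach of its 26-bit offset is redirected to a long-branch stub created in
// an earlier relaxation pass; only the offset in the original BL is rewritten
// here, the stub itself is left untouched.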
5229
5230 template<int size, bool big_endian>
5231 bool
5232 AArch64_relocate_functions<size, big_endian>::
5233 maybe_apply_stub(unsigned int r_type,
5234 const The_relocate_info* relinfo,
5235 const The_rela& rela,
5236 unsigned char* view,
5237 Address address,
5238 const Sized_symbol<size>* gsym,
5239 const Symbol_value<size>* psymval,
5240 const Sized_relobj_file<size, big_endian>* object,
5241 section_size_type current_group_size)
5242 {
5243 if (parameters->options().relocatable())
5244 return false;
5245
5246 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5247 Address branch_target = psymval->value(object, 0) + addend;
5248 int stub_type =
5249 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5250 if (stub_type == ST_NONE)
5251 return false;
5252
5253 const The_aarch64_relobj* aarch64_relobj =
5254 static_cast<const The_aarch64_relobj*>(object);
5255 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5256 gold_assert(stub_table != NULL);
5257
5258 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5259 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5260 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5261 gold_assert(stub != NULL);
5262
5263 Address new_branch_target = stub_table->address() + stub->offset();
5264 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5265 new_branch_target - address;
5266 const AArch64_reloc_property* arp =
5267 aarch64_reloc_property_table->get_reloc_property(r_type);
5268 gold_assert(arp != NULL);
5269 typename This::Status status = This::template
5270 rela_general<32>(view, branch_offset, 0, arp);
5271 if (status != This::STATUS_OKAY)
5272 gold_error(_("Stub is too far away, try a smaller value "
5273 "for '--stub-group-size'. The current value is 0x%lx."),
5274 static_cast<unsigned long>(current_group_size));
5275 return true;
5276 }
5277
5278
5279 // Group input sections for stub generation.
5280 //
5281 // We group input sections in an output section so that the total size,
5282 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5283 // unless the only input section in a group is already bigger than GROUP_SIZE.
5284 // A stub table is then created to follow the last input section
5285 // in each group; that is, for each group a stub table is created and is
5286 // placed after the group. If STUBS_ALWAYS_AFTER_BRANCH is false, we further
5287 // extend the group after the stub table.
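// For example (illustrative): with a group size of 16MB, executable input
// sections are accumulated until adding the next one (plus alignment padding)
// would exceed that limit; the group is then closed and its stub table placed
// after it, keeping every stub within reach of the branches that may need it.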
5288
5289 template<int size, bool big_endian>
5290 void
5291 Target_aarch64<size, big_endian>::group_sections(
5292 Layout* layout,
5293 section_size_type group_size,
5294 bool stubs_always_after_branch,
5295 const Task* task)
5296 {
5297 // Group input sections and insert stub tables.
5298 Layout::Section_list section_list;
5299 layout->get_executable_sections(&section_list);
5300 for (Layout::Section_list::const_iterator p = section_list.begin();
5301 p != section_list.end();
5302 ++p)
5303 {
5304 AArch64_output_section<size, big_endian>* output_section =
5305 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5306 output_section->group_sections(group_size, stubs_always_after_branch,
5307 this, task);
5308 }
5309 }
5310
5311
5312 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5313 // section of RELOBJ.
5314
5315 template<int size, bool big_endian>
5316 AArch64_input_section<size, big_endian>*
5317 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5318 Relobj* relobj, unsigned int shndx) const
5319 {
5320 Section_id sid(relobj, shndx);
5321 typename AArch64_input_section_map::const_iterator p =
5322 this->aarch64_input_section_map_.find(sid);
5323 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5324 }
5325
5326
5327 // Make a new AArch64_input_section object.
5328
5329 template<int size, bool big_endian>
5330 AArch64_input_section<size, big_endian>*
5331 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5332 Relobj* relobj, unsigned int shndx)
5333 {
5334 Section_id sid(relobj, shndx);
5335
5336 AArch64_input_section<size, big_endian>* input_section =
5337 new AArch64_input_section<size, big_endian>(relobj, shndx);
5338 input_section->init();
5339
5340 // Register new AArch64_input_section in map for look-up.
5341 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5342 this->aarch64_input_section_map_.insert(
5343 std::make_pair(sid, input_section));
5344
5345 // Make sure that we have not already created another AArch64_input_section
5346 // for this input section.
5347 gold_assert(ins.second);
5348
5349 return input_section;
5350 }
5351
5352
5353 // Relaxation hook. This is where we do stub generation.
5354
5355 template<int size, bool big_endian>
5356 bool
5357 Target_aarch64<size, big_endian>::do_relax(
5358 int pass,
5359 const Input_objects* input_objects,
5360 Symbol_table* symtab,
5361 Layout* layout,
5362 const Task* task)
5363 {
5364 gold_assert(!parameters->options().relocatable());
5365 if (pass == 1)
5366 {
5367 // We don't handle negative stub_group_size right now.
5368 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5369 if (this->stub_group_size_ == 1)
5370 {
5371 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5372 // will fail to link. The user will have to relink with an explicit
5373 // group size option.
5374 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5375 4096 * 4;
5376 }
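// Illustrative note: assuming MAX_BRANCH_OFFSET corresponds to the +/-128MB
// reach of a 26-bit branch, the default group size is that reach minus 16KB
// (the 4096 * 4 bytes above), so branches at the start of a group can still
// reach stubs appended at its end.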
5377 group_sections(layout, this->stub_group_size_, true, task);
5378 }
5379 else
5380 {
5381 // If this is not the first pass, addresses and file offsets have
5382 // been reset at this point; set them here.
5383 for (Stub_table_iterator sp = this->stub_tables_.begin();
5384 sp != this->stub_tables_.end(); ++sp)
5385 {
5386 The_stub_table* stt = *sp;
5387 The_aarch64_input_section* owner = stt->owner();
5388 off_t off = align_address(owner->original_size(),
5389 stt->addralign());
5390 stt->set_address_and_file_offset(owner->address() + off,
5391 owner->offset() + off);
5392 }
5393 }
5394
5395 // Scan relocs for relocation stubs
5396 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5397 op != input_objects->relobj_end();
5398 ++op)
5399 {
5400 The_aarch64_relobj* aarch64_relobj =
5401 static_cast<The_aarch64_relobj*>(*op);
5402 // Lock the object so we can read from it. This is only called
5403 // single-threaded from Layout::finalize, so it is OK to lock.
5404 Task_lock_obj<Object> tl(task, aarch64_relobj);
5405 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5406 }
5407
5408 bool any_stub_table_changed = false;
5409 for (Stub_table_iterator siter = this->stub_tables_.begin();
5410 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5411 {
5412 The_stub_table* stub_table = *siter;
5413 if (stub_table->update_data_size_changed_p())
5414 {
5415 The_aarch64_input_section* owner = stub_table->owner();
5416 uint64_t address = owner->address();
5417 off_t offset = owner->offset();
5418 owner->reset_address_and_file_offset();
5419 owner->set_address_and_file_offset(address, offset);
5420
5421 any_stub_table_changed = true;
5422 }
5423 }
5424
5425 // Decide whether to continue relaxation; if not, finalize the stubs.
5426 bool continue_relaxation = any_stub_table_changed;
5427 if (!continue_relaxation)
5428 for (Stub_table_iterator sp = this->stub_tables_.begin();
5429 (sp != this->stub_tables_.end());
5430 ++sp)
5431 (*sp)->finalize_stubs();
5432
5433 return continue_relaxation;
5434 }
5435
5436
5437 // Make a new Stub_table.
5438
5439 template<int size, bool big_endian>
5440 Stub_table<size, big_endian>*
5441 Target_aarch64<size, big_endian>::new_stub_table(
5442 AArch64_input_section<size, big_endian>* owner)
5443 {
5444 Stub_table<size, big_endian>* stub_table =
5445 new Stub_table<size, big_endian>(owner);
5446 stub_table->set_address(align_address(
5447 owner->address() + owner->data_size(), 8));
5448 stub_table->set_file_offset(owner->offset() + owner->data_size());
5449 stub_table->finalize_data_size();
5450
5451 this->stub_tables_.push_back(stub_table);
5452
5453 return stub_table;
5454 }
5455
5456
5457 template<int size, bool big_endian>
5458 uint64_t
5459 Target_aarch64<size, big_endian>::do_reloc_addend(
5460 void* arg, unsigned int r_type, uint64_t) const
5461 {
5462 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5463 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5464 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5465 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5466 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5467 gold_assert(psymval->is_tls_symbol());
5468 // The value of a TLS symbol is the offset in the TLS segment.
5469 return psymval->value(ti.object, 0);
5470 }
5471
5472 // Return the number of entries in the PLT.
5473
5474 template<int size, bool big_endian>
5475 unsigned int
5476 Target_aarch64<size, big_endian>::plt_entry_count() const
5477 {
5478 if (this->plt_ == NULL)
5479 return 0;
5480 return this->plt_->entry_count();
5481 }
5482
5483 // Return the offset of the first non-reserved PLT entry.
5484
5485 template<int size, bool big_endian>
5486 unsigned int
5487 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5488 {
5489 return this->plt_->first_plt_entry_offset();
5490 }
5491
5492 // Return the size of each PLT entry.
5493
5494 template<int size, bool big_endian>
5495 unsigned int
5496 Target_aarch64<size, big_endian>::plt_entry_size() const
5497 {
5498 return this->plt_->get_plt_entry_size();
5499 }
5500
5501 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5502
5503 template<int size, bool big_endian>
5504 void
5505 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5506 Symbol_table* symtab, Layout* layout)
5507 {
5508 if (this->tls_base_symbol_defined_)
5509 return;
5510
5511 Output_segment* tls_segment = layout->tls_segment();
5512 if (tls_segment != NULL)
5513 {
5514 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5515 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5516 Symbol_table::PREDEFINED,
5517 tls_segment, 0, 0,
5518 elfcpp::STT_TLS,
5519 elfcpp::STB_LOCAL,
5520 elfcpp::STV_HIDDEN, 0,
5521 Symbol::SEGMENT_START,
5522 true);
5523 }
5524 this->tls_base_symbol_defined_ = true;
5525 }
5526
5527 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5528
5529 template<int size, bool big_endian>
5530 void
5531 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5532 Symbol_table* symtab, Layout* layout)
5533 {
5534 if (this->plt_ == NULL)
5535 this->make_plt_section(symtab, layout);
5536
5537 if (!this->plt_->has_tlsdesc_entry())
5538 {
5539 // Allocate the TLSDESC_GOT entry.
5540 Output_data_got_aarch64<size, big_endian>* got =
5541 this->got_section(symtab, layout);
5542 unsigned int got_offset = got->add_constant(0);
5543
5544 // Allocate the TLSDESC_PLT entry.
5545 this->plt_->reserve_tlsdesc_entry(got_offset);
5546 }
5547 }
5548
5549 // Create a GOT entry for the TLS module index.
5550
5551 template<int size, bool big_endian>
5552 unsigned int
5553 Target_aarch64<size, big_endian>::got_mod_index_entry(
5554 Symbol_table* symtab, Layout* layout,
5555 Sized_relobj_file<size, big_endian>* object)
5556 {
5557 if (this->got_mod_index_offset_ == -1U)
5558 {
5559 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5560 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5561 Output_data_got_aarch64<size, big_endian>* got =
5562 this->got_section(symtab, layout);
5563 unsigned int got_offset = got->add_constant(0);
5564 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5565 got_offset, 0);
5566 got->add_constant(0);
5567 this->got_mod_index_offset_ = got_offset;
5568 }
5569 return this->got_mod_index_offset_;
5570 }
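// The entry created above occupies two consecutive GOT words: the first is
// filled in by the dynamic R_AARCH64_TLS_DTPMOD64 reloc with the module
// index, while the second stays zero and serves as the DTPREL offset for
// Local-Dynamic accesses.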
5571
5572 // Optimize the TLS relocation type based on what we know about the
5573 // symbol. IS_FINAL is true if the final address of this symbol is
5574 // known at link time.
5575
5576 template<int size, bool big_endian>
5577 tls::Tls_optimization
5578 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5579 int r_type)
5580 {
5581 // If we are generating a shared library, then we can't do anything
5582 // in the linker.
5583 if (parameters->options().shared())
5584 return tls::TLSOPT_NONE;
5585
5586 switch (r_type)
5587 {
5588 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5589 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5590 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5591 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5592 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5593 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5594 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5595 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5596 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5597 case elfcpp::R_AARCH64_TLSDESC_LDR:
5598 case elfcpp::R_AARCH64_TLSDESC_ADD:
5599 case elfcpp::R_AARCH64_TLSDESC_CALL:
5600 // These are General-Dynamic which permits fully general TLS
5601 // access. Since we know that we are generating an executable,
5602 // we can convert this to Initial-Exec. If we also know that
5603 // this is a local symbol, we can further switch to Local-Exec.
5604 if (is_final)
5605 return tls::TLSOPT_TO_LE;
5606 return tls::TLSOPT_TO_IE;
5607
5608 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5609 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5610 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5611 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5612 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5613 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5614 // These are Local-Dynamic, which refer to local symbols in the
5615 // dynamic TLS block. Since we know that we are generating an
5616 // executable, we can switch to Local-Exec.
5617 return tls::TLSOPT_TO_LE;
5618
5619 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5620 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5621 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5622 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5623 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5624 // These are Initial-Exec relocs which get the thread offset
5625 // from the GOT. If we know that we are linking against the
5626 // local symbol, we can switch to Local-Exec, which links the
5627 // thread offset into the instruction.
5628 if (is_final)
5629 return tls::TLSOPT_TO_LE;
5630 return tls::TLSOPT_NONE;
5631
5632 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5633 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5634 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5635 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5636 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5637 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5638 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5639 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5640 // When we already have Local-Exec, there is nothing further we
5641 // can do.
5642 return tls::TLSOPT_NONE;
5643
5644 default:
5645 gold_unreachable();
5646 }
5647 }
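// Worked example (illustrative): for an executable that defines the TLS
// variable itself, IS_FINAL is true, so a TLSGD_ADR_PAGE21/ADD_LO12_NC pair
// is relaxed all the way to Local-Exec and needs no GOT entry or dynamic
// reloc; the same pair in a shared library stays TLSOPT_NONE.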
5648
5649 // Returns true if this relocation type could be that of a function pointer.
5650
5651 template<int size, bool big_endian>
5652 inline bool
5653 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5654 unsigned int r_type)
5655 {
5656 switch (r_type)
5657 {
5658 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5659 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5660 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5661 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5662 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5663 {
5664 return true;
5665 }
5666 }
5667 return false;
5668 }
5669
5670 // For safe ICF, scan a relocation for a local symbol to check if it
5671 // corresponds to a function pointer being taken. In that case mark
5672 // the function whose pointer was taken as not foldable.
5673
5674 template<int size, bool big_endian>
5675 inline bool
5676 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5677 Symbol_table* ,
5678 Layout* ,
5679 Target_aarch64<size, big_endian>* ,
5680 Sized_relobj_file<size, big_endian>* ,
5681 unsigned int ,
5682 Output_section* ,
5683 const elfcpp::Rela<size, big_endian>& ,
5684 unsigned int r_type,
5685 const elfcpp::Sym<size, big_endian>&)
5686 {
5687 // When building a shared library, do not fold any local symbols.
5688 return (parameters->options().shared()
5689 || possible_function_pointer_reloc(r_type));
5690 }
5691
5692 // For safe ICF, scan a relocation for a global symbol to check if it
5693 // corresponds to a function pointer being taken. In that case mark
5694 // the function whose pointer was taken as not foldable.
5695
5696 template<int size, bool big_endian>
5697 inline bool
5698 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5699 Symbol_table* ,
5700 Layout* ,
5701 Target_aarch64<size, big_endian>* ,
5702 Sized_relobj_file<size, big_endian>* ,
5703 unsigned int ,
5704 Output_section* ,
5705 const elfcpp::Rela<size, big_endian>& ,
5706 unsigned int r_type,
5707 Symbol* gsym)
5708 {
5709 // When building a shared library, do not fold symbols whose visibility
5710 // is hidden, internal or protected.
5711 return ((parameters->options().shared()
5712 && (gsym->visibility() == elfcpp::STV_INTERNAL
5713 || gsym->visibility() == elfcpp::STV_PROTECTED
5714 || gsym->visibility() == elfcpp::STV_HIDDEN))
5715 || possible_function_pointer_reloc(r_type));
5716 }
5717
5718 // Report an unsupported relocation against a local symbol.
5719
5720 template<int size, bool big_endian>
5721 void
5722 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5723 Sized_relobj_file<size, big_endian>* object,
5724 unsigned int r_type)
5725 {
5726 gold_error(_("%s: unsupported reloc %u against local symbol"),
5727 object->name().c_str(), r_type);
5728 }
5729
5730 // We are about to emit a dynamic relocation of type R_TYPE. If the
5731 // dynamic linker does not support it, issue an error.
5732
5733 template<int size, bool big_endian>
5734 void
5735 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5736 unsigned int r_type)
5737 {
5738 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5739
5740 switch (r_type)
5741 {
5742 // These are the relocation types supported by glibc for AARCH64.
5743 case elfcpp::R_AARCH64_NONE:
5744 case elfcpp::R_AARCH64_COPY:
5745 case elfcpp::R_AARCH64_GLOB_DAT:
5746 case elfcpp::R_AARCH64_JUMP_SLOT:
5747 case elfcpp::R_AARCH64_RELATIVE:
5748 case elfcpp::R_AARCH64_TLS_DTPREL64:
5749 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5750 case elfcpp::R_AARCH64_TLS_TPREL64:
5751 case elfcpp::R_AARCH64_TLSDESC:
5752 case elfcpp::R_AARCH64_IRELATIVE:
5753 case elfcpp::R_AARCH64_ABS32:
5754 case elfcpp::R_AARCH64_ABS64:
5755 return;
5756
5757 default:
5758 break;
5759 }
5760
5761 // This prevents us from issuing more than one error per reloc
5762 // section. But we can still wind up issuing more than one
5763 // error per object file.
5764 if (this->issued_non_pic_error_)
5765 return;
5766 gold_assert(parameters->options().output_is_position_independent());
5767 object->error(_("requires unsupported dynamic reloc; "
5768 "recompile with -fPIC"));
5769 this->issued_non_pic_error_ = true;
5770 return;
5771 }
5772
5773 // Return whether we need to make a PLT entry for a relocation of the
5774 // given type against a STT_GNU_IFUNC symbol.
5775
5776 template<int size, bool big_endian>
5777 bool
5778 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5779 Sized_relobj_file<size, big_endian>* object,
5780 unsigned int r_type)
5781 {
5782 const AArch64_reloc_property* arp =
5783 aarch64_reloc_property_table->get_reloc_property(r_type);
5784 gold_assert(arp != NULL);
5785
5786 int flags = arp->reference_flags();
5787 if (flags & Symbol::TLS_REF)
5788 {
5789 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5790 object->name().c_str(), arp->name().c_str());
5791 return false;
5792 }
5793 return flags != 0;
5794 }
5795
5796 // Scan a relocation for a local symbol.
5797
5798 template<int size, bool big_endian>
5799 inline void
5800 Target_aarch64<size, big_endian>::Scan::local(
5801 Symbol_table* symtab,
5802 Layout* layout,
5803 Target_aarch64<size, big_endian>* target,
5804 Sized_relobj_file<size, big_endian>* object,
5805 unsigned int data_shndx,
5806 Output_section* output_section,
5807 const elfcpp::Rela<size, big_endian>& rela,
5808 unsigned int r_type,
5809 const elfcpp::Sym<size, big_endian>& lsym,
5810 bool is_discarded)
5811 {
5812 if (is_discarded)
5813 return;
5814
5815 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5816 Reloc_section;
5817 Output_data_got_aarch64<size, big_endian>* got =
5818 target->got_section(symtab, layout);
5819 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5820
5821 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5822 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5823 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5824 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5825
5826 switch (r_type)
5827 {
5828 case elfcpp::R_AARCH64_ABS32:
5829 case elfcpp::R_AARCH64_ABS16:
5830 if (parameters->options().output_is_position_independent())
5831 {
5832 gold_error(_("%s: unsupported reloc %u in pos independent link."),
5833 object->name().c_str(), r_type);
5834 }
5835 break;
5836
5837 case elfcpp::R_AARCH64_ABS64:
5838 // If building a shared library or PIE, we need to mark this as a dynamic
5839 // relocation, so that the dynamic loader can relocate it.
5840 if (parameters->options().output_is_position_independent())
5841 {
5842 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5843 rela_dyn->add_local_relative(object, r_sym,
5844 elfcpp::R_AARCH64_RELATIVE,
5845 output_section,
5846 data_shndx,
5847 rela.get_r_offset(),
5848 rela.get_r_addend(),
5849 is_ifunc);
5850 }
5851 break;
5852
5853 case elfcpp::R_AARCH64_PREL64:
5854 case elfcpp::R_AARCH64_PREL32:
5855 case elfcpp::R_AARCH64_PREL16:
5856 break;
5857
5858 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
5859 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
5860 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
5861 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
5862 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
5863 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
5864 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
5865 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
5866 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
5867 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
5868 break;
5869
5870 // Control flow, pc-relative. We don't need to do anything for a relative
5871 // addressing relocation against a local symbol if it does not reference
5872 // the GOT.
5873 case elfcpp::R_AARCH64_TSTBR14:
5874 case elfcpp::R_AARCH64_CONDBR19:
5875 case elfcpp::R_AARCH64_JUMP26:
5876 case elfcpp::R_AARCH64_CALL26:
5877 break;
5878
5879 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5880 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5881 {
5882 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5883 optimize_tls_reloc(!parameters->options().shared(), r_type);
5884 if (tlsopt == tls::TLSOPT_TO_LE)
5885 break;
5886
5887 layout->set_has_static_tls();
5888 // Create a GOT entry for the tp-relative offset.
5889 if (!parameters->doing_static_link())
5890 {
5891 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
5892 target->rela_dyn_section(layout),
5893 elfcpp::R_AARCH64_TLS_TPREL64);
5894 }
5895 else if (!object->local_has_got_offset(r_sym,
5896 GOT_TYPE_TLS_OFFSET))
5897 {
5898 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
5899 unsigned int got_offset =
5900 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
5901 const elfcpp::Elf_Xword addend = rela.get_r_addend();
5902 gold_assert(addend == 0);
5903 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
5904 object, r_sym);
5905 }
5906 }
5907 break;
5908
5909 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5910 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5911 {
5912 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5913 optimize_tls_reloc(!parameters->options().shared(), r_type);
5914 if (tlsopt == tls::TLSOPT_TO_LE)
5915 {
5916 layout->set_has_static_tls();
5917 break;
5918 }
5919 gold_assert(tlsopt == tls::TLSOPT_NONE);
5920
5921 got->add_local_pair_with_rel(object, r_sym, data_shndx,
5922 GOT_TYPE_TLS_PAIR,
5923 target->rela_dyn_section(layout),
5924 elfcpp::R_AARCH64_TLS_DTPMOD64);
5925 }
5926 break;
5927
5928 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5929 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5930 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5931 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5932 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5933 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5934 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5935 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5936 {
5937 layout->set_has_static_tls();
5938 bool output_is_shared = parameters->options().shared();
5939 if (output_is_shared)
5940 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
5941 object->name().c_str(), r_type);
5942 }
5943 break;
5944
5945 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5946 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5947 {
5948 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5949 optimize_tls_reloc(!parameters->options().shared(), r_type);
5950 if (tlsopt == tls::TLSOPT_NONE)
5951 {
5952 // Create a GOT entry for the module index.
5953 target->got_mod_index_entry(symtab, layout, object);
5954 }
5955 else if (tlsopt != tls::TLSOPT_TO_LE)
5956 unsupported_reloc_local(object, r_type);
5957 }
5958 break;
5959
5960 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5961 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5962 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5963 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5964 break;
5965
5966 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5967 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5968 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5969 {
5970 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5971 optimize_tls_reloc(!parameters->options().shared(), r_type);
5972 target->define_tls_base_symbol(symtab, layout);
5973 if (tlsopt == tls::TLSOPT_NONE)
5974 {
5975 // Create reserved PLT and GOT entries for the resolver.
5976 target->reserve_tlsdesc_entries(symtab, layout);
5977
5978 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
5979 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
5980 // entry needs to be in an area in .got.plt, not .got. Call
5981 // got_section to make sure the section has been created.
5982 target->got_section(symtab, layout);
5983 Output_data_got<size, big_endian>* got =
5984 target->got_tlsdesc_section();
5985 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5986 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
5987 {
5988 unsigned int got_offset = got->add_constant(0);
5989 got->add_constant(0);
5990 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
5991 got_offset);
5992 Reloc_section* rt = target->rela_tlsdesc_section(layout);
5993 // We store the arguments we need in a vector, and use
5994 // the index into the vector as the parameter to pass
5995 // to the target specific routines.
5996 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
5997 void* arg = reinterpret_cast<void*>(intarg);
5998 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
5999 got, got_offset, 0);
6000 }
6001 }
6002 else if (tlsopt != tls::TLSOPT_TO_LE)
6003 unsupported_reloc_local(object, r_type);
6004 }
6005 break;
6006
6007 case elfcpp::R_AARCH64_TLSDESC_CALL:
6008 break;
6009
6010 default:
6011 unsupported_reloc_local(object, r_type);
6012 }
6013 }
6014
6015
6016 // Report an unsupported relocation against a global symbol.
6017
6018 template<int size, bool big_endian>
6019 void
6020 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6021 Sized_relobj_file<size, big_endian>* object,
6022 unsigned int r_type,
6023 Symbol* gsym)
6024 {
6025 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6026 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6027 }
6028
6029 template<int size, bool big_endian>
6030 inline void
6031 Target_aarch64<size, big_endian>::Scan::global(
6032 Symbol_table* symtab,
6033 Layout* layout,
6034 Target_aarch64<size, big_endian>* target,
6035 Sized_relobj_file<size, big_endian>* object,
6036 unsigned int data_shndx,
6037 Output_section* output_section,
6038 const elfcpp::Rela<size, big_endian>& rela,
6039 unsigned int r_type,
6040 Symbol* gsym)
6041 {
6042 // A STT_GNU_IFUNC symbol may require a PLT entry.
6043 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6044 && this->reloc_needs_plt_for_ifunc(object, r_type))
6045 target->make_plt_entry(symtab, layout, gsym);
6046
6047 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6048 Reloc_section;
6049 const AArch64_reloc_property* arp =
6050 aarch64_reloc_property_table->get_reloc_property(r_type);
6051 gold_assert(arp != NULL);
6052
6053 switch (r_type)
6054 {
6055 case elfcpp::R_AARCH64_ABS16:
6056 case elfcpp::R_AARCH64_ABS32:
6057 case elfcpp::R_AARCH64_ABS64:
6058 {
6059 // Make a PLT entry if necessary.
6060 if (gsym->needs_plt_entry())
6061 {
6062 target->make_plt_entry(symtab, layout, gsym);
6063 // Since this is not a PC-relative relocation, we may be
6064 // taking the address of a function. In that case we need to
6065 // set the entry in the dynamic symbol table to the address of
6066 // the PLT entry.
6067 if (gsym->is_from_dynobj() && !parameters->options().shared())
6068 gsym->set_needs_dynsym_value();
6069 }
6070 // Make a dynamic relocation if necessary.
6071 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6072 {
6073 if (!parameters->options().output_is_position_independent()
6074 && gsym->may_need_copy_reloc())
6075 {
6076 target->copy_reloc(symtab, layout, object,
6077 data_shndx, output_section, gsym, rela);
6078 }
6079 else if (r_type == elfcpp::R_AARCH64_ABS64
6080 && gsym->type() == elfcpp::STT_GNU_IFUNC
6081 && gsym->can_use_relative_reloc(false)
6082 && !gsym->is_from_dynobj()
6083 && !gsym->is_undefined()
6084 && !gsym->is_preemptible())
6085 {
6086 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6087 // symbol. This makes a function address in a PIE executable
6088 // match the address in a shared library that it links against.
6089 Reloc_section* rela_dyn =
6090 target->rela_irelative_section(layout);
6091 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6092 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6093 output_section, object,
6094 data_shndx,
6095 rela.get_r_offset(),
6096 rela.get_r_addend());
6097 }
6098 else if (r_type == elfcpp::R_AARCH64_ABS64
6099 && gsym->can_use_relative_reloc(false))
6100 {
6101 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6102 rela_dyn->add_global_relative(gsym,
6103 elfcpp::R_AARCH64_RELATIVE,
6104 output_section,
6105 object,
6106 data_shndx,
6107 rela.get_r_offset(),
6108 rela.get_r_addend(),
6109 false);
6110 }
6111 else
6112 {
6113 check_non_pic(object, r_type);
6114 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6115 rela_dyn = target->rela_dyn_section(layout);
6116 rela_dyn->add_global(
6117 gsym, r_type, output_section, object,
6118 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6119 }
6120 }
6121 }
6122 break;
6123
6124 case elfcpp::R_AARCH64_PREL16:
6125 case elfcpp::R_AARCH64_PREL32:
6126 case elfcpp::R_AARCH64_PREL64:
6127 // This is used to fill the GOT absolute address.
6128 if (gsym->needs_plt_entry())
6129 {
6130 target->make_plt_entry(symtab, layout, gsym);
6131 }
6132 break;
6133
6134 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6135 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6136 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6137 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6138 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6139 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6140 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6141 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6142 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6143 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6144 {
6145 if (gsym->needs_plt_entry())
6146 target->make_plt_entry(symtab, layout, gsym);
6147 // Make a dynamic relocation if necessary.
6148 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6149 {
6150 if (parameters->options().output_is_executable()
6151 && gsym->may_need_copy_reloc())
6152 {
6153 target->copy_reloc(symtab, layout, object,
6154 data_shndx, output_section, gsym, rela);
6155 }
6156 }
6157 break;
6158 }
6159
6160 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6161 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6162 {
6163 // This pair of relocations is used to access a specific GOT entry.
6164 // Note that a GOT entry holds the *address* of a symbol, so the
6165 // symbol requires a GOT entry.
6166 Output_data_got_aarch64<size, big_endian>* got =
6167 target->got_section(symtab, layout);
6168 if (gsym->final_value_is_known())
6169 {
6170 // For a STT_GNU_IFUNC symbol we want the PLT address.
6171 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6172 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6173 else
6174 got->add_global(gsym, GOT_TYPE_STANDARD);
6175 }
6176 else
6177 {
6178 // If this symbol is not fully resolved, we need to add a dynamic
6179 // relocation for it.
6180 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6181
6182 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6183 //
6184 // 1) The symbol may be defined in some other module.
6185 // 2) We are building a shared library and this is a protected
6186 // symbol; using GLOB_DAT means that the dynamic linker can use
6187 // the address of the PLT in the main executable when appropriate
6188 // so that function address comparisons work.
6189 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6190 // again so that function address comparisons work.
6191 if (gsym->is_from_dynobj()
6192 || gsym->is_undefined()
6193 || gsym->is_preemptible()
6194 || (gsym->visibility() == elfcpp::STV_PROTECTED
6195 && parameters->options().shared())
6196 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6197 && parameters->options().output_is_position_independent()))
6198 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6199 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6200 else
6201 {
6202 // For a STT_GNU_IFUNC symbol we want to write the PLT
6203 // offset into the GOT, so that function pointer
6204 // comparisons work correctly.
6205 bool is_new;
6206 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6207 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6208 else
6209 {
6210 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6211 // Tell the dynamic linker to use the PLT address
6212 // when resolving relocations.
6213 if (gsym->is_from_dynobj()
6214 && !parameters->options().shared())
6215 gsym->set_needs_dynsym_value();
6216 }
6217 if (is_new)
6218 {
6219 rela_dyn->add_global_relative(
6220 gsym, elfcpp::R_AARCH64_RELATIVE,
6221 got,
6222 gsym->got_offset(GOT_TYPE_STANDARD),
6223 0,
6224 false);
6225 }
6226 }
6227 }
6228 break;
6229 }
6230
6231 case elfcpp::R_AARCH64_TSTBR14:
6232 case elfcpp::R_AARCH64_CONDBR19:
6233 case elfcpp::R_AARCH64_JUMP26:
6234 case elfcpp::R_AARCH64_CALL26:
6235 {
6236 if (gsym->final_value_is_known())
6237 break;
6238
6239 if (gsym->is_defined() &&
6240 !gsym->is_from_dynobj() &&
6241 !gsym->is_preemptible())
6242 break;
6243
6244 // Make plt entry for function call.
6245 target->make_plt_entry(symtab, layout, gsym);
6246 break;
6247 }
6248
6249 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6250 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6251 {
6252 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6253 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6254 if (tlsopt == tls::TLSOPT_TO_LE)
6255 {
6256 layout->set_has_static_tls();
6257 break;
6258 }
6259 gold_assert(tlsopt == tls::TLSOPT_NONE);
6260
6261 // General dynamic.
6262 Output_data_got_aarch64<size, big_endian>* got =
6263 target->got_section(symtab, layout);
6264 // Create 2 consecutive entries for module index and offset.
6265 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6266 target->rela_dyn_section(layout),
6267 elfcpp::R_AARCH64_TLS_DTPMOD64,
6268 elfcpp::R_AARCH64_TLS_DTPREL64);
6269 }
6270 break;
6271
6272 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6273 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6274 {
6275 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6276 optimize_tls_reloc(!parameters->options().shared(), r_type);
6277 if (tlsopt == tls::TLSOPT_NONE)
6278 {
6279 // Create a GOT entry for the module index.
6280 target->got_mod_index_entry(symtab, layout, object);
6281 }
6282 else if (tlsopt != tls::TLSOPT_TO_LE)
6283 unsupported_reloc_local(object, r_type);
6284 }
6285 break;
6286
6287 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6288 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6289 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6290 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6291 break;
6292
6293 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6294 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6295 {
6296 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6297 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6298 if (tlsopt == tls::TLSOPT_TO_LE)
6299 break;
6300
6301 layout->set_has_static_tls();
6302 // Create a GOT entry for the tp-relative offset.
6303 Output_data_got_aarch64<size, big_endian>* got
6304 = target->got_section(symtab, layout);
6305 if (!parameters->doing_static_link())
6306 {
6307 got->add_global_with_rel(
6308 gsym, GOT_TYPE_TLS_OFFSET,
6309 target->rela_dyn_section(layout),
6310 elfcpp::R_AARCH64_TLS_TPREL64);
6311 }
6312 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6313 {
6314 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6315 unsigned int got_offset =
6316 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6317 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6318 gold_assert(addend == 0);
6319 got->add_static_reloc(got_offset,
6320 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6321 }
6322 }
6323 break;
6324
6325 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6326 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6327 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6328 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6329 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6330 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6331 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6332 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6333 layout->set_has_static_tls();
6334 if (parameters->options().shared())
6335 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6336 object->name().c_str(), r_type);
6337 break;
6338
6339 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6340 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6341 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6342 {
6343 target->define_tls_base_symbol(symtab, layout);
6344 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6345 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6346 if (tlsopt == tls::TLSOPT_NONE)
6347 {
6348 // Create reserved PLT and GOT entries for the resolver.
6349 target->reserve_tlsdesc_entries(symtab, layout);
6350
6351 // Create a double GOT entry with an R_AARCH64_TLSDESC
6352 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6353 // entry needs to be in an area in .got.plt, not .got. Call
6354 // got_section to make sure the section has been created.
6355 target->got_section(symtab, layout);
6356 Output_data_got<size, big_endian>* got =
6357 target->got_tlsdesc_section();
6358 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6359 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6360 elfcpp::R_AARCH64_TLSDESC, 0);
6361 }
6362 else if (tlsopt == tls::TLSOPT_TO_IE)
6363 {
6364 // Create a GOT entry for the tp-relative offset.
6365 Output_data_got<size, big_endian>* got
6366 = target->got_section(symtab, layout);
6367 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6368 target->rela_dyn_section(layout),
6369 elfcpp::R_AARCH64_TLS_TPREL64);
6370 }
6371 else if (tlsopt != tls::TLSOPT_TO_LE)
6372 unsupported_reloc_global(object, r_type, gsym);
6373 }
6374 break;
6375
6376 case elfcpp::R_AARCH64_TLSDESC_CALL:
6377 break;
6378
6379 default:
6380 gold_error(_("%s: unsupported reloc type in global scan"),
6381 aarch64_reloc_property_table->
6382 reloc_name_in_error_message(r_type).c_str());
6383 }
6384 return;
6385 } // End of Scan::global
6386
6387
6388 // Create the PLT section.
6389 template<int size, bool big_endian>
6390 void
6391 Target_aarch64<size, big_endian>::make_plt_section(
6392 Symbol_table* symtab, Layout* layout)
6393 {
6394 if (this->plt_ == NULL)
6395 {
6396 // Create the GOT section first.
6397 this->got_section(symtab, layout);
6398
6399 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6400 this->got_irelative_);
6401
6402 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6403 (elfcpp::SHF_ALLOC
6404 | elfcpp::SHF_EXECINSTR),
6405 this->plt_, ORDER_PLT, false);
6406
6407 // Make the sh_info field of .rela.plt point to .plt.
6408 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6409 rela_plt_os->set_info_section(this->plt_->output_section());
6410 }
6411 }
6412
6413 // Return the section for TLSDESC relocations.
6414
6415 template<int size, bool big_endian>
6416 typename Target_aarch64<size, big_endian>::Reloc_section*
6417 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6418 {
6419 return this->plt_section()->rela_tlsdesc(layout);
6420 }
6421
6422 // Create a PLT entry for a global symbol.
6423
6424 template<int size, bool big_endian>
6425 void
6426 Target_aarch64<size, big_endian>::make_plt_entry(
6427 Symbol_table* symtab,
6428 Layout* layout,
6429 Symbol* gsym)
6430 {
6431 if (gsym->has_plt_offset())
6432 return;
6433
6434 if (this->plt_ == NULL)
6435 this->make_plt_section(symtab, layout);
6436
6437 this->plt_->add_entry(symtab, layout, gsym);
6438 }
6439
6440 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6441
6442 template<int size, bool big_endian>
6443 void
6444 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6445 Symbol_table* symtab, Layout* layout,
6446 Sized_relobj_file<size, big_endian>* relobj,
6447 unsigned int local_sym_index)
6448 {
6449 if (relobj->local_has_plt_offset(local_sym_index))
6450 return;
6451 if (this->plt_ == NULL)
6452 this->make_plt_section(symtab, layout);
6453 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6454 relobj,
6455 local_sym_index);
6456 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6457 }
6458
6459 template<int size, bool big_endian>
6460 void
6461 Target_aarch64<size, big_endian>::gc_process_relocs(
6462 Symbol_table* symtab,
6463 Layout* layout,
6464 Sized_relobj_file<size, big_endian>* object,
6465 unsigned int data_shndx,
6466 unsigned int sh_type,
6467 const unsigned char* prelocs,
6468 size_t reloc_count,
6469 Output_section* output_section,
6470 bool needs_special_offset_handling,
6471 size_t local_symbol_count,
6472 const unsigned char* plocal_symbols)
6473 {
6474 if (sh_type == elfcpp::SHT_REL)
6475 {
6476 return;
6477 }
6478
6479 gold::gc_process_relocs<
6480 size, big_endian,
6481 Target_aarch64<size, big_endian>,
6482 elfcpp::SHT_RELA,
6483 typename Target_aarch64<size, big_endian>::Scan,
6484 typename Target_aarch64<size, big_endian>::Relocatable_size_for_reloc>(
6485 symtab,
6486 layout,
6487 this,
6488 object,
6489 data_shndx,
6490 prelocs,
6491 reloc_count,
6492 output_section,
6493 needs_special_offset_handling,
6494 local_symbol_count,
6495 plocal_symbols);
6496 }
6497
6498 // Scan relocations for a section.
6499
6500 template<int size, bool big_endian>
6501 void
6502 Target_aarch64<size, big_endian>::scan_relocs(
6503 Symbol_table* symtab,
6504 Layout* layout,
6505 Sized_relobj_file<size, big_endian>* object,
6506 unsigned int data_shndx,
6507 unsigned int sh_type,
6508 const unsigned char* prelocs,
6509 size_t reloc_count,
6510 Output_section* output_section,
6511 bool needs_special_offset_handling,
6512 size_t local_symbol_count,
6513 const unsigned char* plocal_symbols)
6514 {
6515 if (sh_type == elfcpp::SHT_REL)
6516 {
6517 gold_error(_("%s: unsupported REL reloc section"),
6518 object->name().c_str());
6519 return;
6520 }
6521 gold::scan_relocs<size, big_endian, Target_aarch64, elfcpp::SHT_RELA, Scan>(
6522 symtab,
6523 layout,
6524 this,
6525 object,
6526 data_shndx,
6527 prelocs,
6528 reloc_count,
6529 output_section,
6530 needs_special_offset_handling,
6531 local_symbol_count,
6532 plocal_symbols);
6533 }
6534
6535 // Return the value to use for a dynamic symbol which requires special
6536 // treatment. This is how we support equality comparisons of function
6537 // pointers across shared library boundaries, as described in the
6538 // processor specific ABI supplement.
6539
6540 template<int size, bool big_endian>
6541 uint64_t
6542 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6543 {
6544 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6545 return this->plt_address_for_global(gsym);
6546 }
6547
6548
6549 // Finalize the sections.
6550
6551 template<int size, bool big_endian>
6552 void
6553 Target_aarch64<size, big_endian>::do_finalize_sections(
6554 Layout* layout,
6555 const Input_objects*,
6556 Symbol_table* symtab)
6557 {
6558 const Reloc_section* rel_plt = (this->plt_ == NULL
6559 ? NULL
6560 : this->plt_->rela_plt());
6561 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6562 this->rela_dyn_, true, false);
6563
6564 // Emit any relocs we saved in an attempt to avoid generating COPY
6565 // relocs.
6566 if (this->copy_relocs_.any_saved_relocs())
6567 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6568
6569 // Fill in some more dynamic tags.
6570 Output_data_dynamic* const odyn = layout->dynamic_data();
6571 if (odyn != NULL)
6572 {
6573 if (this->plt_ != NULL
6574 && this->plt_->output_section() != NULL
6575 && this->plt_->has_tlsdesc_entry())
6576 {
6577 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6578 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6579 this->got_->finalize_data_size();
6580 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6581 this->plt_, plt_offset);
6582 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6583 this->got_, got_offset);
6584 }
6585 }
6586
6587 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6588 // the .got.plt section.
6589 Symbol* sym = this->global_offset_table_;
6590 if (sym != NULL)
6591 {
6592 uint64_t data_size = this->got_plt_->current_data_size();
6593 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6594
6595 // If the .got section is more than 0x8000 bytes, we add
6596 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6597 // bit relocations have a greater chance of working.
6598 if (data_size >= 0x8000)
6599 symtab->get_sized_symbol<size>(sym)->set_value(
6600 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6601 }
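// For example (illustrative): if the table grows to 0x9000 bytes, the symbol
// is biased by 0x8000 so that a signed 16-bit GOT-relative offset can reach
// entries on both sides of _GLOBAL_OFFSET_TABLE_ rather than only the first
// 32KB.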
6602
6603 if (parameters->doing_static_link()
6604 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6605 {
6606 // If linking statically, make sure that the __rela_iplt symbols
6607 // were defined if necessary, even if we didn't create a PLT.
6608 static const Define_symbol_in_segment syms[] =
6609 {
6610 {
6611 "__rela_iplt_start", // name
6612 elfcpp::PT_LOAD, // segment_type
6613 elfcpp::PF_W, // segment_flags_set
6614 elfcpp::PF(0), // segment_flags_clear
6615 0, // value
6616 0, // size
6617 elfcpp::STT_NOTYPE, // type
6618 elfcpp::STB_GLOBAL, // binding
6619 elfcpp::STV_HIDDEN, // visibility
6620 0, // nonvis
6621 Symbol::SEGMENT_START, // offset_from_base
6622 true // only_if_ref
6623 },
6624 {
6625 "__rela_iplt_end", // name
6626 elfcpp::PT_LOAD, // segment_type
6627 elfcpp::PF_W, // segment_flags_set
6628 elfcpp::PF(0), // segment_flags_clear
6629 0, // value
6630 0, // size
6631 elfcpp::STT_NOTYPE, // type
6632 elfcpp::STB_GLOBAL, // binding
6633 elfcpp::STV_HIDDEN, // visibility
6634 0, // nonvis
6635 Symbol::SEGMENT_START, // offset_from_base
6636 true // only_if_ref
6637 }
6638 };
6639
6640 symtab->define_symbols(layout, 2, syms,
6641 layout->script_options()->saw_sections_clause());
6642 }
6643
6644 return;
6645 }
6646
6647 // Perform a relocation.
6648
6649 template<int size, bool big_endian>
6650 inline bool
6651 Target_aarch64<size, big_endian>::Relocate::relocate(
6652 const Relocate_info<size, big_endian>* relinfo,
6653 Target_aarch64<size, big_endian>* target,
6654 Output_section* ,
6655 size_t relnum,
6656 const elfcpp::Rela<size, big_endian>& rela,
6657 unsigned int r_type,
6658 const Sized_symbol<size>* gsym,
6659 const Symbol_value<size>* psymval,
6660 unsigned char* view,
6661 typename elfcpp::Elf_types<size>::Elf_Addr address,
6662 section_size_type /* view_size */)
6663 {
6664 if (view == NULL)
6665 return true;
6666
6667 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6668
6669 const AArch64_reloc_property* reloc_property =
6670 aarch64_reloc_property_table->get_reloc_property(r_type);
6671
6672 if (reloc_property == NULL)
6673 {
6674 std::string reloc_name =
6675 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6676 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6677 _("cannot relocate %s in object file"),
6678 reloc_name.c_str());
6679 return true;
6680 }
6681
6682 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6683
6684 // Pick the value to use for symbols defined in the PLT.
6685 Symbol_value<size> symval;
6686 if (gsym != NULL
6687 && gsym->use_plt_offset(reloc_property->reference_flags()))
6688 {
6689 symval.set_output_value(target->plt_address_for_global(gsym));
6690 psymval = &symval;
6691 }
6692 else if (gsym == NULL && psymval->is_ifunc_symbol())
6693 {
6694 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6695 if (object->local_has_plt_offset(r_sym))
6696 {
6697 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6698 psymval = &symval;
6699 }
6700 }
6701
6702 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6703
6704 // Get the GOT offset if needed.
6705 // For aarch64, the GOT pointer points to the start of the GOT section.
6706 bool have_got_offset = false;
6707 int got_offset = 0;
6708 int got_base = (target->got_ != NULL
6709 ? (target->got_->current_data_size() >= 0x8000
6710 ? 0x8000 : 0)
6711 : 0);
6712 switch (r_type)
6713 {
6714 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6715 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6716 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6717 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6718 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6719 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6720 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6721 case elfcpp::R_AARCH64_GOTREL64:
6722 case elfcpp::R_AARCH64_GOTREL32:
6723 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6724 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6725 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6726 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6727 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6728 if (gsym != NULL)
6729 {
6730 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6731 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
6732 }
6733 else
6734 {
6735 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6736 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6737 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6738 - got_base);
6739 }
6740 have_got_offset = true;
6741 break;
6742
6743 default:
6744 break;
6745 }
6746
6747 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
6748 typename elfcpp::Elf_types<size>::Elf_Addr value;
6749 switch (r_type)
6750 {
6751 case elfcpp::R_AARCH64_NONE:
6752 break;
6753
6754 case elfcpp::R_AARCH64_ABS64:
6755 reloc_status = Reloc::template rela_ua<64>(
6756 view, object, psymval, addend, reloc_property);
6757 break;
6758
6759 case elfcpp::R_AARCH64_ABS32:
6760 reloc_status = Reloc::template rela_ua<32>(
6761 view, object, psymval, addend, reloc_property);
6762 break;
6763
6764 case elfcpp::R_AARCH64_ABS16:
6765 reloc_status = Reloc::template rela_ua<16>(
6766 view, object, psymval, addend, reloc_property);
6767 break;
6768
6769 case elfcpp::R_AARCH64_PREL64:
6770 reloc_status = Reloc::template pcrela_ua<64>(
6771 view, object, psymval, addend, address, reloc_property);
6772 break;
6773
6774 case elfcpp::R_AARCH64_PREL32:
6775 reloc_status = Reloc::template pcrela_ua<32>(
6776 view, object, psymval, addend, address, reloc_property);
6777 break;
6778
6779 case elfcpp::R_AARCH64_PREL16:
6780 reloc_status = Reloc::template pcrela_ua<16>(
6781 view, object, psymval, addend, address, reloc_property);
6782 break;
6783
6784 case elfcpp::R_AARCH64_LD_PREL_LO19:
6785 reloc_status = Reloc::template pcrela_general<32>(
6786 view, object, psymval, addend, address, reloc_property);
6787 break;
6788
6789 case elfcpp::R_AARCH64_ADR_PREL_LO21:
6790 reloc_status = Reloc::adr(view, object, psymval, addend,
6791 address, reloc_property);
6792 break;
6793
6794 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
6795 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
6796 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
6797 reloc_property);
6798 break;
6799
6800 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
6801 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
6802 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
6803 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
6804 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
6805 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
6806 reloc_status = Reloc::template rela_general<32>(
6807 view, object, psymval, addend, reloc_property);
6808 break;
6809
6810 case elfcpp::R_AARCH64_CALL26:
6811 if (this->skip_call_tls_get_addr_)
6812 {
6813 // Double check that the TLSGD insn has been optimized away.
6814 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
6815 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
6816 reinterpret_cast<Insntype*>(view));
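// 0x91000000 is the fixed opcode field of "add Xd, Xn, #imm"; after the
// GD/IE -> LE relaxation the call site is expected to have been rewritten
// into such an add, so only the opcode is sanity-checked here.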
6817 gold_assert((insn & 0xff000000) == 0x91000000);
6818
6819 reloc_status = Reloc::STATUS_OKAY;
6820 this->skip_call_tls_get_addr_ = false;
6821 // Return false to stop further processing this reloc.
6822 return false;
6823 }
6824 // Fallthrough
6825 case elfcpp::R_AARCH64_JUMP26:
6826 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
6827 gsym, psymval, object,
6828 target->stub_group_size_))
6829 break;
6830 // Fallthrough
6831 case elfcpp::R_AARCH64_TSTBR14:
6832 case elfcpp::R_AARCH64_CONDBR19:
6833 reloc_status = Reloc::template pcrela_general<32>(
6834 view, object, psymval, addend, address, reloc_property);
6835 break;
6836
6837 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6838 gold_assert(have_got_offset);
6839 value = target->got_->address() + got_base + got_offset;
6840 reloc_status = Reloc::adrp(view, value + addend, address);
6841 break;
6842
6843 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6844 gold_assert(have_got_offset);
6845 value = target->got_->address() + got_base + got_offset;
6846 reloc_status = Reloc::template rela_general<32>(
6847 view, value, addend, reloc_property);
6848 break;
6849
6850 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6851 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6852 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6853 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6854 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6855 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6856 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6857 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6858 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6859 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6860 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6861 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6862 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6863 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6864 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6865 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6866 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6867 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6868 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6869 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6870 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6871 case elfcpp::R_AARCH64_TLSDESC_CALL:
6872 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
6873 gsym, psymval, view, address);
6874 break;
6875
6876 // These are dynamic relocations, which are unexpected when linking.
6877 case elfcpp::R_AARCH64_COPY:
6878 case elfcpp::R_AARCH64_GLOB_DAT:
6879 case elfcpp::R_AARCH64_JUMP_SLOT:
6880 case elfcpp::R_AARCH64_RELATIVE:
6881 case elfcpp::R_AARCH64_IRELATIVE:
6882 case elfcpp::R_AARCH64_TLS_DTPREL64:
6883 case elfcpp::R_AARCH64_TLS_DTPMOD64:
6884 case elfcpp::R_AARCH64_TLS_TPREL64:
6885 case elfcpp::R_AARCH64_TLSDESC:
6886 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6887 _("unexpected reloc %u in object file"),
6888 r_type);
6889 break;
6890
6891 default:
6892 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6893 _("unsupported reloc %s"),
6894 reloc_property->name().c_str());
6895 break;
6896 }
6897
6898 // Report any errors.
6899 switch (reloc_status)
6900 {
6901 case Reloc::STATUS_OKAY:
6902 break;
6903 case Reloc::STATUS_OVERFLOW:
6904 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6905 _("relocation overflow in %s"),
6906 reloc_property->name().c_str());
6907 break;
6908 case Reloc::STATUS_BAD_RELOC:
6909 gold_error_at_location(
6910 relinfo,
6911 relnum,
6912 rela.get_r_offset(),
6913 _("unexpected opcode while processing relocation %s"),
6914 reloc_property->name().c_str());
6915 break;
6916 default:
6917 gold_unreachable();
6918 }
6919
6920 return true;
6921 }
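
// Editor's sketch (not part of gold): the page arithmetic that the adrp()
// helper used in the PG_HI21 and GOT_PAGE cases above is expected to
// perform.  For a PG_HI21-style relocation the 21-bit immediate encodes
// Page(S + A) - Page(P), where Page(x) clears the low 12 bits.  The names
// below are illustrative only:
//
//   // uint64_t page(uint64_t x) { return x & ~static_cast<uint64_t>(0xfff); }
//   // imm21 = (page(sym_value + addend) - page(place)) >> 12;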
6922
6923
6924 template<int size, bool big_endian>
6925 inline
6926 typename AArch64_relocate_functions<size, big_endian>::Status
6927 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
6928 const Relocate_info<size, big_endian>* relinfo,
6929 Target_aarch64<size, big_endian>* target,
6930 size_t relnum,
6931 const elfcpp::Rela<size, big_endian>& rela,
6932 unsigned int r_type, const Sized_symbol<size>* gsym,
6933 const Symbol_value<size>* psymval,
6934 unsigned char* view,
6935 typename elfcpp::Elf_types<size>::Elf_Addr address)
6936 {
6937 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
6938 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
6939
6940 Output_segment* tls_segment = relinfo->layout->tls_segment();
6941 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6942 const AArch64_reloc_property* reloc_property =
6943 aarch64_reloc_property_table->get_reloc_property(r_type);
6944 gold_assert(reloc_property != NULL);
6945
6946 const bool is_final = (gsym == NULL
6947 ? !parameters->options().shared()
6948 : gsym->final_value_is_known());
6949 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6950 optimize_tls_reloc(is_final, r_type);
6951
6952 Sized_relobj_file<size, big_endian>* object = relinfo->object;
6953 int tls_got_offset_type;
6954 switch (r_type)
6955 {
6956 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6957 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
6958 {
6959 if (tlsopt == tls::TLSOPT_TO_LE)
6960 {
6961 if (tls_segment == NULL)
6962 {
6963 gold_assert(parameters->errors()->error_count() > 0
6964 || issue_undefined_symbol_error(gsym));
6965 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
6966 }
6967 return tls_gd_to_le(relinfo, target, rela, r_type, view,
6968 psymval);
6969 }
6970 else if (tlsopt == tls::TLSOPT_NONE)
6971 {
6972 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
6973 // First, get the address of the GOT entry.
6974 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
6975 if (gsym != NULL)
6976 {
6977 gold_assert(gsym->has_got_offset(tls_got_offset_type));
6978 got_entry_address = target->got_->address() +
6979 gsym->got_offset(tls_got_offset_type);
6980 }
6981 else
6982 {
6983 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6984 gold_assert(
6985 object->local_has_got_offset(r_sym, tls_got_offset_type));
6986 got_entry_address = target->got_->address() +
6987 object->local_got_offset(r_sym, tls_got_offset_type);
6988 }
6989
6990 // Relocate the address into the adrp/ld or adrp/add pair.
6991 switch (r_type)
6992 {
6993 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6994 return aarch64_reloc_funcs::adrp(
6995 view, got_entry_address + addend, address);
6996
6997 break;
6998
6999 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7000 return aarch64_reloc_funcs::template rela_general<32>(
7001 view, got_entry_address, addend, reloc_property);
7002 break;
7003
7004 default:
7005 gold_unreachable();
7006 }
7007 }
7008 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7009 _("unsupported gd_to_ie relaxation on %u"),
7010 r_type);
7011 }
7012 break;
7013
7014 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7015 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7016 {
7017 if (tlsopt == tls::TLSOPT_TO_LE)
7018 {
7019 if (tls_segment == NULL)
7020 {
7021 gold_assert(parameters->errors()->error_count() > 0
7022 || issue_undefined_symbol_error(gsym));
7023 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7024 }
7025 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7026 psymval);
7027 }
7028
7029 gold_assert(tlsopt == tls::TLSOPT_NONE);
7030 // Relocate the field with the offset of the GOT entry for
7031 // the module index.
7032 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7033 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7034 target->got_->address());
7035
7036 switch (r_type)
7037 {
7038 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7039 return aarch64_reloc_funcs::adrp(
7040 view, got_entry_address + addend, address);
7041 break;
7042
7043 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7044 return aarch64_reloc_funcs::template rela_general<32>(
7045 view, got_entry_address, addend, reloc_property);
7046 break;
7047
7048 default:
7049 gold_unreachable();
7050 }
7051 }
7052 break;
7053
7054 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7055 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7056 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7057 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7058 {
7059 AArch64_address value = psymval->value(object, 0);
7060 if (tlsopt == tls::TLSOPT_TO_LE)
7061 {
7062 if (tls_segment == NULL)
7063 {
7064 gold_assert(parameters->errors()->error_count() > 0
7065 || issue_undefined_symbol_error(gsym));
7066 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7067 }
7068 }
7069 switch (r_type)
7070 {
7071 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7072 return aarch64_reloc_funcs::movnz(view, value + addend,
7073 reloc_property);
7074 break;
7075
7076 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7077 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7078 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7079 return aarch64_reloc_funcs::template rela_general<32>(
7080 view, value, addend, reloc_property);
7081 break;
7082
7083 default:
7084 gold_unreachable();
7085 }
7086 // We should never reach here.
7087 }
7088 break;
7089
7090 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7091 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7092 {
7093 if (tlsopt == tls::TLSOPT_TO_LE)
7094 {
7095 if (tls_segment == NULL)
7096 {
7097 gold_assert(parameters->errors()->error_count() > 0
7098 || issue_undefined_symbol_error(gsym));
7099 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7100 }
7101 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7102 psymval);
7103 }
7104 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7105
7106 // First, get the address of the GOT entry.
7107 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7108 if (gsym != NULL)
7109 {
7110 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7111 got_entry_address = target->got_->address() +
7112 gsym->got_offset(tls_got_offset_type);
7113 }
7114 else
7115 {
7116 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7117 gold_assert(
7118 object->local_has_got_offset(r_sym, tls_got_offset_type));
7119 got_entry_address = target->got_->address() +
7120 object->local_got_offset(r_sym, tls_got_offset_type);
7121 }
7122 // Relocate the address into the adrp/ld or adrp/add pair.
7123 switch (r_type)
7124 {
7125 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7126 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7127 address);
7128 break;
7129 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7130 return aarch64_reloc_funcs::template rela_general<32>(
7131 view, got_entry_address, addend, reloc_property);
7132 default:
7133 gold_unreachable();
7134 }
7135 }
7136 // We shall never reach here.
7137 break;
7138
7139 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7140 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7141 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7142 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7143 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7144 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7145 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7146 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7147 {
7148 gold_assert(tls_segment != NULL);
7149 AArch64_address value = psymval->value(object, 0);
7150
7151 if (!parameters->options().shared())
7152 {
7153 AArch64_address aligned_tcb_size =
7154 align_address(target->tcb_size(),
7155 tls_segment->maximum_alignment());
7156 value += aligned_tcb_size;
7157 switch (r_type)
7158 {
7159 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7160 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7161 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7162 return aarch64_reloc_funcs::movnz(view, value + addend,
7163 reloc_property);
7164 default:
7165 return aarch64_reloc_funcs::template
7166 rela_general<32>(view,
7167 value,
7168 addend,
7169 reloc_property);
7170 }
7171 }
7172 else
7173 gold_error(_("%s: unsupported reloc %u "
7174 "in non-static TLSLE mode."),
7175 object->name().c_str(), r_type);
7176 }
7177 break;
7178
7179 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7180 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7181 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7182 case elfcpp::R_AARCH64_TLSDESC_CALL:
7183 {
7184 if (tlsopt == tls::TLSOPT_TO_LE)
7185 {
7186 if (tls_segment == NULL)
7187 {
7188 gold_assert(parameters->errors()->error_count() > 0
7189 || issue_undefined_symbol_error(gsym));
7190 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7191 }
7192 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7193 view, psymval);
7194 }
7195 else
7196 {
7197 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7198 ? GOT_TYPE_TLS_OFFSET
7199 : GOT_TYPE_TLS_DESC);
7200 unsigned int got_tlsdesc_offset = 0;
7201 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7202 && tlsopt == tls::TLSOPT_NONE)
7203 {
7204 // We created GOT entries in the .got.tlsdesc portion of the
7205 // .got.plt section, but the offset stored in the symbol is the
7206 // offset within .got.tlsdesc.
7207 got_tlsdesc_offset = (target->got_->data_size()
7208 + target->got_plt_section()->data_size());
7209 }
7210 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7211 if (gsym != NULL)
7212 {
7213 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7214 got_entry_address = target->got_->address()
7215 + got_tlsdesc_offset
7216 + gsym->got_offset(tls_got_offset_type);
7217 }
7218 else
7219 {
7220 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7221 gold_assert(
7222 object->local_has_got_offset(r_sym, tls_got_offset_type));
7223 got_entry_address = target->got_->address() +
7224 got_tlsdesc_offset +
7225 object->local_got_offset(r_sym, tls_got_offset_type);
7226 }
7227 if (tlsopt == tls::TLSOPT_TO_IE)
7228 {
7229 if (tls_segment == NULL)
7230 {
7231 gold_assert(parameters->errors()->error_count() > 0
7232 || issue_undefined_symbol_error(gsym));
7233 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7234 }
7235 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7236 view, psymval, got_entry_address,
7237 address);
7238 }
7239
7240 // Now do tlsdesc relocation.
7241 switch (r_type)
7242 {
7243 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7244 return aarch64_reloc_funcs::adrp(view,
7245 got_entry_address + addend,
7246 address);
7247 break;
7248 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7249 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7250 return aarch64_reloc_funcs::template rela_general<32>(
7251 view, got_entry_address, addend, reloc_property);
7252 break;
7253 case elfcpp::R_AARCH64_TLSDESC_CALL:
7254 return aarch64_reloc_funcs::STATUS_OKAY;
7255 break;
7256 default:
7257 gold_unreachable();
7258 }
7259 }
7260 }
7261 break;
7262
7263 default:
7264 gold_error(_("%s: unsupported TLS reloc %u."),
7265 object->name().c_str(), r_type);
7266 }
7267 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7268 } // End of relocate_tls.
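
// Editor's sketch (illustration only, assumptions noted): how the TLSLE
// cases above form the thread-pointer-relative offset.  On AArch64, TP is
// assumed to point at the TCB with the module's TLS block following it,
// aligned to the segment's maximum alignment, so the offset applied to the
// instruction is roughly:
//
//   // tcb_size and align stand for target->tcb_size() and
//   // tls_segment->maximum_alignment(); the function name is illustrative.
//   // uint64_t tprel(uint64_t sym_value, uint64_t tcb_size, uint64_t align)
//   // { return sym_value + ((tcb_size + align - 1) & ~(align - 1)); }
//
// e.g. with tcb_size = 16 and align = 16, a variable at offset 0 in the
// TLS segment ends up at TP + 16.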
7269
7270
7271 template<int size, bool big_endian>
7272 inline
7273 typename AArch64_relocate_functions<size, big_endian>::Status
7274 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7275 const Relocate_info<size, big_endian>* relinfo,
7276 Target_aarch64<size, big_endian>* target,
7277 const elfcpp::Rela<size, big_endian>& rela,
7278 unsigned int r_type,
7279 unsigned char* view,
7280 const Symbol_value<size>* psymval)
7281 {
7282 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7283 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7284 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7285
7286 Insntype* ip = reinterpret_cast<Insntype*>(view);
7287 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7288 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7289 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7290
7291 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7292 {
7293 // This is the 2nd reloc; the optimization should already have been
7294 // done.
7295 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7296 return aarch64_reloc_funcs::STATUS_OKAY;
7297 }
7298
7299 // The original sequence is -
7300 // 90000000 adrp x0, 0 <main>
7301 // 91000000 add x0, x0, #0x0
7302 // 94000000 bl 0 <__tls_get_addr>
7303 // optimized to sequence -
7304 // d53bd040 mrs x0, tpidr_el0
7305 // 91400000 add x0, x0, #0x0, lsl #12
7306 // 91000000 add x0, x0, #0x0
7307
7308 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7309 // encounter the first relocation, "R_AARCH64_TLSGD_ADR_PAGE21", because we
7310 // have to change "bl __tls_get_addr", which has no corresponding tls
7311 // relocation type. So before proceeding, we need to make sure the compiler
7312 // has not changed the sequence.
7313 if (!(insn1 == 0x90000000 // adrp x0, 0
7314 && insn2 == 0x91000000 // add x0, x0, #0x0
7315 && insn3 == 0x94000000)) // bl 0
7316 {
7317 // Ideally we should give up the gd_to_le relaxation and do a gd access.
7318 // However, the gd_to_le relaxation decision was made early in the
7319 // scan stage, where we did not allocate any GOT entry for this
7320 // symbol. Therefore we have to exit and report an error now.
7321 gold_error(_("unexpected reloc insn sequence while relaxing "
7322 "tls gd to le for reloc %u."), r_type);
7323 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7324 }
7325
7326 // Write new insns.
7327 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7328 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7329 insn3 = 0x91000000; // add x0, x0, #0x0
7330 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7331 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7332 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7333
7334 // Calculate tprel value.
7335 Output_segment* tls_segment = relinfo->layout->tls_segment();
7336 gold_assert(tls_segment != NULL);
7337 AArch64_address value = psymval->value(relinfo->object, 0);
7338 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7339 AArch64_address aligned_tcb_size =
7340 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7341 AArch64_address x = value + aligned_tcb_size;
7342
7343 // After new insns are written, apply TLSLE relocs.
7344 const AArch64_reloc_property* rp1 =
7345 aarch64_reloc_property_table->get_reloc_property(
7346 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7347 const AArch64_reloc_property* rp2 =
7348 aarch64_reloc_property_table->get_reloc_property(
7349 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7350 gold_assert(rp1 != NULL && rp2 != NULL);
7351
7352 typename aarch64_reloc_funcs::Status s1 =
7353 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7354 x,
7355 addend,
7356 rp1);
7357 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7358 return s1;
7359
7360 typename aarch64_reloc_funcs::Status s2 =
7361 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7362 x,
7363 addend,
7364 rp2);
7365
7366 this->skip_call_tls_get_addr_ = true;
7367 return s2;
7368 } // End of tls_gd_to_le
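
// Editor's sketch: how the two TLSLE relocs applied above split the
// computed offset "x" across the rewritten add instructions.  TPREL_HI12
// carries bits [23:12] (shifted by the "lsl #12" form of add) and
// TPREL_LO12 carries bits [11:0].  E.g. for x = 0x12345:
//
//   // uint32_t hi12 = (0x12345 >> 12) & 0xfff;  // 0x12  -> add x0, x0, #0x12, lsl #12
//   // uint32_t lo12 = 0x12345 & 0xfff;          // 0x345 -> add x0, x0, #0x345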
7369
7370
7371 template<int size, bool big_endian>
7372 inline
7373 typename AArch64_relocate_functions<size, big_endian>::Status
7374 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7375 const Relocate_info<size, big_endian>* relinfo,
7376 Target_aarch64<size, big_endian>* target,
7377 const elfcpp::Rela<size, big_endian>& rela,
7378 unsigned int r_type,
7379 unsigned char* view,
7380 const Symbol_value<size>* psymval)
7381 {
7382 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7383 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7384 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7385
7386 Insntype* ip = reinterpret_cast<Insntype*>(view);
7387 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7388 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7389 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7390
7391 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7392 {
7393 // This is the 2nd reloc; the optimization should already have been
7394 // done.
7395 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7396 return aarch64_reloc_funcs::STATUS_OKAY;
7397 }
7398
7399 // The original sequence is -
7400 // 90000000 adrp x0, 0 <main>
7401 // 91000000 add x0, x0, #0x0
7402 // 94000000 bl 0 <__tls_get_addr>
7403 // optimized to sequence -
7404 // d53bd040 mrs x0, tpidr_el0
7405 // 91400000 add x0, x0, #0x0, lsl #12
7406 // 91000000 add x0, x0, #0x0
7407
7408 // Unlike tls_ie_to_le, we change all 3 insns in one function call when we
7409 // encounter the first relocation, "R_AARCH64_TLSLD_ADR_PAGE21", because we
7410 // have to change "bl __tls_get_addr", which has no corresponding tls
7411 // relocation type. So before proceeding, we need to make sure the compiler
7412 // has not changed the sequence.
7413 if (!(insn1 == 0x90000000 // adrp x0, 0
7414 && insn2 == 0x91000000 // add x0, x0, #0x0
7415 && insn3 == 0x94000000)) // bl 0
7416 {
7417 // Ideally we should give up the ld_to_le relaxation and do an ld access.
7418 // However, the ld_to_le relaxation decision was made early in the
7419 // scan stage, where we did not allocate any GOT entry for this
7420 // symbol. Therefore we have to exit and report an error now.
7421 gold_error(_("unexpected reloc insn sequence while relaxing "
7422 "tls ld to le for reloc %u."), r_type);
7423 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7424 }
7425
7426 // Write new insns.
7427 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7428 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7429 insn3 = 0x91000000; // add x0, x0, #0x0
7430 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7431 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7432 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7433
7434 // Calculate tprel value.
7435 Output_segment* tls_segment = relinfo->layout->tls_segment();
7436 gold_assert(tls_segment != NULL);
7437 AArch64_address value = psymval->value(relinfo->object, 0);
7438 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7439 AArch64_address aligned_tcb_size =
7440 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7441 AArch64_address x = value + aligned_tcb_size;
7442
7443 // After new insns are written, apply TLSLE relocs.
7444 const AArch64_reloc_property* rp1 =
7445 aarch64_reloc_property_table->get_reloc_property(
7446 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7447 const AArch64_reloc_property* rp2 =
7448 aarch64_reloc_property_table->get_reloc_property(
7449 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7450 gold_assert(rp1 != NULL && rp2 != NULL);
7451
7452 typename aarch64_reloc_funcs::Status s1 =
7453 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7454 x,
7455 addend,
7456 rp1);
7457 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7458 return s1;
7459
7460 typename aarch64_reloc_funcs::Status s2 =
7461 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7462 x,
7463 addend,
7464 rp2);
7465
7466 this->skip_call_tls_get_addr_ = true;
7467 return s2;
7468
7469 } // End of tls_ld_to_le
7470
7471 template<int size, bool big_endian>
7472 inline
7473 typename AArch64_relocate_functions<size, big_endian>::Status
7474 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7475 const Relocate_info<size, big_endian>* relinfo,
7476 Target_aarch64<size, big_endian>* target,
7477 const elfcpp::Rela<size, big_endian>& rela,
7478 unsigned int r_type,
7479 unsigned char* view,
7480 const Symbol_value<size>* psymval)
7481 {
7482 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7483 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7484 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7485
7486 AArch64_address value = psymval->value(relinfo->object, 0);
7487 Output_segment* tls_segment = relinfo->layout->tls_segment();
7488 AArch64_address aligned_tcb_address =
7489 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7490 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7491 AArch64_address x = value + addend + aligned_tcb_address;
7492 // "x" is the offset to tp, we can only do this if x is within
7493 // range [0, 2^32-1]
7494 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7495 {
7496 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7497 r_type);
7498 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7499 }
7500
7501 Insntype* ip = reinterpret_cast<Insntype*>(view);
7502 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7503 unsigned int regno;
7504 Insntype newinsn;
7505 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7506 {
7507 // Generate movz.
7508 regno = (insn & 0x1f);
7509 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7510 }
7511 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7512 {
7513 // Generate movk.
7514 regno = (insn & 0x1f);
7515 gold_assert(regno == ((insn >> 5) & 0x1f));
7516 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7517 }
7518 else
7519 gold_unreachable();
7520
7521 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7522 return aarch64_reloc_funcs::STATUS_OKAY;
7523 } // End of tls_ie_to_le
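
// Editor's sketch of the rewrite above, assuming the standard MOVZ/MOVK
// encodings: 0xd2a00000 is "movz Xd, #imm16, lsl #16" and 0xf2800000 is
// "movk Xd, #imm16"; the 16-bit immediate occupies bits [20:5] and the
// destination register bits [4:0], hence the masks used above.  E.g. for
// x = 0x12345678 and register x0:
//
//   // movz = 0xd2a00000 | 0 | (((0x12345678u >> 16) & 0xffff) << 5);
//   // movk = 0xf2800000 | 0 | ((0x12345678u & 0xffff) << 5);
//   // -> movz x0, #0x1234, lsl #16 ; movk x0, #0x5678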
7524
7525
7526 template<int size, bool big_endian>
7527 inline
7528 typename AArch64_relocate_functions<size, big_endian>::Status
7529 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7530 const Relocate_info<size, big_endian>* relinfo,
7531 Target_aarch64<size, big_endian>* target,
7532 const elfcpp::Rela<size, big_endian>& rela,
7533 unsigned int r_type,
7534 unsigned char* view,
7535 const Symbol_value<size>* psymval)
7536 {
7537 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7538 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7539 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7540
7541 // TLSDESC-GD sequence is like:
7542 // adrp x0, :tlsdesc:v1
7543 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7544 // add x0, x0, :tlsdesc_lo12:v1
7545 // .tlsdesccall v1
7546 // blr x1
7547 // After desc_gd_to_le optimization, the sequence will be like:
7548 // movz x0, #0x0, lsl #16
7549 // movk x0, #0x10
7550 // nop
7551 // nop
7552
7553 // Calculate tprel value.
7554 Output_segment* tls_segment = relinfo->layout->tls_segment();
7555 gold_assert(tls_segment != NULL);
7556 Insntype* ip = reinterpret_cast<Insntype*>(view);
7557 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7558 AArch64_address value = psymval->value(relinfo->object, addend);
7559 AArch64_address aligned_tcb_size =
7560 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7561 AArch64_address x = value + aligned_tcb_size;
7562 // "x" is the offset from tp; we can only do this if x is within the
7563 // range [0, 2^32-1]. If x is out of range, fail and exit.
7564 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7565 {
7566 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
7567 "cannot do gd_to_le relaxation."), r_type);
7568 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7569 }
7570 Insntype newinsn;
7571 switch (r_type)
7572 {
7573 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7574 case elfcpp::R_AARCH64_TLSDESC_CALL:
7575 // Change to nop
7576 newinsn = 0xd503201f;
7577 break;
7578
7579 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7580 // Change to movz.
7581 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7582 break;
7583
7584 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7585 // Change to movk.
7586 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7587 break;
7588
7589 default:
7590 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7591 r_type);
7592 gold_unreachable();
7593 }
7594 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7595 return aarch64_reloc_funcs::STATUS_OKAY;
7596 } // End of tls_desc_gd_to_le
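
// Editor's note (illustrative): 0xd503201f is the canonical AArch64 NOP
// encoding, so for an offset x = 0x12345678 the four relocated insns of
// the TLSDESC sequence above become:
//
//   movz x0, #0x1234, lsl #16   // from R_AARCH64_TLSDESC_ADR_PAGE21
//   movk x0, #0x5678            // from R_AARCH64_TLSDESC_LD64_LO12
//   nop                         // from R_AARCH64_TLSDESC_ADD_LO12
//   nop                         // from R_AARCH64_TLSDESC_CALL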
7597
7598
7599 template<int size, bool big_endian>
7600 inline
7601 typename AArch64_relocate_functions<size, big_endian>::Status
7602 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7603 const Relocate_info<size, big_endian>* /* relinfo */,
7604 Target_aarch64<size, big_endian>* /* target */,
7605 const elfcpp::Rela<size, big_endian>& rela,
7606 unsigned int r_type,
7607 unsigned char* view,
7608 const Symbol_value<size>* /* psymval */,
7609 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7610 typename elfcpp::Elf_types<size>::Elf_Addr address)
7611 {
7612 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7613 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7614
7615 // TLSDESC-GD sequence is like:
7616 // adrp x0, :tlsdesc:v1
7617 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7618 // add x0, x0, :tlsdesc_lo12:v1
7619 // .tlsdesccall v1
7620 // blr x1
7621 // After desc_gd_to_ie optimization, the sequence will be like:
7622 // adrp x0, :tlsie:v1
7623 // ldr x0, [x0, :tlsie_lo12:v1]
7624 // nop
7625 // nop
7626
7627 Insntype* ip = reinterpret_cast<Insntype*>(view);
7628 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7629 Insntype newinsn;
7630 switch (r_type)
7631 {
7632 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7633 case elfcpp::R_AARCH64_TLSDESC_CALL:
7634 // Change to nop
7635 newinsn = 0xd503201f;
7636 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7637 break;
7638
7639 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7640 {
7641 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7642 address);
7643 }
7644 break;
7645
7646 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7647 {
7648 // Set ldr target register to be x0.
7649 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7650 insn &= 0xffffffe0;
7651 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7652 // Do relocation.
7653 const AArch64_reloc_property* reloc_property =
7654 aarch64_reloc_property_table->get_reloc_property(
7655 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7656 return aarch64_reloc_funcs::template rela_general<32>(
7657 view, got_entry_address, addend, reloc_property);
7658 }
7659 break;
7660
7661 default:
7662 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
7663 r_type);
7664 gold_unreachable();
7665 }
7666 return aarch64_reloc_funcs::STATUS_OKAY;
7667 } // End of tls_desc_gd_to_ie
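
// Editor's note: in the LD64 case above the target register of the ldr is
// forced to x0 by clearing bits [4:0] (the Rt field) with
// "insn &= 0xffffffe0", after which the IE-style GOTTPREL_LO12_NC
// relocation is applied against the IE GOT entry.  Illustratively:
//
//   // ldr x1, [x0, #:tlsdesc_lo12:v1]   becomes
//   // ldr x0, [x0, #:gottprel_lo12:v1]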
7668
7669 // Relocate section data.
7670
7671 template<int size, bool big_endian>
7672 void
7673 Target_aarch64<size, big_endian>::relocate_section(
7674 const Relocate_info<size, big_endian>* relinfo,
7675 unsigned int sh_type,
7676 const unsigned char* prelocs,
7677 size_t reloc_count,
7678 Output_section* output_section,
7679 bool needs_special_offset_handling,
7680 unsigned char* view,
7681 typename elfcpp::Elf_types<size>::Elf_Addr address,
7682 section_size_type view_size,
7683 const Reloc_symbol_changes* reloc_symbol_changes)
7684 {
7685 gold_assert(sh_type == elfcpp::SHT_RELA);
7686 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
7687 gold::relocate_section<size, big_endian, Target_aarch64, elfcpp::SHT_RELA,
7688 AArch64_relocate, gold::Default_comdat_behavior>(
7689 relinfo,
7690 this,
7691 prelocs,
7692 reloc_count,
7693 output_section,
7694 needs_special_offset_handling,
7695 view,
7696 address,
7697 view_size,
7698 reloc_symbol_changes);
7699 }
7700
7701 // Return the size of a relocation while scanning during a relocatable
7702 // link.
7703
7704 template<int size, bool big_endian>
7705 unsigned int
7706 Target_aarch64<size, big_endian>::Relocatable_size_for_reloc::
7707 get_size_for_reloc(
7708 unsigned int ,
7709 Relobj* )
7710 {
7711 // We will never support SHT_REL relocations.
7712 gold_unreachable();
7713 return 0;
7714 }
7715
7716 // Scan the relocs during a relocatable link.
7717
7718 template<int size, bool big_endian>
7719 void
7720 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
7721 Symbol_table* symtab,
7722 Layout* layout,
7723 Sized_relobj_file<size, big_endian>* object,
7724 unsigned int data_shndx,
7725 unsigned int sh_type,
7726 const unsigned char* prelocs,
7727 size_t reloc_count,
7728 Output_section* output_section,
7729 bool needs_special_offset_handling,
7730 size_t local_symbol_count,
7731 const unsigned char* plocal_symbols,
7732 Relocatable_relocs* rr)
7733 {
7734 gold_assert(sh_type == elfcpp::SHT_RELA);
7735
7736 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
7737 Relocatable_size_for_reloc> Scan_relocatable_relocs;
7738
7739 gold::scan_relocatable_relocs<size, big_endian, elfcpp::SHT_RELA,
7740 Scan_relocatable_relocs>(
7741 symtab,
7742 layout,
7743 object,
7744 data_shndx,
7745 prelocs,
7746 reloc_count,
7747 output_section,
7748 needs_special_offset_handling,
7749 local_symbol_count,
7750 plocal_symbols,
7751 rr);
7752 }
7753
7754 // Relocate a section during a relocatable link.
7755
7756 template<int size, bool big_endian>
7757 void
7758 Target_aarch64<size, big_endian>::relocate_relocs(
7759 const Relocate_info<size, big_endian>* relinfo,
7760 unsigned int sh_type,
7761 const unsigned char* prelocs,
7762 size_t reloc_count,
7763 Output_section* output_section,
7764 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
7765 const Relocatable_relocs* rr,
7766 unsigned char* view,
7767 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
7768 section_size_type view_size,
7769 unsigned char* reloc_view,
7770 section_size_type reloc_view_size)
7771 {
7772 gold_assert(sh_type == elfcpp::SHT_RELA);
7773
7774 gold::relocate_relocs<size, big_endian, elfcpp::SHT_RELA>(
7775 relinfo,
7776 prelocs,
7777 reloc_count,
7778 output_section,
7779 offset_in_output_section,
7780 rr,
7781 view,
7782 view_address,
7783 view_size,
7784 reloc_view,
7785 reloc_view_size);
7786 }
7787
7788
7789 // Return whether this is a 3-insn erratum 843419 sequence.
7790
7791 template<int size, bool big_endian>
7792 bool
7793 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
7794 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
7795 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
7796 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
7797 {
7798 unsigned rt1, rt2;
7799 bool load, pair;
7800
7801 // The 2nd insn is a single-register load or store, or a register-pair
7802 // store.
7803 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
7804 && (!pair || !load))
7805 {
7806 // The 3rd insn is a load or store instruction from the "Load/store
7807 // register (unsigned immediate)" encoding class, using Rn as the
7808 // base address register.
7809 if (Insn_utilities::aarch64_ldst_uimm(insn3)
7810 && (Insn_utilities::aarch64_rn(insn3)
7811 == Insn_utilities::aarch64_rd(insn1)))
7812 return true;
7813 }
7814 return false;
7815 }
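
// Editor's illustration of a sequence the predicate above accepts
// (register numbers are arbitrary):
//
//   adrp x0, sym               // insn1; the scanner only considers adrp's
//                              // at page offset 0xff8 or 0xffc
//   stp  x1, x2, [x3]          // insn2: single-reg load/store or pair store
//   ldr  x4, [x0, #:lo12:sym]  // insn3: "unsigned immediate" load/store
//                              // whose base equals the adrp destination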
7816
7817
7818 // Return whether this is an erratum 835769 sequence.
7819 // (Implemented similarly to elfnn-aarch64.c.)
7820
7821 template<int size, bool big_endian>
7822 bool
7823 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
7824 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
7825 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
7826 {
7827 uint32_t rt;
7828 uint32_t rt2;
7829 uint32_t rn;
7830 uint32_t rm;
7831 uint32_t ra;
7832 bool pair;
7833 bool load;
7834
7835 if (Insn_utilities::aarch64_mlxl(insn2)
7836 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
7837 {
7838 /* Any SIMD memory op is independent of the subsequent MLA
7839 by definition of the erratum. */
7840 if (Insn_utilities::aarch64_bit(insn1, 26))
7841 return true;
7842
7843 /* If not SIMD, check for integer memory ops and MLA relationship. */
7844 rn = Insn_utilities::aarch64_rn(insn2);
7845 ra = Insn_utilities::aarch64_ra(insn2);
7846 rm = Insn_utilities::aarch64_rm(insn2);
7847
7848 /* If this is a load and there's a true(RAW) dependency, we are safe
7849 and this is not an erratum sequence. */
7850 if (load &&
7851 (rt == rn || rt == rm || rt == ra
7852 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
7853 return false;
7854
7855 /* We conservatively put out stubs for all other cases (including
7856 writebacks). */
7857 return true;
7858 }
7859
7860 return false;
7861 }
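
// Editor's illustration of a pair the predicate above flags (register
// numbers are arbitrary; the exact multiply-accumulate forms matched are
// whatever aarch64_mlxl() accepts):
//
//   ldr  x1, [x2]           // insn1: memory op
//   madd x3, x4, x5, x6     // insn2: multiply-accumulate with no true
//                           // (RAW) dependency on the loaded register
//
// If the loaded register fed the multiply-accumulate (say x4 were x1),
// the code above treats the pair as safe and emits no stub.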
7862
7863
7864 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
7865
7866 template<int size, bool big_endian>
7867 void
7868 Target_aarch64<size, big_endian>::create_erratum_stub(
7869 AArch64_relobj<size, big_endian>* relobj,
7870 unsigned int shndx,
7871 section_size_type erratum_insn_offset,
7872 Address erratum_address,
7873 typename Insn_utilities::Insntype erratum_insn,
7874 int erratum_type)
7875 {
7876 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
7877 The_stub_table* stub_table = relobj->stub_table(shndx);
7878 gold_assert(stub_table != NULL);
7879 if (stub_table->find_erratum_stub(relobj,
7880 shndx,
7881 erratum_insn_offset) == NULL)
7882 {
7883 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
7884 The_erratum_stub* stub = new The_erratum_stub(
7885 relobj, erratum_type, shndx, erratum_insn_offset);
7886 stub->set_erratum_insn(erratum_insn);
7887 stub->set_erratum_address(erratum_address);
7888 // For errata ST_E_843419 and ST_E_835769, the destination address is
7889 // always the next insn after the erratum insn.
7890 stub->set_destination_address(erratum_address + BPI);
7891 stub_table->add_erratum_stub(stub);
7892 }
7893 }
7894
7895
7896 // Scan for erratum 835769 in section SHNDX over the range
7897 // [output_address + span_start, output_address + span_end). Note that we do
7898 // not share code with the scan_erratum_843419_span function, because for
7899 // 843419 we optimize by scanning only the last few insns of each page,
7900 // whereas for 835769 we need to scan every insn.
7901
7902 template<int size, bool big_endian>
7903 void
7904 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
7905 AArch64_relobj<size, big_endian>* relobj,
7906 unsigned int shndx,
7907 const section_size_type span_start,
7908 const section_size_type span_end,
7909 unsigned char* input_view,
7910 Address output_address)
7911 {
7912 typedef typename Insn_utilities::Insntype Insntype;
7913
7914 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
7915
7916 // Adjust output_address and view to the start of span.
7917 output_address += span_start;
7918 input_view += span_start;
7919
7920 section_size_type span_length = span_end - span_start;
7921 section_size_type offset = 0;
7922 for (offset = 0; offset + BPI < span_length; offset += BPI)
7923 {
7924 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
7925 Insntype insn1 = ip[0];
7926 Insntype insn2 = ip[1];
7927 if (is_erratum_835769_sequence(insn1, insn2))
7928 {
7929 Insntype erratum_insn = insn2;
7930 // "span_start + offset" is the offset for insn1. So for insn2, it is
7931 // "span_start + offset + BPI".
7932 section_size_type erratum_insn_offset = span_start + offset + BPI;
7933 Address erratum_address = output_address + offset + BPI;
7934 gold_warning(_("Erratum 835769 found and fixed at \"%s\", "
7935 "section %d, offset 0x%08x."),
7936 relobj->name().c_str(), shndx,
7937 (unsigned int)(span_start + offset));
7938
7939 this->create_erratum_stub(relobj, shndx,
7940 erratum_insn_offset, erratum_address,
7941 erratum_insn, ST_E_835769);
7942 offset += BPI; // Skip the multiply-accumulate insn.
7943 }
7944 }
7945 } // End of "Target_aarch64::scan_erratum_835769_span".
7946
7947
7948 // Scan for erratum 843419 in section SHNDX over the range
7949 // [output_address + span_start, output_address + span_end).
7950
7951 template<int size, bool big_endian>
7952 void
7953 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
7954 AArch64_relobj<size, big_endian>* relobj,
7955 unsigned int shndx,
7956 const section_size_type span_start,
7957 const section_size_type span_end,
7958 unsigned char* input_view,
7959 Address output_address)
7960 {
7961 typedef typename Insn_utilities::Insntype Insntype;
7962
7963 // Adjust output_address and view to the start of span.
7964 output_address += span_start;
7965 input_view += span_start;
7966
7967 if ((output_address & 0x03) != 0)
7968 return;
7969
7970 section_size_type offset = 0;
7971 section_size_type span_length = span_end - span_start;
7972 // The first instruction (the adrp) must be at page offset 0xFF8 or 0xFFC.
7973 unsigned int page_offset = output_address & 0xFFF;
7974 // Make sure the starting position, that is "output_address + offset",
7975 // is at page offset 0xff8 or 0xffc.
7976 if (page_offset < 0xff8)
7977 offset = 0xff8 - page_offset;
7978 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
7979 {
7980 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
7981 Insntype insn1 = ip[0];
7982 if (Insn_utilities::is_adrp(insn1))
7983 {
7984 Insntype insn2 = ip[1];
7985 Insntype insn3 = ip[2];
7986 Insntype erratum_insn;
7987 unsigned insn_offset;
7988 bool do_report = false;
7989 if (is_erratum_843419_sequence(insn1, insn2, insn3))
7990 {
7991 do_report = true;
7992 erratum_insn = insn3;
7993 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
7994 }
7995 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
7996 {
7997 // Optionally there can be an insn between insn2 and insn3.
7998 Insntype insn_opt = ip[2];
7999 // And insn_opt must not be a branch.
8000 if (!Insn_utilities::aarch64_b(insn_opt)
8001 && !Insn_utilities::aarch64_bl(insn_opt)
8002 && !Insn_utilities::aarch64_blr(insn_opt)
8003 && !Insn_utilities::aarch64_br(insn_opt))
8004 {
8005 // And insn_opt must not write to the dest reg of insn1. However,
8006 // we do a conservative scan, which means we may fix/report
8007 // more than necessary, but that doesn't hurt.
8008
8009 Insntype insn4 = ip[3];
8010 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8011 {
8012 do_report = true;
8013 erratum_insn = insn4;
8014 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8015 }
8016 }
8017 }
8018 if (do_report)
8019 {
8020 gold_warning(_("Erratum 843419 found and fixed at \"%s\", "
8021 "section %d, offset 0x%08x."),
8022 relobj->name().c_str(), shndx,
8023 (unsigned int)(span_start + offset));
8024 unsigned int erratum_insn_offset =
8025 span_start + offset + insn_offset;
8026 Address erratum_address =
8027 output_address + offset + insn_offset;
8028 create_erratum_stub(relobj, shndx,
8029 erratum_insn_offset, erratum_address,
8030 erratum_insn, ST_E_843419);
8031 }
8032 }
8033
8034 // Advance to the next candidate instruction. We only consider instruction
8035 // sequences starting at a page offset of 0xff8 or 0xffc.
8036 page_offset = (output_address + offset) & 0xfff;
8037 if (page_offset == 0xff8)
8038 offset += 4;
8039 else // (page_offset == 0xffc), we move to next page's 0xff8.
8040 offset += 0xffc;
8041 }
8042 } // End of "Target_aarch64::scan_erratum_843419_span".
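
// Editor's note on the stepping above: only page offsets 0xff8 and 0xffc
// are candidate positions for the adrp, so from 0xff8 we advance 4 bytes
// to 0xffc, and from 0xffc we advance 0xffc bytes, which lands on the
// next page's 0xff8 (0xffc + 0xffc = 0x1ff8).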
8043
8044
8045 // The selector for aarch64 object files.
8046
8047 template<int size, bool big_endian>
8048 class Target_selector_aarch64 : public Target_selector
8049 {
8050 public:
8051 Target_selector_aarch64();
8052
8053 virtual Target*
8054 do_instantiate_target()
8055 { return new Target_aarch64<size, big_endian>(); }
8056 };
8057
8058 template<>
8059 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8060 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8061 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8062 { }
8063
8064 template<>
8065 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8066 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8067 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8068 { }
8069
8070 template<>
8071 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8072 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8073 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8074 { }
8075
8076 template<>
8077 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8078 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8079 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8080 { }
8081
8082 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8083 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8084 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8085 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8086
8087 } // End anonymous namespace.