1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2015 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adrp(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x90000000; }
108
109 static unsigned int
110 aarch64_rm(const Insntype insn)
111 { return aarch64_bits(insn, 16, 5); }
112
113 static unsigned int
114 aarch64_rn(const Insntype insn)
115 { return aarch64_bits(insn, 5, 5); }
116
117 static unsigned int
118 aarch64_rd(const Insntype insn)
119 { return aarch64_bits(insn, 0, 5); }
120
121 static unsigned int
122 aarch64_rt(const Insntype insn)
123 { return aarch64_bits(insn, 0, 5); }
124
125 static unsigned int
126 aarch64_rt2(const Insntype insn)
127 { return aarch64_bits(insn, 10, 5); }
128
129 static bool
130 aarch64_b(const Insntype insn)
131 { return (insn & 0xFC000000) == 0x14000000; }
132
133 static bool
134 aarch64_bl(const Insntype insn)
135 { return (insn & 0xFC000000) == 0x94000000; }
136
137 static bool
138 aarch64_blr(const Insntype insn)
139 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
140
141 static bool
142 aarch64_br(const Insntype insn)
143 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
144
145 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
146 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
147 static bool
148 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
149
150 static bool
151 aarch64_ldst(Insntype insn)
152 { return (insn & 0x0a000000) == 0x08000000; }
153
154 static bool
155 aarch64_ldst_ex(Insntype insn)
156 { return (insn & 0x3f000000) == 0x08000000; }
157
158 static bool
159 aarch64_ldst_pcrel(Insntype insn)
160 { return (insn & 0x3b000000) == 0x18000000; }
161
162 static bool
163 aarch64_ldst_nap(Insntype insn)
164 { return (insn & 0x3b800000) == 0x28000000; }
165
166 static bool
167 aarch64_ldstp_pi(Insntype insn)
168 { return (insn & 0x3b800000) == 0x28800000; }
169
170 static bool
171 aarch64_ldstp_o(Insntype insn)
172 { return (insn & 0x3b800000) == 0x29000000; }
173
174 static bool
175 aarch64_ldstp_pre(Insntype insn)
176 { return (insn & 0x3b800000) == 0x29800000; }
177
178 static bool
179 aarch64_ldst_ui(Insntype insn)
180 { return (insn & 0x3b200c00) == 0x38000000; }
181
182 static bool
183 aarch64_ldst_piimm(Insntype insn)
184 { return (insn & 0x3b200c00) == 0x38000400; }
185
186 static bool
187 aarch64_ldst_u(Insntype insn)
188 { return (insn & 0x3b200c00) == 0x38000800; }
189
190 static bool
191 aarch64_ldst_preimm(Insntype insn)
192 { return (insn & 0x3b200c00) == 0x38000c00; }
193
194 static bool
195 aarch64_ldst_ro(Insntype insn)
196 { return (insn & 0x3b200c00) == 0x38200800; }
197
198 static bool
199 aarch64_ldst_uimm(Insntype insn)
200 { return (insn & 0x3b000000) == 0x39000000; }
201
202 static bool
203 aarch64_ldst_simd_m(Insntype insn)
204 { return (insn & 0xbfbf0000) == 0x0c000000; }
205
206 static bool
207 aarch64_ldst_simd_m_pi(Insntype insn)
208 { return (insn & 0xbfa00000) == 0x0c800000; }
209
210 static bool
211 aarch64_ldst_simd_s(Insntype insn)
212 { return (insn & 0xbf9f0000) == 0x0d000000; }
213
214 static bool
215 aarch64_ldst_simd_s_pi(Insntype insn)
216 { return (insn & 0xbf800000) == 0x0d800000; }
217
218 // Classify INSN as a load/store, if it is one. Return true if INSN is a
219 // LD/ST instruction, otherwise return false. For scalar LD/ST instructions,
220 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
221 // instructions, PAIR is TRUE, and RT and RT2 are returned.
222 static bool
223 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
224 bool *pair, bool *load)
225 {
226 uint32_t opcode;
227 unsigned int r;
228 uint32_t opc = 0;
229 uint32_t v = 0;
230 uint32_t opc_v = 0;
231
232 /* Bail out quickly if INSN doesn't fall into the load-store
233 encoding space. */
234 if (!aarch64_ldst (insn))
235 return false;
236
237 *pair = false;
238 *load = false;
239 if (aarch64_ldst_ex (insn))
240 {
241 *rt = aarch64_rt (insn);
242 *rt2 = *rt;
243 if (aarch64_bit (insn, 21) == 1)
244 {
245 *pair = true;
246 *rt2 = aarch64_rt2 (insn);
247 }
248 *load = aarch64_ld (insn);
249 return true;
250 }
251 else if (aarch64_ldst_nap (insn)
252 || aarch64_ldstp_pi (insn)
253 || aarch64_ldstp_o (insn)
254 || aarch64_ldstp_pre (insn))
255 {
256 *pair = true;
257 *rt = aarch64_rt (insn);
258 *rt2 = aarch64_rt2 (insn);
259 *load = aarch64_ld (insn);
260 return true;
261 }
262 else if (aarch64_ldst_pcrel (insn)
263 || aarch64_ldst_ui (insn)
264 || aarch64_ldst_piimm (insn)
265 || aarch64_ldst_u (insn)
266 || aarch64_ldst_preimm (insn)
267 || aarch64_ldst_ro (insn)
268 || aarch64_ldst_uimm (insn))
269 {
270 *rt = aarch64_rt (insn);
271 *rt2 = *rt;
272 if (aarch64_ldst_pcrel (insn))
273 *load = true;
274 opc = aarch64_bits (insn, 22, 2);
275 v = aarch64_bit (insn, 26);
276 opc_v = opc | (v << 2);
277 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
278 || opc_v == 5 || opc_v == 7);
279 return true;
280 }
281 else if (aarch64_ldst_simd_m (insn)
282 || aarch64_ldst_simd_m_pi (insn))
283 {
284 *rt = aarch64_rt (insn);
285 *load = aarch64_bit (insn, 22);
286 opcode = (insn >> 12) & 0xf;
287 switch (opcode)
288 {
289 case 0:
290 case 2:
291 *rt2 = *rt + 3;
292 break;
293
294 case 4:
295 case 6:
296 *rt2 = *rt + 2;
297 break;
298
299 case 7:
300 *rt2 = *rt;
301 break;
302
303 case 8:
304 case 10:
305 *rt2 = *rt + 1;
306 break;
307
308 default:
309 return false;
310 }
311 return true;
312 }
313 else if (aarch64_ldst_simd_s (insn)
314 || aarch64_ldst_simd_s_pi (insn))
315 {
316 *rt = aarch64_rt (insn);
317 r = (insn >> 21) & 1;
318 *load = aarch64_bit (insn, 22);
319 opcode = (insn >> 13) & 0x7;
320 switch (opcode)
321 {
322 case 0:
323 case 2:
324 case 4:
325 *rt2 = *rt + r;
326 break;
327
328 case 1:
329 case 3:
330 case 5:
331 *rt2 = *rt + (r == 0 ? 2 : 3);
332 break;
333
334 case 6:
335 *rt2 = *rt + r;
336 break;
337
338 case 7:
339 *rt2 = *rt + (r == 0 ? 2 : 3);
340 break;
341
342 default:
343 return false;
344 }
345 return true;
346 }
347 return false;
348 } // End of "aarch64_mem_op_p".
349
350 // Return true if INSN is a mac insn.
351 static bool
352 aarch64_mac(Insntype insn)
353 { return (insn & 0xff000000) == 0x9b000000; }
354
355 // Return true if INSN is a multiply-accumulate insn.
356 // (This is similar to the implementation in elfnn-aarch64.c.)
357 static bool
358 aarch64_mlxl(Insntype insn)
359 {
360 uint32_t op31 = aarch64_op31(insn);
361 if (aarch64_mac(insn)
362 && (op31 == 0 || op31 == 1 || op31 == 5)
363 /* Exclude MUL instructions which are encoded as a multiple-accumulate
364 with RA = XZR. */
365 && aarch64_ra(insn) != AARCH64_ZR)
366 {
367 return true;
368 }
369 return false;
370 }
371 }; // End of "AArch64_insn_utilities".
372
373
374 // Insn length in bytes.
375
376 template<bool big_endian>
377 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
378
379
380 // Zero register encoding - 31.
381
382 template<bool big_endian>
383 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
384
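// An illustrative sketch (not part of the original source) of how the
// predicates above can be combined to spot an erratum-835769 candidate
// sequence, i.e. a memory load/store insn immediately followed by a 64-bit
// multiply-accumulate insn.  The helper name is hypothetical.

template<bool big_endian>
inline bool
example_erratum_835769_sequence_p(
    typename AArch64_insn_utilities<big_endian>::Insntype insn1,
    typename AArch64_insn_utilities<big_endian>::Insntype insn2)
{
  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
  unsigned int rt, rt2;
  bool is_pair, is_load;
  // INSN1 must be a load/store; INSN2 must be a multiply-accumulate (MUL
  // encodings, i.e. RA == XZR, are already excluded by aarch64_mlxl).
  return (Insn_utilities::aarch64_mem_op_p(insn1, &rt, &rt2,
                                           &is_pair, &is_load)
          && Insn_utilities::aarch64_mlxl(insn2));
}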
385
386 // Output_data_got_aarch64 class.
387
388 template<int size, bool big_endian>
389 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
390 {
391 public:
392 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
393 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
394 : Output_data_got<size, big_endian>(),
395 symbol_table_(symtab), layout_(layout)
396 { }
397
398 // Add a static entry for the GOT entry at OFFSET. GSYM is a global
399 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
400 // applied in a static link.
401 void
402 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
403 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
404
405
406 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
407 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
408 // relocation that needs to be applied in a static link.
409 void
410 add_static_reloc(unsigned int got_offset, unsigned int r_type,
411 Sized_relobj_file<size, big_endian>* relobj,
412 unsigned int index)
413 {
414 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
415 index));
416 }
417
418
419 protected:
420 // Write out the GOT table.
421 void
422 do_write(Output_file* of) {
423 // The first entry in the GOT is the address of the .dynamic section.
424 gold_assert(this->data_size() >= size / 8);
425 Output_section* dynamic = this->layout_->dynamic_section();
426 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
427 this->replace_constant(0, dynamic_addr);
428 Output_data_got<size, big_endian>::do_write(of);
429
430 // Handling static relocs
431 if (this->static_relocs_.empty())
432 return;
433
434 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
435
436 gold_assert(parameters->doing_static_link());
437 const off_t offset = this->offset();
438 const section_size_type oview_size =
439 convert_to_section_size_type(this->data_size());
440 unsigned char* const oview = of->get_output_view(offset, oview_size);
441
442 Output_segment* tls_segment = this->layout_->tls_segment();
443 gold_assert(tls_segment != NULL);
444
445 AArch64_address aligned_tcb_address =
446 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
447 tls_segment->maximum_alignment());
448
449 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
450 {
451 Static_reloc& reloc(this->static_relocs_[i]);
452 AArch64_address value;
453
454 if (!reloc.symbol_is_global())
455 {
456 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
457 const Symbol_value<size>* psymval =
458 reloc.relobj()->local_symbol(reloc.index());
459
460 // We are doing static linking. Issue an error and skip this
461 // relocation if the symbol is undefined or in a discarded_section.
462 bool is_ordinary;
463 unsigned int shndx = psymval->input_shndx(&is_ordinary);
464 if ((shndx == elfcpp::SHN_UNDEF)
465 || (is_ordinary
466 && shndx != elfcpp::SHN_UNDEF
467 && !object->is_section_included(shndx)
468 && !this->symbol_table_->is_section_folded(object, shndx)))
469 {
470 gold_error(_("undefined or discarded local symbol %u from "
471 " object %s in GOT"),
472 reloc.index(), reloc.relobj()->name().c_str());
473 continue;
474 }
475 value = psymval->value(object, 0);
476 }
477 else
478 {
479 const Symbol* gsym = reloc.symbol();
480 gold_assert(gsym != NULL);
481 if (gsym->is_forwarder())
482 gsym = this->symbol_table_->resolve_forwards(gsym);
483
484 // We are doing static linking. Issue an error and skip this
485 // relocation if the symbol is undefined or in a discarded_section
486 // unless it is a weakly_undefined symbol.
487 if ((gsym->is_defined_in_discarded_section()
488 || gsym->is_undefined())
489 && !gsym->is_weak_undefined())
490 {
491 gold_error(_("undefined or discarded symbol %s in GOT"),
492 gsym->name());
493 continue;
494 }
495
496 if (!gsym->is_weak_undefined())
497 {
498 const Sized_symbol<size>* sym =
499 static_cast<const Sized_symbol<size>*>(gsym);
500 value = sym->value();
501 }
502 else
503 value = 0;
504 }
505
506 unsigned got_offset = reloc.got_offset();
507 gold_assert(got_offset < oview_size);
508
509 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
510 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
511 Valtype x;
512 switch (reloc.r_type())
513 {
514 case elfcpp::R_AARCH64_TLS_DTPREL64:
515 x = value;
516 break;
517 case elfcpp::R_AARCH64_TLS_TPREL64:
518 x = value + aligned_tcb_address;
519 break;
520 default:
521 gold_unreachable();
522 }
523 elfcpp::Swap<size, big_endian>::writeval(wv, x);
524 }
525
526 of->write_output_view(offset, oview_size, oview);
527 }
528
529 private:
530 // Symbol table of the output object.
531 Symbol_table* symbol_table_;
532 // A pointer to the Layout class, so that we can find the .dynamic
533 // section when we write out the GOT section.
534 Layout* layout_;
535
536 // This class represents dynamic relocations that need to be applied by
537 // gold because we are using TLS relocations in a static link.
538 class Static_reloc
539 {
540 public:
541 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
542 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
543 { this->u_.global.symbol = gsym; }
544
545 Static_reloc(unsigned int got_offset, unsigned int r_type,
546 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
547 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
548 {
549 this->u_.local.relobj = relobj;
550 this->u_.local.index = index;
551 }
552
553 // Return the GOT offset.
554 unsigned int
555 got_offset() const
556 { return this->got_offset_; }
557
558 // Relocation type.
559 unsigned int
560 r_type() const
561 { return this->r_type_; }
562
563 // Whether the symbol is global or not.
564 bool
565 symbol_is_global() const
566 { return this->symbol_is_global_; }
567
568 // For a relocation against a global symbol, the global symbol.
569 Symbol*
570 symbol() const
571 {
572 gold_assert(this->symbol_is_global_);
573 return this->u_.global.symbol;
574 }
575
576 // For a relocation against a local symbol, the defining object.
577 Sized_relobj_file<size, big_endian>*
578 relobj() const
579 {
580 gold_assert(!this->symbol_is_global_);
581 return this->u_.local.relobj;
582 }
583
584 // For a relocation against a local symbol, the local symbol index.
585 unsigned int
586 index() const
587 {
588 gold_assert(!this->symbol_is_global_);
589 return this->u_.local.index;
590 }
591
592 private:
593 // GOT offset of the entry to which this relocation is applied.
594 unsigned int got_offset_;
595 // Type of relocation.
596 unsigned int r_type_;
597 // Whether this relocation is against a global symbol.
598 bool symbol_is_global_;
599 // A global or local symbol.
600 union
601 {
602 struct
603 {
604 // For a global symbol, the symbol itself.
605 Symbol* symbol;
606 } global;
607 struct
608 {
609 // For a local symbol, the object defining the symbol.
610 Sized_relobj_file<size, big_endian>* relobj;
611 // For a local symbol, the symbol index.
612 unsigned int index;
613 } local;
614 } u_;
615 }; // End of inner class Static_reloc
616
617 std::vector<Static_reloc> static_relocs_;
618 }; // End of Output_data_got_aarch64
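
// A worked example (illustrative) of the two static reloc kinds handled in
// do_write() above, for a thread-local symbol whose value relative to its
// TLS segment is V:
//
//   R_AARCH64_TLS_DTPREL64:  GOT entry <- V
//   R_AARCH64_TLS_TPREL64:   GOT entry <- V + align(TCB_SIZE,
//                                                   maximum TLS alignment)
//
// i.e. TPREL entries are biased by the aligned TCB size, matching the
// AArch64 local-exec TLS layout in which the TCB precedes the TLS block.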
619
620
621 template<int size, bool big_endian>
622 class AArch64_input_section;
623
624
625 template<int size, bool big_endian>
626 class AArch64_output_section;
627
628
629 template<int size, bool big_endian>
630 class AArch64_relobj;
631
632
633 // Stub type enum constants.
634
635 enum
636 {
637 ST_NONE = 0,
638
639 // Using adrp/add pair, 4 insns (including alignment) without mem access,
640 // the fastest stub. This has a limited jump distance, which is tested by
641 // aarch64_valid_for_adrp_p.
642 ST_ADRP_BRANCH = 1,
643
644 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
645 // unlimited in jump distance.
646 ST_LONG_BRANCH_ABS = 2,
647
648 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
649 // mem access, the slowest one. Only used in position-independent executables.
650 ST_LONG_BRANCH_PCREL = 3,
651
652 // Stub for erratum 843419 handling.
653 ST_E_843419 = 4,
654
655 // Stub for erratum 835769 handling.
656 ST_E_835769 = 5,
657
658 // Number of total stub types.
659 ST_NUMBER = 6
660 };
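
// To put these choices in perspective (illustrative note): a direct B/BL
// covers roughly +/-128MiB (26-bit signed immediate scaled by 4), and
// ST_ADRP_BRANCH covers roughly +/-4GiB (21-bit signed page immediate,
// 4KiB pages), while the two ST_LONG_BRANCH_* variants have no range
// limit.  See MAX_BRANCH_OFFSET and MAX_ADRP_IMM in Reloc_stub below.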
661
662
663 // Struct that wraps insns for a particular stub. All stub templates are
664 // created/initialized as constants by Stub_template_repertoire.
665
666 template<bool big_endian>
667 struct Stub_template
668 {
669 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
670 const int insn_num;
671 };
672
673
674 // Simple singleton class that creates/initializes/stores all types of stub
675 // templates.
676
677 template<bool big_endian>
678 class Stub_template_repertoire
679 {
680 public:
681 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
682
683 // Single static method to get stub template for a given stub type.
684 static const Stub_template<big_endian>*
685 get_stub_template(int type)
686 {
687 static Stub_template_repertoire<big_endian> singleton;
688 return singleton.stub_templates_[type];
689 }
690
691 private:
692 // Constructor - creates/initializes all stub templates.
693 Stub_template_repertoire();
694 ~Stub_template_repertoire()
695 { }
696
697 // Disallowing copy ctor and copy assignment operator.
698 Stub_template_repertoire(Stub_template_repertoire&);
699 Stub_template_repertoire& operator=(Stub_template_repertoire&);
700
701 // Data that stores all insn templates.
702 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
703 }; // End of "class Stub_template_repertoire".
704
705
706 // Constructor - creates/initializes all stub templates.
707
708 template<bool big_endian>
709 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
710 {
711 // Insn array definitions.
712 const static Insntype ST_NONE_INSNS[] = {};
713
714 const static Insntype ST_ADRP_BRANCH_INSNS[] =
715 {
716 0x90000010, /* adrp ip0, X */
717 /* ADR_PREL_PG_HI21(X) */
718 0x91000210, /* add ip0, ip0, :lo12:X */
719 /* ADD_ABS_LO12_NC(X) */
720 0xd61f0200, /* br ip0 */
721 0x00000000, /* alignment padding */
722 };
723
724 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
725 {
726 0x58000050, /* ldr ip0, 0x8 */
727 0xd61f0200, /* br ip0 */
728 0x00000000, /* address field */
729 0x00000000, /* address field */
730 };
731
732 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
733 {
734 0x58000090, /* ldr ip0, 0x10 */
735 0x10000011, /* adr ip1, #0 */
736 0x8b110210, /* add ip0, ip0, ip1 */
737 0xd61f0200, /* br ip0 */
738 0x00000000, /* address field */
739 0x00000000, /* address field */
740 0x00000000, /* alignment padding */
741 0x00000000, /* alignment padding */
742 };
743
744 const static Insntype ST_E_843419_INSNS[] =
745 {
746 0x00000000, /* Placeholder for erratum insn. */
747 0x14000000, /* b <label> */
748 };
749
750 // ST_E_835769 has the same stub template as ST_E_843419.
751 const static Insntype* ST_E_835769_INSNS = ST_E_843419_INSNS;
752
753 #define install_insn_template(T) \
754 const static Stub_template<big_endian> template_##T = { \
755 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
756 this->stub_templates_[T] = &template_##T
757
758 install_insn_template(ST_NONE);
759 install_insn_template(ST_ADRP_BRANCH);
760 install_insn_template(ST_LONG_BRANCH_ABS);
761 install_insn_template(ST_LONG_BRANCH_PCREL);
762 install_insn_template(ST_E_843419);
763 install_insn_template(ST_E_835769);
764
765 #undef install_insn_template
766 }
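
// Usage example (illustrative only): the size in bytes of an ADRP-based
// branch stub can be derived from its template, e.g.
//
//   const Stub_template<big_endian>* t =
//       Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   int stub_bytes =
//       t->insn_num * AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
//   // stub_bytes == 16: four 4-byte insns, including the alignment pad.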
767
768
769 // Base class for stubs.
770
771 template<int size, bool big_endian>
772 class Stub_base
773 {
774 public:
775 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
776 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
777
778 static const AArch64_address invalid_address =
779 static_cast<AArch64_address>(-1);
780
781 static const section_offset_type invalid_offset =
782 static_cast<section_offset_type>(-1);
783
784 Stub_base(int type)
785 : destination_address_(invalid_address),
786 offset_(invalid_offset),
787 type_(type)
788 {}
789
790 ~Stub_base()
791 {}
792
793 // Get stub type.
794 int
795 type() const
796 { return this->type_; }
797
798 // Get stub template that provides stub insn information.
799 const Stub_template<big_endian>*
800 stub_template() const
801 {
802 return Stub_template_repertoire<big_endian>::
803 get_stub_template(this->type());
804 }
805
806 // Get destination address.
807 AArch64_address
808 destination_address() const
809 {
810 gold_assert(this->destination_address_ != this->invalid_address);
811 return this->destination_address_;
812 }
813
814 // Set destination address.
815 void
816 set_destination_address(AArch64_address address)
817 {
818 gold_assert(address != this->invalid_address);
819 this->destination_address_ = address;
820 }
821
822 // Reset the destination address.
823 void
824 reset_destination_address()
825 { this->destination_address_ = this->invalid_address; }
826
827 // Get offset of code stub. For Reloc_stub, it is the offset from the
828 // beginning of its containing stub table; for Erratum_stub, it is the offset
829 // from the end of reloc_stubs.
830 section_offset_type
831 offset() const
832 {
833 gold_assert(this->offset_ != this->invalid_offset);
834 return this->offset_;
835 }
836
837 // Set stub offset.
838 void
839 set_offset(section_offset_type offset)
840 { this->offset_ = offset; }
841
842 // Return the stub insn.
843 const Insntype*
844 insns() const
845 { return this->stub_template()->insns; }
846
847 // Return num of stub insns.
848 unsigned int
849 insn_num() const
850 { return this->stub_template()->insn_num; }
851
852 // Get size of the stub.
853 int
854 stub_size() const
855 {
856 return this->insn_num() *
857 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
858 }
859
860 // Write stub to output file.
861 void
862 write(unsigned char* view, section_size_type view_size)
863 { this->do_write(view, view_size); }
864
865 protected:
866 // Abstract method to be implemented by sub-classes.
867 virtual void
868 do_write(unsigned char*, section_size_type) = 0;
869
870 private:
871 // The last insn of a stub is a jump to the destination insn. This field
872 // records the destination address.
873 AArch64_address destination_address_;
874 // The stub offset. Note this has different interpretations for a
875 // Reloc_stub and an Erratum_stub. For a Reloc_stub this is the offset from the
876 // beginning of the containing stub_table, whereas for Erratum_stub, this is
877 // the offset from the end of reloc_stubs.
878 section_offset_type offset_;
879 // Stub type.
880 const int type_;
881 }; // End of "Stub_base".
882
883
884 // Erratum stub class. An erratum stub differs from a reloc stub in that for
885 // each erratum occurrence, we generate an erratum stub. We never share erratum
886 // stubs, whereas for reloc stubs, different branch insns share a single reloc
887 // stub as long as the branch targets are the same. (More to the point, reloc
888 // stubs can be shared because they're used to reach a specific target, whereas
889 // erratum stubs branch back to the original control flow.)
890
891 template<int size, bool big_endian>
892 class Erratum_stub : public Stub_base<size, big_endian>
893 {
894 public:
895 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
896 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
897 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
898
899 static const int STUB_ADDR_ALIGN;
900
901 static const Insntype invalid_insn = static_cast<Insntype>(-1);
902
903 Erratum_stub(The_aarch64_relobj* relobj, int type,
904 unsigned shndx, unsigned int sh_offset)
905 : Stub_base<size, big_endian>(type), relobj_(relobj),
906 shndx_(shndx), sh_offset_(sh_offset),
907 erratum_insn_(invalid_insn),
908 erratum_address_(this->invalid_address)
909 {}
910
911 ~Erratum_stub() {}
912
913 // Return the object that contains the erratum.
914 The_aarch64_relobj*
915 relobj()
916 { return this->relobj_; }
917
918 // Get section index of the erratum.
919 unsigned int
920 shndx() const
921 { return this->shndx_; }
922
923 // Get section offset of the erratum.
924 unsigned int
925 sh_offset() const
926 { return this->sh_offset_; }
927
928 // Get the erratum insn. This is the insn located at erratum_address.
929 Insntype
930 erratum_insn() const
931 {
932 gold_assert(this->erratum_insn_ != this->invalid_insn);
933 return this->erratum_insn_;
934 }
935
936 // Set the insn to which the erratum applies.
937 void
938 set_erratum_insn(Insntype insn)
939 { this->erratum_insn_ = insn; }
940
941 // Return the address where the erratum fix must be applied.
942 AArch64_address
943 erratum_address() const
944 {
945 gold_assert(this->erratum_address_ != this->invalid_address);
946 return this->erratum_address_;
947 }
948
949 // Set the address where the erratum fix must be applied.
950 void
951 set_erratum_address(AArch64_address addr)
952 { this->erratum_address_ = addr; }
953
954 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
955 // sh_offset). We do not include 'type' in the calculation, because there is
956 // at most one stub type at (obj, shndx, sh_offset).
957 bool
958 operator<(const Erratum_stub<size, big_endian>& k) const
959 {
960 if (this == &k)
961 return false;
962 // We group stubs by relobj.
963 if (this->relobj_ != k.relobj_)
964 return this->relobj_ < k.relobj_;
965 // Then by section index.
966 if (this->shndx_ != k.shndx_)
967 return this->shndx_ < k.shndx_;
968 // Lastly by section offset.
969 return this->sh_offset_ < k.sh_offset_;
970 }
971
972 protected:
973 virtual void
974 do_write(unsigned char*, section_size_type);
975
976 private:
977 // The object that needs to be fixed.
978 The_aarch64_relobj* relobj_;
979 // The shndx in the object that needs to be fixed.
980 const unsigned int shndx_;
981 // The section offset in the object that needs to be fixed.
982 const unsigned int sh_offset_;
983 // The insn to be fixed.
984 Insntype erratum_insn_;
985 // The address of the above insn.
986 AArch64_address erratum_address_;
987 }; // End of "Erratum_stub".
988
989 template<int size, bool big_endian>
990 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
991
992 // Comparator used in set definition.
993 template<int size, bool big_endian>
994 struct Erratum_stub_less
995 {
996 bool
997 operator()(const Erratum_stub<size, big_endian>* s1,
998 const Erratum_stub<size, big_endian>* s2) const
999 { return *s1 < *s2; }
1000 };
1001
1002 // Erratum_stub implementation for writing stub to output file.
1003
1004 template<int size, bool big_endian>
1005 void
1006 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1007 {
1008 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1009 const Insntype* insns = this->insns();
1010 uint32_t num_insns = this->insn_num();
1011 Insntype* ip = reinterpret_cast<Insntype*>(view);
1012 // For the currently implemented errata (843419 and 835769), the first insn
1013 // in the stub is always a copy of the problematic insn (in 843419, the mem
1014 // access insn; in 835769, the mac insn), followed by a jump-back.
1015 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1016 for (uint32_t i = 1; i < num_insns; ++i)
1017 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1018 }
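
// For instance (illustrative, erratum 843419): if the problematic insn is
// "ldr x2, [x0, #8]", the bytes written above form the stub
//
//     ldr x2, [x0, #8]    // copy of the erratum insn
//     b   <return>        // branch back to the original control flow
//
// where the branch target of the final "b" is filled in later by
// Stub_table::relocate_stubs().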
1019
1020
1021 // Reloc stub class.
1022
1023 template<int size, bool big_endian>
1024 class Reloc_stub : public Stub_base<size, big_endian>
1025 {
1026 public:
1027 typedef Reloc_stub<size, big_endian> This;
1028 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1029
1030 // Branch range. This is used to calculate the section group size, as well as
1031 // determine whether a stub is needed.
1032 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1033 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1034
1035 // Constant used to determine if an offset fits in the adrp instruction
1036 // encoding.
1037 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1038 static const int MIN_ADRP_IMM = -(1 << 20);
1039
1040 static const int BYTES_PER_INSN = 4;
1041 static const int STUB_ADDR_ALIGN;
1042
1043 // Determine whether the offset fits in the jump/branch instruction.
1044 static bool
1045 aarch64_valid_branch_offset_p(int64_t offset)
1046 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1047
1048 // Determine whether the offset fits in the adrp immediate field.
1049 static bool
1050 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1051 {
1052 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1053 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1054 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1055 }
1056
1057 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1058 // needed.
1059 static int
1060 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1061 AArch64_address target);
1062
1063 Reloc_stub(int type)
1064 : Stub_base<size, big_endian>(type)
1065 { }
1066
1067 ~Reloc_stub()
1068 { }
1069
1070 // The key class used to index the stub instance in the stub table's stub map.
1071 class Key
1072 {
1073 public:
1074 Key(int type, const Symbol* symbol, const Relobj* relobj,
1075 unsigned int r_sym, int32_t addend)
1076 : type_(type), addend_(addend)
1077 {
1078 if (symbol != NULL)
1079 {
1080 this->r_sym_ = Reloc_stub::invalid_index;
1081 this->u_.symbol = symbol;
1082 }
1083 else
1084 {
1085 gold_assert(relobj != NULL && r_sym != invalid_index);
1086 this->r_sym_ = r_sym;
1087 this->u_.relobj = relobj;
1088 }
1089 }
1090
1091 ~Key()
1092 { }
1093
1094 // Return stub type.
1095 int
1096 type() const
1097 { return this->type_; }
1098
1099 // Return the local symbol index or invalid_index.
1100 unsigned int
1101 r_sym() const
1102 { return this->r_sym_; }
1103
1104 // Return the symbol if there is one.
1105 const Symbol*
1106 symbol() const
1107 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1108
1109 // Return the relobj if there is one.
1110 const Relobj*
1111 relobj() const
1112 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1113
1114 // Whether this equals to another key k.
1115 bool
1116 eq(const Key& k) const
1117 {
1118 return ((this->type_ == k.type_)
1119 && (this->r_sym_ == k.r_sym_)
1120 && ((this->r_sym_ != Reloc_stub::invalid_index)
1121 ? (this->u_.relobj == k.u_.relobj)
1122 : (this->u_.symbol == k.u_.symbol))
1123 && (this->addend_ == k.addend_));
1124 }
1125
1126 // Return a hash value.
1127 size_t
1128 hash_value() const
1129 {
1130 size_t name_hash_value = gold::string_hash<char>(
1131 (this->r_sym_ != Reloc_stub::invalid_index)
1132 ? this->u_.relobj->name().c_str()
1133 : this->u_.symbol->name());
1134 // We only have 4 stub types.
1135 size_t stub_type_hash_value = 0x03 & this->type_;
1136 return (name_hash_value
1137 ^ stub_type_hash_value
1138 ^ ((this->r_sym_ & 0x3fff) << 2)
1139 ^ ((this->addend_ & 0xffff) << 16));
1140 }
1141
1142 // Functors for STL associative containers.
1143 struct hash
1144 {
1145 size_t
1146 operator()(const Key& k) const
1147 { return k.hash_value(); }
1148 };
1149
1150 struct equal_to
1151 {
1152 bool
1153 operator()(const Key& k1, const Key& k2) const
1154 { return k1.eq(k2); }
1155 };
1156
1157 private:
1158 // Stub type.
1159 const int type_;
1160 // If this is a local symbol, this is the index in the defining object.
1161 // Otherwise, it is invalid_index for a global symbol.
1162 unsigned int r_sym_;
1163 // If r_sym_ is an invalid index, this points to a global symbol.
1164 // Otherwise, it points to a relobj. We use the unsized and target
1165 // independent Symbol and Relobj classes instead of Sized_symbol<size> and
1166 // AArch64_relobj, in order to avoid making the stub class a template
1167 // as most of the stub machinery is endianness-neutral. However, it
1168 // may require a bit of casting done by users of this class.
1169 union
1170 {
1171 const Symbol* symbol;
1172 const Relobj* relobj;
1173 } u_;
1174 // Addend associated with a reloc.
1175 int32_t addend_;
1176 }; // End of inner class Reloc_stub::Key
1177
1178 protected:
1179 // This may be overridden in the child class.
1180 virtual void
1181 do_write(unsigned char*, section_size_type);
1182
1183 private:
1184 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1185 }; // End of Reloc_stub
1186
1187 template<int size, bool big_endian>
1188 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1189
1190 // Write data to output file.
1191
1192 template<int size, bool big_endian>
1193 void
1194 Reloc_stub<size, big_endian>::
1195 do_write(unsigned char* view, section_size_type)
1196 {
1197 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1198 const uint32_t* insns = this->insns();
1199 uint32_t num_insns = this->insn_num();
1200 Insntype* ip = reinterpret_cast<Insntype*>(view);
1201 for (uint32_t i = 0; i < num_insns; ++i)
1202 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1203 }
1204
1205
1206 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1207 // needed.
1208
1209 template<int size, bool big_endian>
1210 inline int
1211 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1212 unsigned int r_type, AArch64_address location, AArch64_address dest)
1213 {
1214 int64_t branch_offset = 0;
1215 switch(r_type)
1216 {
1217 case elfcpp::R_AARCH64_CALL26:
1218 case elfcpp::R_AARCH64_JUMP26:
1219 branch_offset = dest - location;
1220 break;
1221 default:
1222 gold_unreachable();
1223 }
1224
1225 if (aarch64_valid_branch_offset_p(branch_offset))
1226 return ST_NONE;
1227
1228 if (aarch64_valid_for_adrp_p(location, dest))
1229 return ST_ADRP_BRANCH;
1230
1231 if (parameters->options().output_is_position_independent()
1232 && parameters->options().output_is_executable())
1233 return ST_LONG_BRANCH_PCREL;
1234
1235 return ST_LONG_BRANCH_ABS;
1236 }
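
// A worked example (illustrative): a BL at address 0x400000 targeting
// 0x8400000 has a branch offset of 0x8000000 (128MiB), just beyond
// MAX_BRANCH_OFFSET (0x7fffffc), so a stub is required.  If the adrp
// reachability test passes, the cheap ST_ADRP_BRANCH stub is used;
// otherwise a position-independent executable gets ST_LONG_BRANCH_PCREL
// and any other output gets ST_LONG_BRANCH_ABS.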
1237
1238 // A class to hold stubs for the AArch64 target.
1239
1240 template<int size, bool big_endian>
1241 class Stub_table : public Output_data
1242 {
1243 public:
1244 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1245 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1246 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1247 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1248 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1249 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1250 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1251 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1252 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1253 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1254 typedef Stub_table<size, big_endian> The_stub_table;
1255 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1256 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1257 Reloc_stub_map;
1258 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1259 typedef Relocate_info<size, big_endian> The_relocate_info;
1260
1261 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1262 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1263
1264 Stub_table(The_aarch64_input_section* owner)
1265 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1266 erratum_stubs_size_(0), prev_data_size_(0)
1267 { }
1268
1269 ~Stub_table()
1270 { }
1271
1272 The_aarch64_input_section*
1273 owner() const
1274 { return owner_; }
1275
1276 // Whether this stub table is empty.
1277 bool
1278 empty() const
1279 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1280
1281 // Return the current data size.
1282 off_t
1283 current_data_size() const
1284 { return this->current_data_size_for_child(); }
1285
1286 // Add a STUB using KEY. The caller is responsible for avoiding addition
1287 // if a STUB with the same key has already been added.
1288 void
1289 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1290
1291 // Add an erratum stub into the erratum stub set. The set is ordered by
1292 // (relobj, shndx, sh_offset).
1293 void
1294 add_erratum_stub(The_erratum_stub* stub);
1295
1296 // Find if such erratum exists for any given (obj, shndx, sh_offset).
1297 The_erratum_stub*
1298 find_erratum_stub(The_aarch64_relobj* a64relobj,
1299 unsigned int shndx, unsigned int sh_offset);
1300
1301 // Find all the errata for a given input section. The return value is a pair
1302 // of iterators [begin, end).
1303 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1304 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1305 unsigned int shndx);
1306
1307 // Compute the erratum stub address.
1308 AArch64_address
1309 erratum_stub_address(The_erratum_stub* stub) const
1310 {
1311 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1312 The_erratum_stub::STUB_ADDR_ALIGN);
1313 r += stub->offset();
1314 return r;
1315 }
1316
1317 // Finalize stubs. No-op here, just for completeness.
1318 void
1319 finalize_stubs()
1320 { }
1321
1322 // Look up a relocation stub using KEY. Return NULL if there is none.
1323 The_reloc_stub*
1324 find_reloc_stub(The_reloc_stub_key& key)
1325 {
1326 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1327 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1328 }
1329
1330 // Relocate stubs in this stub table.
1331 void
1332 relocate_stubs(const The_relocate_info*,
1333 The_target_aarch64*,
1334 Output_section*,
1335 unsigned char*,
1336 AArch64_address,
1337 section_size_type);
1338
1339 // Update data size at the end of a relaxation pass. Return true if data size
1340 // is different from that of the previous relaxation pass.
1341 bool
1342 update_data_size_changed_p()
1343 {
1344 // The addralign does not change here.
1345 off_t s = align_address(this->reloc_stubs_size_,
1346 The_erratum_stub::STUB_ADDR_ALIGN)
1347 + this->erratum_stubs_size_;
1348 bool changed = (s != this->prev_data_size_);
1349 this->prev_data_size_ = s;
1350 return changed;
1351 }
1352
1353 protected:
1354 // Write out section contents.
1355 void
1356 do_write(Output_file*);
1357
1358 // Return the required alignment.
1359 uint64_t
1360 do_addralign() const
1361 {
1362 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1363 The_erratum_stub::STUB_ADDR_ALIGN);
1364 }
1365
1366 // Reset address and file offset.
1367 void
1368 do_reset_address_and_file_offset()
1369 { this->set_current_data_size_for_child(this->prev_data_size_); }
1370
1371 // Set final data size.
1372 void
1373 set_final_data_size()
1374 { this->set_data_size(this->current_data_size()); }
1375
1376 private:
1377 // Relocate one stub.
1378 void
1379 relocate_stub(The_reloc_stub*,
1380 const The_relocate_info*,
1381 The_target_aarch64*,
1382 Output_section*,
1383 unsigned char*,
1384 AArch64_address,
1385 section_size_type);
1386
1387 private:
1388 // Owner of this stub table.
1389 The_aarch64_input_section* owner_;
1390 // The relocation stubs.
1391 Reloc_stub_map reloc_stubs_;
1392 // The erratum stubs.
1393 Erratum_stub_set erratum_stubs_;
1394 // Size of reloc stubs.
1395 off_t reloc_stubs_size_;
1396 // Size of erratum stubs.
1397 off_t erratum_stubs_size_;
1398 // Data size of this stub table in the previous relaxation pass.
1399 off_t prev_data_size_;
1400 }; // End of Stub_table
1401
1402
1403 // Add an erratum stub into the erratum stub set. The set is ordered by
1404 // (relobj, shndx, sh_offset).
1405
1406 template<int size, bool big_endian>
1407 void
1408 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1409 {
1410 std::pair<Erratum_stub_set_iter, bool> ret =
1411 this->erratum_stubs_.insert(stub);
1412 gold_assert(ret.second);
1413 this->erratum_stubs_size_ = align_address(
1414 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1415 stub->set_offset(this->erratum_stubs_size_);
1416 this->erratum_stubs_size_ += stub->stub_size();
1417 }
1418
1419
1420 // Find if such erratum exists for the given (obj, shndx, sh_offset).
1421
1422 template<int size, bool big_endian>
1423 Erratum_stub<size, big_endian>*
1424 Stub_table<size, big_endian>::find_erratum_stub(
1425 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1426 {
1427 // A dummy object used as key to search in the set.
1428 The_erratum_stub key(a64relobj, ST_NONE,
1429 shndx, sh_offset);
1430 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1431 if (i != this->erratum_stubs_.end())
1432 {
1433 The_erratum_stub* stub(*i);
1434 gold_assert(stub->erratum_insn() != 0);
1435 return stub;
1436 }
1437 return NULL;
1438 }
1439
1440
1441 // Find all the errata for a given input section. The return value is a pair of
1442 // iterators [begin, end).
1443
1444 template<int size, bool big_endian>
1445 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1446 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1447 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1448 The_aarch64_relobj* a64relobj, unsigned int shndx)
1449 {
1450 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1451 Erratum_stub_set_iter start, end;
1452 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1453 start = this->erratum_stubs_.lower_bound(&low_key);
1454 if (start == this->erratum_stubs_.end())
1455 return Result_pair(this->erratum_stubs_.end(),
1456 this->erratum_stubs_.end());
1457 end = start;
1458 while (end != this->erratum_stubs_.end() &&
1459 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1460 ++end;
1461 return Result_pair(start, end);
1462 }
1463
1464
1465 // Add a STUB using KEY. The caller is responsible for avoiding addition
1466 // if a STUB with the same key has already been added.
1467
1468 template<int size, bool big_endian>
1469 void
1470 Stub_table<size, big_endian>::add_reloc_stub(
1471 The_reloc_stub* stub, const The_reloc_stub_key& key)
1472 {
1473 gold_assert(stub->type() == key.type());
1474 this->reloc_stubs_[key] = stub;
1475
1476 // Assign stub offset early. We can do this because we never remove
1477 // reloc stubs and they are in the beginning of the stub table.
1478 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1479 The_reloc_stub::STUB_ADDR_ALIGN);
1480 stub->set_offset(this->reloc_stubs_size_);
1481 this->reloc_stubs_size_ += stub->stub_size();
1482 }
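
// Typical usage (an illustrative sketch, not from the original source),
// honouring the contract that a key is only ever added once:
//
//   The_reloc_stub_key key(stub_type, gsym, NULL, 0, addend);
//   if (stub_table->find_reloc_stub(key) == NULL)
//     {
//       The_reloc_stub* stub = new The_reloc_stub(stub_type);
//       stub_table->add_reloc_stub(stub, key);
//     }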
1483
1484
1485 // Relocate all stubs in this stub table.
1486
1487 template<int size, bool big_endian>
1488 void
1489 Stub_table<size, big_endian>::
1490 relocate_stubs(const The_relocate_info* relinfo,
1491 The_target_aarch64* target_aarch64,
1492 Output_section* output_section,
1493 unsigned char* view,
1494 AArch64_address address,
1495 section_size_type view_size)
1496 {
1497 // "view_size" is the total size of the stub_table.
1498 gold_assert(address == this->address() &&
1499 view_size == static_cast<section_size_type>(this->data_size()));
1500 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1501 p != this->reloc_stubs_.end(); ++p)
1502 relocate_stub(p->second, relinfo, target_aarch64, output_section,
1503 view, address, view_size);
1504
1505 // Just for convenience.
1506 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1507
1508 // Now 'relocate' erratum stubs.
1509 for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
1510 i != this->erratum_stubs_.end(); ++i)
1511 {
1512 AArch64_address stub_address = this->erratum_stub_address(*i);
1513 // The address of "b" in the stub that is to be "relocated".
1514 AArch64_address stub_b_insn_address;
1515 // Branch offset that is to be filled in "b" insn.
1516 int b_offset = 0;
1517 switch ((*i)->type())
1518 {
1519 case ST_E_843419:
1520 case ST_E_835769:
1521 // For the erratum, the 2nd insn is a b-insn to be patched
1522 // (relocated).
1523 stub_b_insn_address = stub_address + 1 * BPI;
1524 b_offset = (*i)->destination_address() - stub_b_insn_address;
1525 AArch64_relocate_functions<size, big_endian>::construct_b(
1526 view + (stub_b_insn_address - this->address()),
1527 ((unsigned int)(b_offset)) & 0xfffffff);
1528 break;
1529 default:
1530 gold_unreachable();
1531 break;
1532 }
1533 }
1534 }
1535
1536
1537 // Relocate one stub. This is a helper for Stub_table::relocate_stubs().
1538
1539 template<int size, bool big_endian>
1540 void
1541 Stub_table<size, big_endian>::
1542 relocate_stub(The_reloc_stub* stub,
1543 const The_relocate_info* relinfo,
1544 The_target_aarch64* target_aarch64,
1545 Output_section* output_section,
1546 unsigned char* view,
1547 AArch64_address address,
1548 section_size_type view_size)
1549 {
1550 // "offset" is the offset from the beginning of the stub_table.
1551 section_size_type offset = stub->offset();
1552 section_size_type stub_size = stub->stub_size();
1553 // "view_size" is the total size of the stub_table.
1554 gold_assert(offset + stub_size <= view_size);
1555
1556 target_aarch64->relocate_stub(stub, relinfo, output_section,
1557 view + offset, address + offset, view_size);
1558 }
1559
1560
1561 // Write out the stubs to file.
1562
1563 template<int size, bool big_endian>
1564 void
1565 Stub_table<size, big_endian>::do_write(Output_file* of)
1566 {
1567 off_t offset = this->offset();
1568 const section_size_type oview_size =
1569 convert_to_section_size_type(this->data_size());
1570 unsigned char* const oview = of->get_output_view(offset, oview_size);
1571
1572 // Write relocation stubs.
1573 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1574 p != this->reloc_stubs_.end(); ++p)
1575 {
1576 The_reloc_stub* stub = p->second;
1577 AArch64_address address = this->address() + stub->offset();
1578 gold_assert(address ==
1579 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1580 stub->write(oview + stub->offset(), stub->stub_size());
1581 }
1582
1583 // Write erratum stubs.
1584 unsigned int erratum_stub_start_offset =
1585 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1586 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1587 p != this->erratum_stubs_.end(); ++p)
1588 {
1589 The_erratum_stub* stub(*p);
1590 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1591 stub->stub_size());
1592 }
1593
1594 of->write_output_view(this->offset(), oview_size, oview);
1595 }
1596
1597
1598 // AArch64_relobj class.
1599
1600 template<int size, bool big_endian>
1601 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1602 {
1603 public:
1604 typedef AArch64_relobj<size, big_endian> This;
1605 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1606 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1607 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1608 typedef Stub_table<size, big_endian> The_stub_table;
1609 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1610 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1611 typedef std::vector<The_stub_table*> Stub_table_list;
1612 static const AArch64_address invalid_address =
1613 static_cast<AArch64_address>(-1);
1614
1615 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1616 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1617 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1618 stub_tables_()
1619 { }
1620
1621 ~AArch64_relobj()
1622 { }
1623
1624 // Return the stub table of the SHNDX-th section if there is one.
1625 The_stub_table*
1626 stub_table(unsigned int shndx) const
1627 {
1628 gold_assert(shndx < this->stub_tables_.size());
1629 return this->stub_tables_[shndx];
1630 }
1631
1632 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1633 void
1634 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1635 {
1636 gold_assert(shndx < this->stub_tables_.size());
1637 this->stub_tables_[shndx] = stub_table;
1638 }
1639
1640 // Entrance to errata scanning.
1641 void
1642 scan_errata(unsigned int shndx,
1643 const elfcpp::Shdr<size, big_endian>&,
1644 Output_section*, const Symbol_table*,
1645 The_target_aarch64*);
1646
1647 // Scan all relocation sections for stub generation.
1648 void
1649 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1650 const Layout*);
1651
1652 // Whether a section is a scannable text section.
1653 bool
1654 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1655 const Output_section*, const Symbol_table*);
1656
1657 // Convert regular input section with index SHNDX to a relaxed section.
1658 void
1659 convert_input_section_to_relaxed_section(unsigned /* shndx */)
1660 {
1661 // The stubs have relocations and we need to process them after writing
1662 // out the stubs. So relocation now must follow section write.
1663 this->set_relocs_must_follow_section_writes();
1664 }
1665
1666 // Structure for mapping symbol position.
1667 struct Mapping_symbol_position
1668 {
1669 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1670 shndx_(shndx), offset_(offset)
1671 {}
1672
1673 // "<" comparator used in ordered_map container.
1674 bool
1675 operator<(const Mapping_symbol_position& p) const
1676 {
1677 return (this->shndx_ < p.shndx_
1678 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1679 }
1680
1681 // Section index.
1682 unsigned int shndx_;
1683
1684 // Section offset.
1685 AArch64_address offset_;
1686 };
1687
1688 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1689
1690 protected:
1691 // Post constructor setup.
1692 void
1693 do_setup()
1694 {
1695 // Call parent's setup method.
1696 Sized_relobj_file<size, big_endian>::do_setup();
1697
1698 // Initialize look-up tables.
1699 this->stub_tables_.resize(this->shnum());
1700 }
1701
1702 virtual void
1703 do_relocate_sections(
1704 const Symbol_table* symtab, const Layout* layout,
1705 const unsigned char* pshdrs, Output_file* of,
1706 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1707
1708 // Count local symbols and (optionally) record mapping info.
1709 virtual void
1710 do_count_local_symbols(Stringpool_template<char>*,
1711 Stringpool_template<char>*);
1712
1713 private:
1714 // Fix all errata in the object.
1715 void
1716 fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
1717
1718 // Whether a section needs to be scanned for relocation stubs.
1719 bool
1720 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1721 const Relobj::Output_sections&,
1722 const Symbol_table*, const unsigned char*);
1723
1724 // List of stub tables.
1725 Stub_table_list stub_tables_;
1726
1727 // Mapping symbol information sorted by (section index, section_offset).
1728 Mapping_symbol_info mapping_symbol_info_;
1729 }; // End of AArch64_relobj
1730
1731
1732 // Override to record mapping symbol information.
1733 template<int size, bool big_endian>
1734 void
1735 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1736 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1737 {
1738 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1739
1740 // Only erratum-fixing work needs mapping symbols, so skip this time-consuming
1741 // processing if we are not fixing errata.
1742 if (!parameters->options().fix_cortex_a53_843419()
1743 && !parameters->options().fix_cortex_a53_835769())
1744 return;
1745
1746 const unsigned int loccount = this->local_symbol_count();
1747 if (loccount == 0)
1748 return;
1749
1750 // Read the symbol table section header.
1751 const unsigned int symtab_shndx = this->symtab_shndx();
1752 elfcpp::Shdr<size, big_endian>
1753 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1754 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1755
1756 // Read the local symbols.
1757 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1758 gold_assert(loccount == symtabshdr.get_sh_info());
1759 off_t locsize = loccount * sym_size;
1760 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1761 locsize, true, true);
1762
1763 // For mapping symbol processing, we need to read the symbol names.
1764 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1765 if (strtab_shndx >= this->shnum())
1766 {
1767 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1768 return;
1769 }
1770
1771 elfcpp::Shdr<size, big_endian>
1772 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1773 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1774 {
1775 this->error(_("symbol table name section has wrong type: %u"),
1776 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1777 return;
1778 }
1779
1780 const char* pnames =
1781 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1782 strtabshdr.get_sh_size(),
1783 false, false));
1784
1785 // Skip the first dummy symbol.
1786 psyms += sym_size;
1787 typename Sized_relobj_file<size, big_endian>::Local_values*
1788 plocal_values = this->local_values();
1789 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1790 {
1791 elfcpp::Sym<size, big_endian> sym(psyms);
1792 Symbol_value<size>& lv((*plocal_values)[i]);
1793 AArch64_address input_value = lv.input_value();
1794
1795 // Check to see if this is a mapping symbol.
1796 const char* sym_name = pnames + sym.get_st_name();
1797 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1798 && sym_name[2] == '\0')
1799 {
1800 bool is_ordinary;
1801 unsigned int input_shndx =
1802 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1803 gold_assert(is_ordinary);
1804
1805 Mapping_symbol_position msp(input_shndx, input_value);
1806 // Insert mapping_symbol_info into map whose ordering is defined by
1807 // (shndx, offset_within_section).
1808 this->mapping_symbol_info_[msp] = sym_name[1];
1809 }
1810 }
1811 }
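
// Mapping symbols follow the AArch64 ELF convention: "$x" marks the start
// of a run of A64 instructions and "$d" marks the start of literal data.
// For example (illustrative), an input section laid out as
//
//   $x: code ...          <- scanned for errata
//   $d: jump table ...    <- skipped
//   $x: more code ...     <- scanned again
//
// is recorded above as { (shndx, 0) -> 'x', (shndx, off1) -> 'd', ... },
// which lets erratum scanning restrict itself to instruction regions.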
1812
1813
1814 // Fix all errata in the object.
1815
1816 template<int size, bool big_endian>
1817 void
1818 AArch64_relobj<size, big_endian>::fix_errata(
1819 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1820 {
1821 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1822 unsigned int shnum = this->shnum();
1823 for (unsigned int i = 1; i < shnum; ++i)
1824 {
1825 The_stub_table* stub_table = this->stub_table(i);
1826 if (!stub_table)
1827 continue;
1828 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1829 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1830 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1831 while (p != end)
1832 {
1833 The_erratum_stub* stub = *p;
1834 typename Sized_relobj_file<size, big_endian>::View_size&
1835 pview((*pviews)[i]);
1836
1837 // Double-check the data before fixing.
1838 Insntype* ip =
1839 reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
1840 Insntype insn_to_fix = ip[0];
1841 gold_assert(insn_to_fix == stub->erratum_insn());
1842 gold_assert(pview.address + stub->sh_offset()
1843 == stub->erratum_address());
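// Replace the offending instruction in place with an unconditional branch (B)
// to its erratum stub; the stub is expected to hold the displaced instruction
// followed by a branch back past the erratum site (see the erratum stub code).
// B encodes a 26-bit immediate holding the byte offset divided by 4, so only
// the low 28 bits of the byte offset are significant; hence the 0xfffffff mask
// below.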
1844
1845 AArch64_address stub_address =
1846 stub_table->erratum_stub_address(stub);
1847 unsigned int b_offset = stub_address - stub->erratum_address();
1848 AArch64_relocate_functions<size, big_endian>::construct_b(
1849 pview.view + stub->sh_offset(), b_offset & 0xfffffff);
1850 ++p;
1851 }
1852 }
1853 }
1854
1855
1856 // Relocate sections.
1857
1858 template<int size, bool big_endian>
1859 void
1860 AArch64_relobj<size, big_endian>::do_relocate_sections(
1861 const Symbol_table* symtab, const Layout* layout,
1862 const unsigned char* pshdrs, Output_file* of,
1863 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1864 {
1865 // Call parent to relocate sections.
1866 Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
1867 pshdrs, of, pviews);
1868
1869 // We do not generate stubs if doing a relocatable link.
1870 if (parameters->options().relocatable())
1871 return;
1872
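// Erratum fixes are applied only after the parent class has relocated the
// sections above, so the bytes patched here are the final output instructions.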
1873 if (parameters->options().fix_cortex_a53_843419()
1874 || parameters->options().fix_cortex_a53_835769())
1875 this->fix_errata(pviews);
1876
1877 Relocate_info<size, big_endian> relinfo;
1878 relinfo.symtab = symtab;
1879 relinfo.layout = layout;
1880 relinfo.object = this;
1881
1882 // Relocate stub tables.
1883 unsigned int shnum = this->shnum();
1884 The_target_aarch64* target = The_target_aarch64::current_target();
1885
1886 for (unsigned int i = 1; i < shnum; ++i)
1887 {
1888 The_aarch64_input_section* aarch64_input_section =
1889 target->find_aarch64_input_section(this, i);
1890 if (aarch64_input_section != NULL
1891 && aarch64_input_section->is_stub_table_owner()
1892 && !aarch64_input_section->stub_table()->empty())
1893 {
1894 Output_section* os = this->output_section(i);
1895 gold_assert(os != NULL);
1896
1897 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
1898 relinfo.reloc_shdr = NULL;
1899 relinfo.data_shndx = i;
1900 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
1901
1902 typename Sized_relobj_file<size, big_endian>::View_size&
1903 view_struct = (*pviews)[i];
1904 gold_assert(view_struct.view != NULL);
1905
1906 The_stub_table* stub_table = aarch64_input_section->stub_table();
1907 off_t offset = stub_table->address() - view_struct.address;
1908 unsigned char* view = view_struct.view + offset;
1909 AArch64_address address = stub_table->address();
1910 section_size_type view_size = stub_table->data_size();
1911 stub_table->relocate_stubs(&relinfo, target, os, view, address,
1912 view_size);
1913 }
1914 }
1915 }
1916
1917
1918 // Determine if an input section is scannable for stub processing. SHDR is
1919 // the header of the section and SHNDX is the section index. OS is the output
1920 // section for the input section and SYMTAB is the global symbol table used to
1921 // look up ICF information.
1922
1923 template<int size, bool big_endian>
1924 bool
1925 AArch64_relobj<size, big_endian>::text_section_is_scannable(
1926 const elfcpp::Shdr<size, big_endian>& text_shdr,
1927 unsigned int text_shndx,
1928 const Output_section* os,
1929 const Symbol_table* symtab)
1930 {
1931 // Skip any empty sections, unallocated sections or sections whose
1932 // type is not SHT_PROGBITS.
1933 if (text_shdr.get_sh_size() == 0
1934 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
1935 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
1936 return false;
1937
1938 // Skip any discarded or ICF'ed sections.
1939 if (os == NULL || symtab->is_section_folded(this, text_shndx))
1940 return false;
1941
1942 // Skip exception frame.
1943 if (strcmp(os->name(), ".eh_frame") == 0)
1944 return false;
1945
1946 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
1947 os->find_relaxed_input_section(this, text_shndx) != NULL);
1948
1949 return true;
1950 }
1951
1952
1953 // Determine if we want to scan the SHNDX-th section for relocation stubs.
1954 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
1955
1956 template<int size, bool big_endian>
1957 bool
1958 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
1959 const elfcpp::Shdr<size, big_endian>& shdr,
1960 const Relobj::Output_sections& out_sections,
1961 const Symbol_table* symtab,
1962 const unsigned char* pshdrs)
1963 {
1964 unsigned int sh_type = shdr.get_sh_type();
1965 if (sh_type != elfcpp::SHT_RELA)
1966 return false;
1967
1968 // Ignore empty section.
1969 off_t sh_size = shdr.get_sh_size();
1970 if (sh_size == 0)
1971 return false;
1972
1973 // Ignore reloc section with unexpected symbol table. The
1974 // error will be reported in the final link.
1975 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
1976 return false;
1977
1978 gold_assert(sh_type == elfcpp::SHT_RELA);
1979 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
1980
1981 // Ignore reloc section with unexpected entsize or uneven size.
1982 // The error will be reported in the final link.
1983 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
1984 return false;
1985
1986 // Ignore reloc section with bad info. This error will be
1987 // reported in the final link.
1988 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
1989 if (text_shndx >= this->shnum())
1990 return false;
1991
1992 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
1993 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
1994 text_shndx * shdr_size);
1995 return this->text_section_is_scannable(text_shdr, text_shndx,
1996 out_sections[text_shndx], symtab);
1997 }
1998
1999
2000 // Scan section SHNDX for errata 843419 and 835769.
2001
2002 template<int size, bool big_endian>
2003 void
2004 AArch64_relobj<size, big_endian>::scan_errata(
2005 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2006 Output_section* os, const Symbol_table* symtab,
2007 The_target_aarch64* target)
2008 {
2009 if (shdr.get_sh_size() == 0
2010 || (shdr.get_sh_flags() &
2011 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2012 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2013 return;
2014
2015 if (!os || symtab->is_section_folded(this, shndx)) return;
2016
2017 AArch64_address output_offset = this->get_output_section_offset(shndx);
2018 AArch64_address output_address;
2019 if (output_offset != invalid_address)
2020 output_address = os->address() + output_offset;
2021 else
2022 {
2023 const Output_relaxed_input_section* poris =
2024 os->find_relaxed_input_section(this, shndx);
2025 if (!poris) return;
2026 output_address = poris->address();
2027 }
2028
2029 section_size_type input_view_size = 0;
2030 const unsigned char* input_view =
2031 this->section_contents(shndx, &input_view_size, false);
2032
2033 Mapping_symbol_position section_start(shndx, 0);
2034 // Find the first mapping symbol record within section shndx.
2035 typename Mapping_symbol_info::const_iterator p =
2036 this->mapping_symbol_info_.lower_bound(section_start);
2037 if (p == this->mapping_symbol_info_.end() || p->first.shndx_ != shndx)
2038 gold_warning(_("cannot scan executable section %u of %s for Cortex-A53 "
2039 "erratum because it has no mapping symbols."),
2040 shndx, this->name().c_str());
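// Walk the mapping symbols of this section in (shndx, offset) order.  Each
// "$x" symbol opens a code span extending to the next mapping symbol, or to
// the end of the section, and only such code spans are scanned; "$d" spans
// contain data and are skipped.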
2041 while (p != this->mapping_symbol_info_.end() &&
2042 p->first.shndx_ == shndx)
2043 {
2044 typename Mapping_symbol_info::const_iterator prev = p;
2045 ++p;
2046 if (prev->second == 'x')
2047 {
2048 section_size_type span_start =
2049 convert_to_section_size_type(prev->first.offset_);
2050 section_size_type span_end;
2051 if (p != this->mapping_symbol_info_.end()
2052 && p->first.shndx_ == shndx)
2053 span_end = convert_to_section_size_type(p->first.offset_);
2054 else
2055 span_end = convert_to_section_size_type(shdr.get_sh_size());
2056
2057 // Here we do not share the scanning code of the two errata. For 843419,
2058 // only the last few insns of each page are examined, which is fast,
2059 // whereas for 835769, every insn pair needs to be checked.
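// Roughly: erratum 843419 involves an ADRP in the last two instruction slots
// of a 4KiB page followed by a dependent load/store sequence, while erratum
// 835769 involves a 64-bit multiply-accumulate that directly follows a load
// or store.  See the scan routines for the exact conditions.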
2060
2061 if (parameters->options().fix_cortex_a53_843419())
2062 target->scan_erratum_843419_span(
2063 this, shndx, span_start, span_end,
2064 const_cast<unsigned char*>(input_view), output_address);
2065
2066 if (parameters->options().fix_cortex_a53_835769())
2067 target->scan_erratum_835769_span(
2068 this, shndx, span_start, span_end,
2069 const_cast<unsigned char*>(input_view), output_address);
2070 }
2071 }
2072 }
2073
2074
2075 // Scan relocations for stub generation.
2076
2077 template<int size, bool big_endian>
2078 void
2079 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2080 The_target_aarch64* target,
2081 const Symbol_table* symtab,
2082 const Layout* layout)
2083 {
2084 unsigned int shnum = this->shnum();
2085 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2086
2087 // Read the section headers.
2088 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2089 shnum * shdr_size,
2090 true, true);
2091
2092 // To speed up processing, we set up hash tables for fast lookup from
2093 // input offsets to output addresses.
2094 this->initialize_input_to_output_maps();
2095
2096 const Relobj::Output_sections& out_sections(this->output_sections());
2097
2098 Relocate_info<size, big_endian> relinfo;
2099 relinfo.symtab = symtab;
2100 relinfo.layout = layout;
2101 relinfo.object = this;
2102
2103 // Do the relocation stub scanning.
2104 const unsigned char* p = pshdrs + shdr_size;
2105 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2106 {
2107 const elfcpp::Shdr<size, big_endian> shdr(p);
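// If requested, scan this section for erratum sequences first.  Any erratum
// stubs created are added to the per-section stub tables, which are sized
// later in this relaxation pass.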
2108 if (parameters->options().fix_cortex_a53_843419()
2109 || parameters->options().fix_cortex_a53_835769())
2110 scan_errata(i, shdr, out_sections[i], symtab, target);
2111 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2112 pshdrs))
2113 {
2114 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2115 AArch64_address output_offset =
2116 this->get_output_section_offset(index);
2117 AArch64_address output_address;
2118 if (output_offset != invalid_address)
2119 {
2120 output_address = out_sections[index]->address() + output_offset;
2121 }
2122 else
2123 {
2124 // Currently this only happens for a relaxed section.
2125 const Output_relaxed_input_section* poris =
2126 out_sections[index]->find_relaxed_input_section(this, index);
2127 gold_assert(poris != NULL);
2128 output_address = poris->address();
2129 }
2130
2131 // Get the relocations.
2132 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2133 shdr.get_sh_size(),
2134 true, false);
2135
2136 // Get the section contents.
2137 section_size_type input_view_size = 0;
2138 const unsigned char* input_view =
2139 this->section_contents(index, &input_view_size, false);
2140
2141 relinfo.reloc_shndx = i;
2142 relinfo.data_shndx = index;
2143 unsigned int sh_type = shdr.get_sh_type();
2144 unsigned int reloc_size;
2145 gold_assert(sh_type == elfcpp::SHT_RELA);
2146 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2147
2148 Output_section* os = out_sections[index];
2149 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2150 shdr.get_sh_size() / reloc_size,
2151 os,
2152 output_offset == invalid_address,
2153 input_view, output_address,
2154 input_view_size);
2155 }
2156 }
2157 }
2158
2159
2160 // A class to wrap an ordinary input section containing executable code.
2161
2162 template<int size, bool big_endian>
2163 class AArch64_input_section : public Output_relaxed_input_section
2164 {
2165 public:
2166 typedef Stub_table<size, big_endian> The_stub_table;
2167
2168 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2169 : Output_relaxed_input_section(relobj, shndx, 1),
2170 stub_table_(NULL),
2171 original_contents_(NULL), original_size_(0),
2172 original_addralign_(1)
2173 { }
2174
2175 ~AArch64_input_section()
2176 { delete[] this->original_contents_; }
2177
2178 // Initialize.
2179 void
2180 init();
2181
2182 // Set the stub_table.
2183 void
2184 set_stub_table(The_stub_table* st)
2185 { this->stub_table_ = st; }
2186
2187 // Whether this is a stub table owner.
2188 bool
2189 is_stub_table_owner() const
2190 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2191
2192 // Return the original size of the section.
2193 uint32_t
2194 original_size() const
2195 { return this->original_size_; }
2196
2197 // Return the stub table.
2198 The_stub_table*
2199 stub_table()
2200 { return stub_table_; }
2201
2202 protected:
2203 // Write out this input section.
2204 void
2205 do_write(Output_file*);
2206
2207 // Return required alignment of this.
2208 uint64_t
2209 do_addralign() const
2210 {
2211 if (this->is_stub_table_owner())
2212 return std::max(this->stub_table_->addralign(),
2213 static_cast<uint64_t>(this->original_addralign_));
2214 else
2215 return this->original_addralign_;
2216 }
2217
2218 // Finalize data size.
2219 void
2220 set_final_data_size();
2221
2222 // Reset address and file offset.
2223 void
2224 do_reset_address_and_file_offset();
2225
2226 // Output offset.
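// Map an offset in the original input section to an offset in this relaxed
// output section.  Offsets within the original contents are unchanged;
// offsets past original_size_ (inside any appended stub table) are rejected.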
2227 bool
2228 do_output_offset(const Relobj* object, unsigned int shndx,
2229 section_offset_type offset,
2230 section_offset_type* poutput) const
2231 {
2232 if ((object == this->relobj())
2233 && (shndx == this->shndx())
2234 && (offset >= 0)
2235 && (offset <=
2236 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2237 {
2238 *poutput = offset;
2239 return true;
2240 }
2241 else
2242 return false;
2243 }
2244
2245 private:
2246 // Copying is not allowed.
2247 AArch64_input_section(const AArch64_input_section&);
2248 AArch64_input_section& operator=(const AArch64_input_section&);
2249
2250 // The relocation stubs.
2251 The_stub_table* stub_table_;
2252 // Original section contents. We have to make a copy here since the file
2253 // containing the original section may not be locked when we need to access
2254 // the contents.
2255 unsigned char* original_contents_;
2256 // Section size of the original input section.
2257 uint32_t original_size_;
2258 // Address alignment of the original input section.
2259 uint32_t original_addralign_;
2260 }; // End of AArch64_input_section
2261
2262
2263 // Finalize data size.
2264
2265 template<int size, bool big_endian>
2266 void
2267 AArch64_input_section<size, big_endian>::set_final_data_size()
2268 {
2269 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2270
2271 if (this->is_stub_table_owner())
2272 {
2273 this->stub_table_->finalize_data_size();
2274 off = align_address(off, this->stub_table_->addralign());
2275 off += this->stub_table_->data_size();
2276 }
2277 this->set_data_size(off);
2278 }
2279
2280
2281 // Reset address and file offset.
2282
2283 template<int size, bool big_endian>
2284 void
2285 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2286 {
2287 // Size of the original input section contents.
2288 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2289
2290 // If this is a stub table owner, account for the stub table size.
2291 if (this->is_stub_table_owner())
2292 {
2293 The_stub_table* stub_table = this->stub_table_;
2294
2295 // Reset the stub table's address and file offset. The
2296 // current data size for the child will be updated after that.
2297 stub_table_->reset_address_and_file_offset();
2298 off = align_address(off, stub_table_->addralign());
2299 off += stub_table->current_data_size();
2300 }
2301
2302 this->set_current_data_size(off);
2303 }
2304
2305
2306 // Initialize an AArch64_input_section.
2307
2308 template<int size, bool big_endian>
2309 void
2310 AArch64_input_section<size, big_endian>::init()
2311 {
2312 Relobj* relobj = this->relobj();
2313 unsigned int shndx = this->shndx();
2314
2315 // We have to cache original size, alignment and contents to avoid locking
2316 // the original file.
2317 this->original_addralign_ =
2318 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2319
2320 // This is not efficient but we expect only a small number of relaxed
2321 // input sections for stubs.
2322 section_size_type section_size;
2323 const unsigned char* section_contents =
2324 relobj->section_contents(shndx, &section_size, false);
2325 this->original_size_ =
2326 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2327
2328 gold_assert(this->original_contents_ == NULL);
2329 this->original_contents_ = new unsigned char[section_size];
2330 memcpy(this->original_contents_, section_contents, section_size);
2331
2332 // We want to make this look like the original input section after
2333 // output sections are finalized.
2334 Output_section* os = relobj->output_section(shndx);
2335 off_t offset = relobj->output_section_offset(shndx);
2336 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2337 this->set_address(os->address() + offset);
2338 this->set_file_offset(os->offset() + offset);
2339 this->set_current_data_size(this->original_size_);
2340 this->finalize_data_size();
2341 }
2342
2343
2344 // Write data to output file.
2345
2346 template<int size, bool big_endian>
2347 void
2348 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2349 {
2350 // We have to write out the original section content.
2351 gold_assert(this->original_contents_ != NULL);
2352 of->write(this->offset(), this->original_contents_,
2353 this->original_size_);
2354
2355 // If this owns a stub table and it is not empty, write it.
2356 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2357 this->stub_table_->write(of);
2358 }
2359
2360
2361 // AArch64 output section class. This is defined mainly to add a number of stub
2362 // generation methods.
2363
2364 template<int size, bool big_endian>
2365 class AArch64_output_section : public Output_section
2366 {
2367 public:
2368 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2369 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2370 typedef Stub_table<size, big_endian> The_stub_table;
2371 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2372
2373 public:
2374 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2375 elfcpp::Elf_Xword flags)
2376 : Output_section(name, type, flags)
2377 { }
2378
2379 ~AArch64_output_section() {}
2380
2381 // Group input sections for stub generation.
2382 void
2383 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2384 const Task*);
2385
2386 private:
2387 typedef Output_section::Input_section Input_section;
2388 typedef Output_section::Input_section_list Input_section_list;
2389
2390 // Create a stub group.
2391 void
2392 create_stub_group(Input_section_list::const_iterator,
2393 Input_section_list::const_iterator,
2394 Input_section_list::const_iterator,
2395 The_target_aarch64*,
2396 std::vector<Output_relaxed_input_section*>&,
2397 const Task*);
2398 }; // End of AArch64_output_section
2399
2400
2401 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2402 // the input section that will be the owner of the stub table.
2403
2404 template<int size, bool big_endian> void
2405 AArch64_output_section<size, big_endian>::create_stub_group(
2406 Input_section_list::const_iterator first,
2407 Input_section_list::const_iterator last,
2408 Input_section_list::const_iterator owner,
2409 The_target_aarch64* target,
2410 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2411 const Task* task)
2412 {
2413 // Currently we convert ordinary input sections into relaxed sections only
2414 // at this point.
2415 The_aarch64_input_section* input_section;
2416 if (owner->is_relaxed_input_section())
2417 gold_unreachable();
2418 else
2419 {
2420 gold_assert(owner->is_input_section());
2421 // Create a new relaxed input section. We need to lock the original
2422 // file.
2423 Task_lock_obj<Object> tl(task, owner->relobj());
2424 input_section =
2425 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2426 new_relaxed_sections.push_back(input_section);
2427 }
2428
2429 // Create a stub table.
2430 The_stub_table* stub_table =
2431 target->new_stub_table(input_section);
2432
2433 input_section->set_stub_table(stub_table);
2434
2435 Input_section_list::const_iterator p = first;
2436 // Look for input sections or relaxed input sections in [first ... last].
2437 do
2438 {
2439 if (p->is_input_section() || p->is_relaxed_input_section())
2440 {
2441 // The stub table information for input sections lives
2442 // in their objects.
2443 The_aarch64_relobj* aarch64_relobj =
2444 static_cast<The_aarch64_relobj*>(p->relobj());
2445 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2446 }
2447 }
2448 while (p++ != last);
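// Note that the do/while above runs the loop body for every element of the
// inclusive range [first, last]: the body executes for p == last before the
// post-increment comparison terminates the loop.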
2449 }
2450
2451
2452 // Group input sections for stub generation. GROUP_SIZE is roughly the limit of
2453 // a stub group's size. We grow a stub group by adding input sections until the
2454 // size is just below GROUP_SIZE. The last input section will be converted into
2455 // a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input
2456 // sections after the stub table, effectively doubling the group size.
2457 //
2458 // This is similar to the group_sections() function in elf32-arm.c but is
2459 // implemented differently.
2460
2461 template<int size, bool big_endian>
2462 void AArch64_output_section<size, big_endian>::group_sections(
2463 section_size_type group_size,
2464 bool stubs_always_after_branch,
2465 Target_aarch64<size, big_endian>* target,
2466 const Task* task)
2467 {
2468 typedef enum
2469 {
2470 NO_GROUP,
2471 FINDING_STUB_SECTION,
2472 HAS_STUB_SECTION
2473 } State;
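// State machine used below:
//   NO_GROUP             - not currently building a group.
//   FINDING_STUB_SECTION - a group is being grown; once adding the next
//                          section would exceed GROUP_SIZE, the last input
//                          section seen becomes the stub table owner.
//   HAS_STUB_SECTION     - the stub table has been placed, but sections after
//                          it may still be served by it (not supported yet;
//                          see the gold_unreachable below).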
2474
2475 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2476
2477 State state = NO_GROUP;
2478 section_size_type off = 0;
2479 section_size_type group_begin_offset = 0;
2480 section_size_type group_end_offset = 0;
2481 section_size_type stub_table_end_offset = 0;
2482 Input_section_list::const_iterator group_begin =
2483 this->input_sections().end();
2484 Input_section_list::const_iterator stub_table =
2485 this->input_sections().end();
2486 Input_section_list::const_iterator group_end = this->input_sections().end();
2487 for (Input_section_list::const_iterator p = this->input_sections().begin();
2488 p != this->input_sections().end();
2489 ++p)
2490 {
2491 section_size_type section_begin_offset =
2492 align_address(off, p->addralign());
2493 section_size_type section_end_offset =
2494 section_begin_offset + p->data_size();
2495
2496 // Check to see if we should group the previously seen sections.
2497 switch (state)
2498 {
2499 case NO_GROUP:
2500 break;
2501
2502 case FINDING_STUB_SECTION:
2503 // Adding this section makes the group larger than GROUP_SIZE.
2504 if (section_end_offset - group_begin_offset >= group_size)
2505 {
2506 if (stubs_always_after_branch)
2507 {
2508 gold_assert(group_end != this->input_sections().end());
2509 this->create_stub_group(group_begin, group_end, group_end,
2510 target, new_relaxed_sections,
2511 task);
2512 state = NO_GROUP;
2513 }
2514 else
2515 {
2516 // Input sections up to GROUP_SIZE bytes after the stub
2517 // table can be handled by it too.
2518 state = HAS_STUB_SECTION;
2519 stub_table = group_end;
2520 stub_table_end_offset = group_end_offset;
2521 }
2522 }
2523 break;
2524
2525 case HAS_STUB_SECTION:
2526 // Adding this section makes the post stub-section group larger
2527 // than GROUP_SIZE.
2528 gold_unreachable();
2529 // NOT SUPPORTED YET. For completeness only.
2530 if (section_end_offset - stub_table_end_offset >= group_size)
2531 {
2532 gold_assert(group_end != this->input_sections().end());
2533 this->create_stub_group(group_begin, group_end, stub_table,
2534 target, new_relaxed_sections, task);
2535 state = NO_GROUP;
2536 }
2537 break;
2538
2539 default:
2540 gold_unreachable();
2541 }
2542
2543 // If we see an input section and currently there is no group, start
2544 // a new one. Skip any empty sections. We look at the data size
2545 // instead of calling p->relobj()->section_size() to avoid locking.
2546 if ((p->is_input_section() || p->is_relaxed_input_section())
2547 && (p->data_size() != 0))
2548 {
2549 if (state == NO_GROUP)
2550 {
2551 state = FINDING_STUB_SECTION;
2552 group_begin = p;
2553 group_begin_offset = section_begin_offset;
2554 }
2555
2556 // Keep track of the last input section seen.
2557 group_end = p;
2558 group_end_offset = section_end_offset;
2559 }
2560
2561 off = section_end_offset;
2562 }
2563
2564 // Create a stub group for any ungrouped sections.
2565 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2566 {
2567 gold_assert(group_end != this->input_sections().end());
2568 this->create_stub_group(group_begin, group_end,
2569 (state == FINDING_STUB_SECTION
2570 ? group_end
2571 : stub_table),
2572 target, new_relaxed_sections, task);
2573 }
2574
2575 if (!new_relaxed_sections.empty())
2576 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2577
2578 // Record the conversion of each new relaxed section in its object.
2579 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2580 {
2581 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2582 new_relaxed_sections[i]->relobj());
2583 unsigned int shndx = new_relaxed_sections[i]->shndx();
2584 // Tell AArch64_relobj that this input section is converted.
2585 relobj->convert_input_section_to_relaxed_section(shndx);
2586 }
2587 } // End of AArch64_output_section::group_sections
2588
2589
2590 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2591
2592
2593 // The aarch64 target class.
2594 // See the ABI at
2595 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2596 template<int size, bool big_endian>
2597 class Target_aarch64 : public Sized_target<size, big_endian>
2598 {
2599 public:
2600 typedef Target_aarch64<size, big_endian> This;
2601 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2602 Reloc_section;
2603 typedef Relocate_info<size, big_endian> The_relocate_info;
2604 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2605 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2606 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2607 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2608 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2609 typedef Stub_table<size, big_endian> The_stub_table;
2610 typedef std::vector<The_stub_table*> Stub_table_list;
2611 typedef typename Stub_table_list::iterator Stub_table_iterator;
2612 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2613 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2614 typedef Unordered_map<Section_id,
2615 AArch64_input_section<size, big_endian>*,
2616 Section_id_hash> AArch64_input_section_map;
2617 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2618 const static int TCB_SIZE = size / 8 * 2;
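// AArch64 uses TLS variant I: the thread pointer addresses a thread control
// block of two pointer-sized words and the static TLS data follows it, so
// thread-pointer-relative offsets are biased by TCB_SIZE (16 bytes for the
// 64-bit ABI).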
2619
2620 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2621 : Sized_target<size, big_endian>(info),
2622 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2623 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2624 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2625 got_mod_index_offset_(-1U),
2626 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2627 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2628 { }
2629
2630 // Scan the relocations to determine unreferenced sections for
2631 // garbage collection.
2632 void
2633 gc_process_relocs(Symbol_table* symtab,
2634 Layout* layout,
2635 Sized_relobj_file<size, big_endian>* object,
2636 unsigned int data_shndx,
2637 unsigned int sh_type,
2638 const unsigned char* prelocs,
2639 size_t reloc_count,
2640 Output_section* output_section,
2641 bool needs_special_offset_handling,
2642 size_t local_symbol_count,
2643 const unsigned char* plocal_symbols);
2644
2645 // Scan the relocations to look for symbol adjustments.
2646 void
2647 scan_relocs(Symbol_table* symtab,
2648 Layout* layout,
2649 Sized_relobj_file<size, big_endian>* object,
2650 unsigned int data_shndx,
2651 unsigned int sh_type,
2652 const unsigned char* prelocs,
2653 size_t reloc_count,
2654 Output_section* output_section,
2655 bool needs_special_offset_handling,
2656 size_t local_symbol_count,
2657 const unsigned char* plocal_symbols);
2658
2659 // Finalize the sections.
2660 void
2661 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2662
2663 // Return the value to use for a dynamic symbol which requires special
2664 // treatment.
2665 uint64_t
2666 do_dynsym_value(const Symbol*) const;
2667
2668 // Relocate a section.
2669 void
2670 relocate_section(const Relocate_info<size, big_endian>*,
2671 unsigned int sh_type,
2672 const unsigned char* prelocs,
2673 size_t reloc_count,
2674 Output_section* output_section,
2675 bool needs_special_offset_handling,
2676 unsigned char* view,
2677 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2678 section_size_type view_size,
2679 const Reloc_symbol_changes*);
2680
2681 // Scan the relocs during a relocatable link.
2682 void
2683 scan_relocatable_relocs(Symbol_table* symtab,
2684 Layout* layout,
2685 Sized_relobj_file<size, big_endian>* object,
2686 unsigned int data_shndx,
2687 unsigned int sh_type,
2688 const unsigned char* prelocs,
2689 size_t reloc_count,
2690 Output_section* output_section,
2691 bool needs_special_offset_handling,
2692 size_t local_symbol_count,
2693 const unsigned char* plocal_symbols,
2694 Relocatable_relocs*);
2695
2696 // Relocate a section during a relocatable link.
2697 void
2698 relocate_relocs(
2699 const Relocate_info<size, big_endian>*,
2700 unsigned int sh_type,
2701 const unsigned char* prelocs,
2702 size_t reloc_count,
2703 Output_section* output_section,
2704 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2705 const Relocatable_relocs*,
2706 unsigned char* view,
2707 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2708 section_size_type view_size,
2709 unsigned char* reloc_view,
2710 section_size_type reloc_view_size);
2711
2712 // Return the symbol index to use for a target specific relocation.
2713 // The only target specific relocation is R_AARCH64_TLSDESC for a
2714 // local symbol, which is an absolute reloc.
2715 unsigned int
2716 do_reloc_symbol_index(void*, unsigned int r_type) const
2717 {
2718 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
2719 return 0;
2720 }
2721
2722 // Return the addend to use for a target specific relocation.
2723 uint64_t
2724 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
2725
2726 // Return the PLT section.
2727 uint64_t
2728 do_plt_address_for_global(const Symbol* gsym) const
2729 { return this->plt_section()->address_for_global(gsym); }
2730
2731 uint64_t
2732 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
2733 { return this->plt_section()->address_for_local(relobj, symndx); }
2734
2735 // This function should be defined in targets that can use relocation
2736 // types to determine (implemented in local_reloc_may_be_function_pointer
2737 // and global_reloc_may_be_function_pointer)
2738 // if a function's pointer is taken. ICF uses this in safe mode to only
2739 // fold those functions whose pointer is definitely not taken.
2740 bool
2741 do_can_check_for_function_pointers() const
2742 { return true; }
2743
2744 // Return the number of entries in the PLT.
2745 unsigned int
2746 plt_entry_count() const;
2747
2748 // Return the offset of the first non-reserved PLT entry.
2749 unsigned int
2750 first_plt_entry_offset() const;
2751
2752 // Return the size of each PLT entry.
2753 unsigned int
2754 plt_entry_size() const;
2755
2756 // Create a stub table.
2757 The_stub_table*
2758 new_stub_table(The_aarch64_input_section*);
2759
2760 // Create an aarch64 input section.
2761 The_aarch64_input_section*
2762 new_aarch64_input_section(Relobj*, unsigned int);
2763
2764 // Find an aarch64 input section instance for a given OBJ and SHNDX.
2765 The_aarch64_input_section*
2766 find_aarch64_input_section(Relobj*, unsigned int) const;
2767
2768 // Return the thread control block size.
2769 unsigned int
2770 tcb_size() const { return This::TCB_SIZE; }
2771
2772 // Scan a section for stub generation.
2773 void
2774 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
2775 const unsigned char*, size_t, Output_section*,
2776 bool, const unsigned char*,
2777 Address,
2778 section_size_type);
2779
2780 // Scan a relocation section for stubs.
2781 template<int sh_type>
2782 void
2783 scan_reloc_section_for_stubs(
2784 const The_relocate_info* relinfo,
2785 const unsigned char* prelocs,
2786 size_t reloc_count,
2787 Output_section* output_section,
2788 bool needs_special_offset_handling,
2789 const unsigned char* view,
2790 Address view_address,
2791 section_size_type);
2792
2793 // Relocate a single stub.
2794 void
2795 relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
2796 Output_section*, unsigned char*, Address,
2797 section_size_type);
2798
2799 // Get the default AArch64 target.
2800 static This*
2801 current_target()
2802 {
2803 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
2804 && parameters->target().get_size() == size
2805 && parameters->target().is_big_endian() == big_endian);
2806 return static_cast<This*>(parameters->sized_target<size, big_endian>());
2807 }
2808
2809
2810 // Scan erratum 843419 for a part of a section.
2811 void
2812 scan_erratum_843419_span(
2813 AArch64_relobj<size, big_endian>*,
2814 unsigned int,
2815 const section_size_type,
2816 const section_size_type,
2817 unsigned char*,
2818 Address);
2819
2820 // Scan erratum 835769 for a part of a section.
2821 void
2822 scan_erratum_835769_span(
2823 AArch64_relobj<size, big_endian>*,
2824 unsigned int,
2825 const section_size_type,
2826 const section_size_type,
2827 unsigned char*,
2828 Address);
2829
2830 protected:
2831 void
2832 do_select_as_default_target()
2833 {
2834 gold_assert(aarch64_reloc_property_table == NULL);
2835 aarch64_reloc_property_table = new AArch64_reloc_property_table();
2836 }
2837
2838 // Add a new reloc argument, returning the index in the vector.
2839 size_t
2840 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
2841 unsigned int r_sym)
2842 {
2843 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
2844 return this->tlsdesc_reloc_info_.size() - 1;
2845 }
2846
2847 virtual Output_data_plt_aarch64<size, big_endian>*
2848 do_make_data_plt(Layout* layout,
2849 Output_data_got_aarch64<size, big_endian>* got,
2850 Output_data_space* got_plt,
2851 Output_data_space* got_irelative)
2852 {
2853 return new Output_data_plt_aarch64_standard<size, big_endian>(
2854 layout, got, got_plt, got_irelative);
2855 }
2856
2857
2858 // do_make_elf_object to override the same function in the base class.
2859 Object*
2860 do_make_elf_object(const std::string&, Input_file*, off_t,
2861 const elfcpp::Ehdr<size, big_endian>&);
2862
2863 Output_data_plt_aarch64<size, big_endian>*
2864 make_data_plt(Layout* layout,
2865 Output_data_got_aarch64<size, big_endian>* got,
2866 Output_data_space* got_plt,
2867 Output_data_space* got_irelative)
2868 {
2869 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
2870 }
2871
2872 // We only need to generate stubs, and hence perform relaxation, if we are
2873 // not doing a relocatable link.
2874 virtual bool
2875 do_may_relax() const
2876 { return !parameters->options().relocatable(); }
2877
2878 // Relaxation hook. This is where we do stub generation.
2879 virtual bool
2880 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
2881
2882 void
2883 group_sections(Layout* layout,
2884 section_size_type group_size,
2885 bool stubs_always_after_branch,
2886 const Task* task);
2887
2888 void
2889 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
2890 const Sized_symbol<size>*, unsigned int,
2891 const Symbol_value<size>*,
2892 typename elfcpp::Elf_types<size>::Elf_Swxword,
2893 Address address);
2894
2895 // Make an output section.
2896 Output_section*
2897 do_make_output_section(const char* name, elfcpp::Elf_Word type,
2898 elfcpp::Elf_Xword flags)
2899 { return new The_aarch64_output_section(name, type, flags); }
2900
2901 private:
2902 // The class which scans relocations.
2903 class Scan
2904 {
2905 public:
2906 Scan()
2907 : issued_non_pic_error_(false)
2908 { }
2909
2910 inline void
2911 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
2912 Sized_relobj_file<size, big_endian>* object,
2913 unsigned int data_shndx,
2914 Output_section* output_section,
2915 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
2916 const elfcpp::Sym<size, big_endian>& lsym,
2917 bool is_discarded);
2918
2919 inline void
2920 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
2921 Sized_relobj_file<size, big_endian>* object,
2922 unsigned int data_shndx,
2923 Output_section* output_section,
2924 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
2925 Symbol* gsym);
2926
2927 inline bool
2928 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
2929 Target_aarch64<size, big_endian>* ,
2930 Sized_relobj_file<size, big_endian>* ,
2931 unsigned int ,
2932 Output_section* ,
2933 const elfcpp::Rela<size, big_endian>& ,
2934 unsigned int r_type,
2935 const elfcpp::Sym<size, big_endian>&);
2936
2937 inline bool
2938 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
2939 Target_aarch64<size, big_endian>* ,
2940 Sized_relobj_file<size, big_endian>* ,
2941 unsigned int ,
2942 Output_section* ,
2943 const elfcpp::Rela<size, big_endian>& ,
2944 unsigned int r_type,
2945 Symbol* gsym);
2946
2947 private:
2948 static void
2949 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
2950 unsigned int r_type);
2951
2952 static void
2953 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
2954 unsigned int r_type, Symbol*);
2955
2956 inline bool
2957 possible_function_pointer_reloc(unsigned int r_type);
2958
2959 void
2960 check_non_pic(Relobj*, unsigned int r_type);
2961
2962 bool
2963 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
2964 unsigned int r_type);
2965
2966 // Whether we have issued an error about a non-PIC compilation.
2967 bool issued_non_pic_error_;
2968 };
2969
2970 // The class which implements relocation.
2971 class Relocate
2972 {
2973 public:
2974 Relocate()
2975 : skip_call_tls_get_addr_(false)
2976 { }
2977
2978 ~Relocate()
2979 { }
2980
2981 // Do a relocation. Return false if the caller should not issue
2982 // any warnings about this relocation.
2983 inline bool
2984 relocate(const Relocate_info<size, big_endian>*, Target_aarch64*,
2985 Output_section*,
2986 size_t relnum, const elfcpp::Rela<size, big_endian>&,
2987 unsigned int r_type, const Sized_symbol<size>*,
2988 const Symbol_value<size>*,
2989 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
2990 section_size_type);
2991
2992 private:
2993 inline typename AArch64_relocate_functions<size, big_endian>::Status
2994 relocate_tls(const Relocate_info<size, big_endian>*,
2995 Target_aarch64<size, big_endian>*,
2996 size_t,
2997 const elfcpp::Rela<size, big_endian>&,
2998 unsigned int r_type, const Sized_symbol<size>*,
2999 const Symbol_value<size>*,
3000 unsigned char*,
3001 typename elfcpp::Elf_types<size>::Elf_Addr);
3002
3003 inline typename AArch64_relocate_functions<size, big_endian>::Status
3004 tls_gd_to_le(
3005 const Relocate_info<size, big_endian>*,
3006 Target_aarch64<size, big_endian>*,
3007 const elfcpp::Rela<size, big_endian>&,
3008 unsigned int,
3009 unsigned char*,
3010 const Symbol_value<size>*);
3011
3012 inline typename AArch64_relocate_functions<size, big_endian>::Status
3013 tls_ld_to_le(
3014 const Relocate_info<size, big_endian>*,
3015 Target_aarch64<size, big_endian>*,
3016 const elfcpp::Rela<size, big_endian>&,
3017 unsigned int,
3018 unsigned char*,
3019 const Symbol_value<size>*);
3020
3021 inline typename AArch64_relocate_functions<size, big_endian>::Status
3022 tls_ie_to_le(
3023 const Relocate_info<size, big_endian>*,
3024 Target_aarch64<size, big_endian>*,
3025 const elfcpp::Rela<size, big_endian>&,
3026 unsigned int,
3027 unsigned char*,
3028 const Symbol_value<size>*);
3029
3030 inline typename AArch64_relocate_functions<size, big_endian>::Status
3031 tls_desc_gd_to_le(
3032 const Relocate_info<size, big_endian>*,
3033 Target_aarch64<size, big_endian>*,
3034 const elfcpp::Rela<size, big_endian>&,
3035 unsigned int,
3036 unsigned char*,
3037 const Symbol_value<size>*);
3038
3039 inline typename AArch64_relocate_functions<size, big_endian>::Status
3040 tls_desc_gd_to_ie(
3041 const Relocate_info<size, big_endian>*,
3042 Target_aarch64<size, big_endian>*,
3043 const elfcpp::Rela<size, big_endian>&,
3044 unsigned int,
3045 unsigned char*,
3046 const Symbol_value<size>*,
3047 typename elfcpp::Elf_types<size>::Elf_Addr,
3048 typename elfcpp::Elf_types<size>::Elf_Addr);
3049
3050 bool skip_call_tls_get_addr_;
3051
3052 }; // End of class Relocate
3053
3054 // A class which returns the size required for a relocation type,
3055 // used while scanning relocs during a relocatable link.
3056 class Relocatable_size_for_reloc
3057 {
3058 public:
3059 unsigned int
3060 get_size_for_reloc(unsigned int, Relobj*);
3061 };
3062
3063 // Adjust TLS relocation type based on the options and whether this
3064 // is a local symbol.
3065 static tls::Tls_optimization
3066 optimize_tls_reloc(bool is_final, int r_type);
3067
3068 // Get the GOT section, creating it if necessary.
3069 Output_data_got_aarch64<size, big_endian>*
3070 got_section(Symbol_table*, Layout*);
3071
3072 // Get the GOT PLT section.
3073 Output_data_space*
3074 got_plt_section() const
3075 {
3076 gold_assert(this->got_plt_ != NULL);
3077 return this->got_plt_;
3078 }
3079
3080 // Get the GOT section for TLSDESC entries.
3081 Output_data_got<size, big_endian>*
3082 got_tlsdesc_section() const
3083 {
3084 gold_assert(this->got_tlsdesc_ != NULL);
3085 return this->got_tlsdesc_;
3086 }
3087
3088 // Create the PLT section.
3089 void
3090 make_plt_section(Symbol_table* symtab, Layout* layout);
3091
3092 // Create a PLT entry for a global symbol.
3093 void
3094 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3095
3096 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3097 void
3098 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3099 Sized_relobj_file<size, big_endian>* relobj,
3100 unsigned int local_sym_index);
3101
3102 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3103 void
3104 define_tls_base_symbol(Symbol_table*, Layout*);
3105
3106 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3107 void
3108 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3109
3110 // Create a GOT entry for the TLS module index.
3111 unsigned int
3112 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3113 Sized_relobj_file<size, big_endian>* object);
3114
3115 // Get the PLT section.
3116 Output_data_plt_aarch64<size, big_endian>*
3117 plt_section() const
3118 {
3119 gold_assert(this->plt_ != NULL);
3120 return this->plt_;
3121 }
3122
3123 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769.
3124 void create_erratum_stub(
3125 AArch64_relobj<size, big_endian>* relobj,
3126 unsigned int shndx,
3127 section_size_type erratum_insn_offset,
3128 Address erratum_address,
3129 typename Insn_utilities::Insntype erratum_insn,
3130 int erratum_type);
3131
3132 // Return whether this is a 3-insn erratum sequence.
3133 bool is_erratum_843419_sequence(
3134 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3135 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3136 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3137
3138 // Return whether this is an erratum 835769 sequence.
3139 // (Implemented similarly to elfnn-aarch64.c.)
3140 bool is_erratum_835769_sequence(
3141 typename elfcpp::Swap<32,big_endian>::Valtype,
3142 typename elfcpp::Swap<32,big_endian>::Valtype);
3143
3144 // Get the dynamic reloc section, creating it if necessary.
3145 Reloc_section*
3146 rela_dyn_section(Layout*);
3147
3148 // Get the section to use for TLSDESC relocations.
3149 Reloc_section*
3150 rela_tlsdesc_section(Layout*) const;
3151
3152 // Get the section to use for IRELATIVE relocations.
3153 Reloc_section*
3154 rela_irelative_section(Layout*);
3155
3156 // Add a potential copy relocation.
3157 void
3158 copy_reloc(Symbol_table* symtab, Layout* layout,
3159 Sized_relobj_file<size, big_endian>* object,
3160 unsigned int shndx, Output_section* output_section,
3161 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3162 {
3163 this->copy_relocs_.copy_reloc(symtab, layout,
3164 symtab->get_sized_symbol<size>(sym),
3165 object, shndx, output_section,
3166 reloc, this->rela_dyn_section(layout));
3167 }
3168
3169 // Information about this specific target which we pass to the
3170 // general Target structure.
3171 static const Target::Target_info aarch64_info;
3172
3173 // The types of GOT entries needed for this platform.
3174 // These values are exposed to the ABI in an incremental link.
3175 // Do not renumber existing values without changing the version
3176 // number of the .gnu_incremental_inputs section.
3177 enum Got_type
3178 {
3179 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3180 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3181 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3182 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3183 };
3184
3185 // This type is used as the argument to the target specific
3186 // relocation routines. The only target specific reloc is
3187 // R_AARCH64_TLSDESC against a local symbol.
3188 struct Tlsdesc_info
3189 {
3190 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3191 unsigned int a_r_sym)
3192 : object(a_object), r_sym(a_r_sym)
3193 { }
3194
3195 // The object in which the local symbol is defined.
3196 Sized_relobj_file<size, big_endian>* object;
3197 // The local symbol index in the object.
3198 unsigned int r_sym;
3199 };
3200
3201 // The GOT section.
3202 Output_data_got_aarch64<size, big_endian>* got_;
3203 // The PLT section.
3204 Output_data_plt_aarch64<size, big_endian>* plt_;
3205 // The GOT PLT section.
3206 Output_data_space* got_plt_;
3207 // The GOT section for IRELATIVE relocations.
3208 Output_data_space* got_irelative_;
3209 // The GOT section for TLSDESC relocations.
3210 Output_data_got<size, big_endian>* got_tlsdesc_;
3211 // The _GLOBAL_OFFSET_TABLE_ symbol.
3212 Symbol* global_offset_table_;
3213 // The dynamic reloc section.
3214 Reloc_section* rela_dyn_;
3215 // The section to use for IRELATIVE relocs.
3216 Reloc_section* rela_irelative_;
3217 // Relocs saved to avoid a COPY reloc.
3218 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3219 // Offset of the GOT entry for the TLS module index.
3220 unsigned int got_mod_index_offset_;
3221 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3222 // specific relocation. Here we store the object and local symbol
3223 // index for the relocation.
3224 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3225 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3226 bool tls_base_symbol_defined_;
3227 // List of stub tables.
3228 Stub_table_list stub_tables_;
3229 // Actual stub group size.
3230 section_size_type stub_group_size_;
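// Map from an input section (object, shndx) to the AArch64_input_section
// created for it, if any.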
3231 AArch64_input_section_map aarch64_input_section_map_;
3232 }; // End of Target_aarch64
3233
3234
3235 template<>
3236 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3237 {
3238 64, // size
3239 false, // is_big_endian
3240 elfcpp::EM_AARCH64, // machine_code
3241 false, // has_make_symbol
3242 false, // has_resolve
3243 false, // has_code_fill
3244 true, // is_default_stack_executable
3245 true, // can_icf_inline_merge_sections
3246 '\0', // wrap_char
3247 "/lib/ld.so.1", // program interpreter
3248 0x400000, // default_text_segment_address
3249 0x1000, // abi_pagesize (overridable by -z max-page-size)
3250 0x1000, // common_pagesize (overridable by -z common-page-size)
3251 false, // isolate_execinstr
3252 0, // rosegment_gap
3253 elfcpp::SHN_UNDEF, // small_common_shndx
3254 elfcpp::SHN_UNDEF, // large_common_shndx
3255 0, // small_common_section_flags
3256 0, // large_common_section_flags
3257 NULL, // attributes_section
3258 NULL, // attributes_vendor
3259 "_start" // entry_symbol_name
3260 };
3261
3262 template<>
3263 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3264 {
3265 32, // size
3266 false, // is_big_endian
3267 elfcpp::EM_AARCH64, // machine_code
3268 false, // has_make_symbol
3269 false, // has_resolve
3270 false, // has_code_fill
3271 true, // is_default_stack_executable
3272 false, // can_icf_inline_merge_sections
3273 '\0', // wrap_char
3274 "/lib/ld.so.1", // program interpreter
3275 0x400000, // default_text_segment_address
3276 0x1000, // abi_pagesize (overridable by -z max-page-size)
3277 0x1000, // common_pagesize (overridable by -z common-page-size)
3278 false, // isolate_execinstr
3279 0, // rosegment_gap
3280 elfcpp::SHN_UNDEF, // small_common_shndx
3281 elfcpp::SHN_UNDEF, // large_common_shndx
3282 0, // small_common_section_flags
3283 0, // large_common_section_flags
3284 NULL, // attributes_section
3285 NULL, // attributes_vendor
3286 "_start" // entry_symbol_name
3287 };
3288
3289 template<>
3290 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3291 {
3292 64, // size
3293 true, // is_big_endian
3294 elfcpp::EM_AARCH64, // machine_code
3295 false, // has_make_symbol
3296 false, // has_resolve
3297 false, // has_code_fill
3298 true, // is_default_stack_executable
3299 true, // can_icf_inline_merge_sections
3300 '\0', // wrap_char
3301 "/lib/ld.so.1", // program interpreter
3302 0x400000, // default_text_segment_address
3303 0x1000, // abi_pagesize (overridable by -z max-page-size)
3304 0x1000, // common_pagesize (overridable by -z common-page-size)
3305 false, // isolate_execinstr
3306 0, // rosegment_gap
3307 elfcpp::SHN_UNDEF, // small_common_shndx
3308 elfcpp::SHN_UNDEF, // large_common_shndx
3309 0, // small_common_section_flags
3310 0, // large_common_section_flags
3311 NULL, // attributes_section
3312 NULL, // attributes_vendor
3313 "_start" // entry_symbol_name
3314 };
3315
3316 template<>
3317 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3318 {
3319 32, // size
3320 true, // is_big_endian
3321 elfcpp::EM_AARCH64, // machine_code
3322 false, // has_make_symbol
3323 false, // has_resolve
3324 false, // has_code_fill
3325 true, // is_default_stack_executable
3326 false, // can_icf_inline_merge_sections
3327 '\0', // wrap_char
3328 "/lib/ld.so.1", // program interpreter
3329 0x400000, // default_text_segment_address
3330 0x1000, // abi_pagesize (overridable by -z max-page-size)
3331 0x1000, // common_pagesize (overridable by -z common-page-size)
3332 false, // isolate_execinstr
3333 0, // rosegment_gap
3334 elfcpp::SHN_UNDEF, // small_common_shndx
3335 elfcpp::SHN_UNDEF, // large_common_shndx
3336 0, // small_common_section_flags
3337 0, // large_common_section_flags
3338 NULL, // attributes_section
3339 NULL, // attributes_vendor
3340 "_start" // entry_symbol_name
3341 };
3342
3343 // Get the GOT section, creating it if necessary.
3344
3345 template<int size, bool big_endian>
3346 Output_data_got_aarch64<size, big_endian>*
3347 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3348 Layout* layout)
3349 {
3350 if (this->got_ == NULL)
3351 {
3352 gold_assert(symtab != NULL && layout != NULL);
3353
3354 // When using -z now, we can treat .got.plt as a relro section.
3355 // Without -z now, it is modified after program startup by lazy
3356 // PLT relocations.
3357 bool is_got_plt_relro = parameters->options().now();
3358 Output_section_order got_order = (is_got_plt_relro
3359 ? ORDER_RELRO
3360 : ORDER_RELRO_LAST);
3361 Output_section_order got_plt_order = (is_got_plt_relro
3362 ? ORDER_RELRO
3363 : ORDER_NON_RELRO_FIRST);
3364
3365 // Layout of .got and .got.plt sections.
3366 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3367 // ...
3368 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3369 // .gotplt[1] reserved for ld.so (resolver)
3370 // .gotplt[2] reserved
3371
3372 // Generate .got section.
3373 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3374 layout);
3375 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3376 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3377 this->got_, got_order, true);
3378 // The first word of GOT is reserved for the address of .dynamic.
3379 // We put 0 here now. The value will be replaced later in
3380 // Output_data_got_aarch64::do_write.
3381 this->got_->add_constant(0);
3382
3383 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3384 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section,
3385 // even if there is a .got.plt section.
3386 this->global_offset_table_ =
3387 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3388 Symbol_table::PREDEFINED,
3389 this->got_,
3390 0, 0, elfcpp::STT_OBJECT,
3391 elfcpp::STB_LOCAL,
3392 elfcpp::STV_HIDDEN, 0,
3393 false, false);
3394
3395 // Generate .got.plt section.
3396 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3397 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3398 (elfcpp::SHF_ALLOC
3399 | elfcpp::SHF_WRITE),
3400 this->got_plt_, got_plt_order,
3401 is_got_plt_relro);
3402
3403 // The first three entries are reserved.
3404 this->got_plt_->set_current_data_size(
3405 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
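// These reserved slots correspond to the .got.plt layout described above;
// they are filled in at run time by the dynamic linker and used by the
// reserved first PLT entry for lazy symbol resolution.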
3406
3407 // If there are any IRELATIVE relocations, they get GOT entries
3408 // in .got.plt after the jump slot entries.
3409 this->got_irelative_ = new Output_data_space(size / 8,
3410 "** GOT IRELATIVE PLT");
3411 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3412 (elfcpp::SHF_ALLOC
3413 | elfcpp::SHF_WRITE),
3414 this->got_irelative_,
3415 got_plt_order,
3416 is_got_plt_relro);
3417
3418 // If there are any TLSDESC relocations, they get GOT entries in
3419 // .got.plt after the jump slot and IRELATIVE entries.
3420 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3421 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3422 (elfcpp::SHF_ALLOC
3423 | elfcpp::SHF_WRITE),
3424 this->got_tlsdesc_,
3425 got_plt_order,
3426 is_got_plt_relro);
3427
3428 if (!is_got_plt_relro)
3429 {
3430 // Those bytes can go into the relro segment.
3431 layout->increase_relro(
3432 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3433 }
3434
3435 }
3436 return this->got_;
3437 }
3438
3439 // Get the dynamic reloc section, creating it if necessary.
3440
3441 template<int size, bool big_endian>
3442 typename Target_aarch64<size, big_endian>::Reloc_section*
3443 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3444 {
3445 if (this->rela_dyn_ == NULL)
3446 {
3447 gold_assert(layout != NULL);
3448 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3449 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3450 elfcpp::SHF_ALLOC, this->rela_dyn_,
3451 ORDER_DYNAMIC_RELOCS, false);
3452 }
3453 return this->rela_dyn_;
3454 }
3455
3456 // Get the section to use for IRELATIVE relocs, creating it if
3457 // necessary. These go in .rela.dyn, but only after all other dynamic
3458 // relocations. They need to follow the other dynamic relocations so
3459 // that they can refer to global variables initialized by those
3460 // relocs.
3461
3462 template<int size, bool big_endian>
3463 typename Target_aarch64<size, big_endian>::Reloc_section*
3464 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3465 {
3466 if (this->rela_irelative_ == NULL)
3467 {
3468 // Make sure we have already created the dynamic reloc section.
3469 this->rela_dyn_section(layout);
3470 this->rela_irelative_ = new Reloc_section(false);
3471 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3472 elfcpp::SHF_ALLOC, this->rela_irelative_,
3473 ORDER_DYNAMIC_RELOCS, false);
3474 gold_assert(this->rela_dyn_->output_section()
3475 == this->rela_irelative_->output_section());
3476 }
3477 return this->rela_irelative_;
3478 }
3479
3480
3481 // do_make_elf_object overrides the same function in the base class. We need
3482 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3483 // store backend-specific information, hence we need our own ELF object
3484 // creation.
3485
3486 template<int size, bool big_endian>
3487 Object*
3488 Target_aarch64<size, big_endian>::do_make_elf_object(
3489 const std::string& name,
3490 Input_file* input_file,
3491 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3492 {
3493 int et = ehdr.get_e_type();
3494 // ET_EXEC files are valid input for --just-symbols/-R,
3495 // and we treat them as relocatable objects.
3496 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3497 return Sized_target<size, big_endian>::do_make_elf_object(
3498 name, input_file, offset, ehdr);
3499 else if (et == elfcpp::ET_REL)
3500 {
3501 AArch64_relobj<size, big_endian>* obj =
3502 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3503 obj->setup();
3504 return obj;
3505 }
3506 else if (et == elfcpp::ET_DYN)
3507 {
3508 // Keep base implementation.
3509 Sized_dynobj<size, big_endian>* obj =
3510 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3511 obj->setup();
3512 return obj;
3513 }
3514 else
3515 {
3516 gold_error(_("%s: unsupported ELF file type %d"),
3517 name.c_str(), et);
3518 return NULL;
3519 }
3520 }
3521
3522
3523 // Scan a relocation for stub generation.
3524
3525 template<int size, bool big_endian>
3526 void
3527 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3528 const Relocate_info<size, big_endian>* relinfo,
3529 unsigned int r_type,
3530 const Sized_symbol<size>* gsym,
3531 unsigned int r_sym,
3532 const Symbol_value<size>* psymval,
3533 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3534 Address address)
3535 {
3536 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3537 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3538
3539 Symbol_value<size> symval;
3540 if (gsym != NULL)
3541 {
3542 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3543 get_reloc_property(r_type);
3544 if (gsym->use_plt_offset(arp->reference_flags()))
3545 {
3546 // This uses a PLT, change the symbol value.
3547 symval.set_output_value(this->plt_section()->address()
3548 + gsym->plt_offset());
3549 psymval = &symval;
3550 }
3551 else if (gsym->is_undefined())
3552 // There is no need to generate a stub if the symbol is undefined.
3553 return;
3554 }
3555
3556 // Get the symbol value.
3557 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3558
3559 // Compute the branch destination. On AArch64 the branch offset is
3560 // relative to the address of the branch instruction itself.
3561 Address destination = static_cast<Address>(-1);
3562 switch (r_type)
3563 {
3564 case elfcpp::R_AARCH64_CALL26:
3565 case elfcpp::R_AARCH64_JUMP26:
3566 destination = value + addend;
3567 break;
3568 default:
3569 gold_unreachable();
3570 }
3571
3572 int stub_type = The_reloc_stub::
3573 stub_type_for_reloc(r_type, address, destination);
3574 if (stub_type == ST_NONE)
3575 return;
3576
3577 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3578 gold_assert(stub_table != NULL);
3579
3580 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3581 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3582 if (stub == NULL)
3583 {
3584 stub = new The_reloc_stub(stub_type);
3585 stub_table->add_reloc_stub(stub, key);
3586 }
3587 stub->set_destination_address(destination);
3588 } // End of Target_aarch64::scan_reloc_for_stub
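
// As a rough sketch of the decision made by stub_type_for_reloc above (the
// helper below is hypothetical and only illustrates the usual +/-128MiB reach
// of AArch64 B/BL, which encode a signed 26-bit immediate scaled by 4):
//
//   static bool
//   branch_in_range(int64_t branch_address, int64_t destination)
//   {
//     int64_t offset = destination - branch_address;
//     return (offset >= -(static_cast<int64_t>(1) << 27)
//             && offset < (static_cast<int64_t>(1) << 27));
//   }
//
// A stub is only created when a check like this fails.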
3589
3590
3591 // This function scans a relocation section for stub generation.
3592 // It looks only at R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations,
3593 // computes the would-be destination of each branch, and records a stub
3594 // in the section's stub table when the destination is out of range.
3595
3596 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3597 // SHT_REL or SHT_RELA.
3598
3599 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3600 // of relocs. OUTPUT_SECTION is the output section.
3601 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3602 // mapped to output offsets.
3603
3604 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3605 // VIEW_SIZE is the size. These refer to the input section, unless
3606 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3607 // the output section.
3608
3609 template<int size, bool big_endian>
3610 template<int sh_type>
3611 void inline
3612 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3613 const Relocate_info<size, big_endian>* relinfo,
3614 const unsigned char* prelocs,
3615 size_t reloc_count,
3616 Output_section* /*output_section*/,
3617 bool /*needs_special_offset_handling*/,
3618 const unsigned char* /*view*/,
3619 Address view_address,
3620 section_size_type)
3621 {
3622 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3623
3624 const int reloc_size =
3625 Reloc_types<sh_type,size,big_endian>::reloc_size;
3626 AArch64_relobj<size, big_endian>* object =
3627 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3628 unsigned int local_count = object->local_symbol_count();
3629
3630 gold::Default_comdat_behavior default_comdat_behavior;
3631 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3632
3633 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3634 {
3635 Reltype reloc(prelocs);
3636 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3637 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3638 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3639 if (r_type != elfcpp::R_AARCH64_CALL26
3640 && r_type != elfcpp::R_AARCH64_JUMP26)
3641 continue;
3642
3643 section_offset_type offset =
3644 convert_to_section_size_type(reloc.get_r_offset());
3645
3646 // Get the addend.
3647 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3648 reloc.get_r_addend();
3649
3650 const Sized_symbol<size>* sym;
3651 Symbol_value<size> symval;
3652 const Symbol_value<size> *psymval;
3653 bool is_defined_in_discarded_section;
3654 unsigned int shndx;
3655 if (r_sym < local_count)
3656 {
3657 sym = NULL;
3658 psymval = object->local_symbol(r_sym);
3659
3660 // If the local symbol belongs to a section we are discarding,
3661 // and that section is a debug section, try to find the
3662 // corresponding kept section and map this symbol to its
3663 // counterpart in the kept section. The symbol must not
3664 // correspond to a section we are folding.
3665 bool is_ordinary;
3666 shndx = psymval->input_shndx(&is_ordinary);
3667 is_defined_in_discarded_section =
3668 (is_ordinary
3669 && shndx != elfcpp::SHN_UNDEF
3670 && !object->is_section_included(shndx)
3671 && !relinfo->symtab->is_section_folded(object, shndx));
3672
3673 // We need to compute the would-be final value of this local
3674 // symbol.
3675 if (!is_defined_in_discarded_section)
3676 {
3677 typedef Sized_relobj_file<size, big_endian> ObjType;
3678 typename ObjType::Compute_final_local_value_status status =
3679 object->compute_final_local_value(r_sym, psymval, &symval,
3680 relinfo->symtab);
3681 if (status == ObjType::CFLV_OK)
3682 {
3683 // Currently we cannot handle a branch to a target in
3684 // a merged section. If this is the case, issue an error
3685 // and also free the merge symbol value.
3686 if (!symval.has_output_value())
3687 {
3688 const std::string& section_name =
3689 object->section_name(shndx);
3690 object->error(_("cannot handle branch to local %u "
3691 "in a merged section %s"),
3692 r_sym, section_name.c_str());
3693 }
3694 psymval = &symval;
3695 }
3696 else
3697 {
3698 // We cannot determine the final value.
3699 continue;
3700 }
3701 }
3702 }
3703 else
3704 {
3705 const Symbol* gsym;
3706 gsym = object->global_symbol(r_sym);
3707 gold_assert(gsym != NULL);
3708 if (gsym->is_forwarder())
3709 gsym = relinfo->symtab->resolve_forwards(gsym);
3710
3711 sym = static_cast<const Sized_symbol<size>*>(gsym);
3712 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
3713 symval.set_output_symtab_index(sym->symtab_index());
3714 else
3715 symval.set_no_output_symtab_entry();
3716
3717 // We need to compute the would-be final value of this global
3718 // symbol.
3719 const Symbol_table* symtab = relinfo->symtab;
3720 const Sized_symbol<size>* sized_symbol =
3721 symtab->get_sized_symbol<size>(gsym);
3722 Symbol_table::Compute_final_value_status status;
3723 typename elfcpp::Elf_types<size>::Elf_Addr value =
3724 symtab->compute_final_value<size>(sized_symbol, &status);
3725
3726 // Skip this if the symbol has no output section.
3727 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
3728 continue;
3729 symval.set_output_value(value);
3730
3731 if (gsym->type() == elfcpp::STT_TLS)
3732 symval.set_is_tls_symbol();
3733 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
3734 symval.set_is_ifunc_symbol();
3735 psymval = &symval;
3736
3737 is_defined_in_discarded_section =
3738 (gsym->is_defined_in_discarded_section()
3739 && gsym->is_undefined());
3740 shndx = 0;
3741 }
3742
3743 Symbol_value<size> symval2;
3744 if (is_defined_in_discarded_section)
3745 {
3746 if (comdat_behavior == CB_UNDETERMINED)
3747 {
3748 std::string name = object->section_name(relinfo->data_shndx);
3749 comdat_behavior = default_comdat_behavior.get(name.c_str());
3750 }
3751 if (comdat_behavior == CB_PRETEND)
3752 {
3753 bool found;
3754 typename elfcpp::Elf_types<size>::Elf_Addr value =
3755 object->map_to_kept_section(shndx, &found);
3756 if (found)
3757 symval2.set_output_value(value + psymval->input_value());
3758 else
3759 symval2.set_output_value(0);
3760 }
3761 else
3762 {
3763 if (comdat_behavior == CB_WARNING)
3764 gold_warning_at_location(relinfo, i, offset,
3765 _("relocation refers to discarded "
3766 "section"));
3767 symval2.set_output_value(0);
3768 }
3769 symval2.set_no_output_symtab_entry();
3770 psymval = &symval2;
3771 }
3772
3773 // If the symbol is a section symbol, we don't know the actual type of
3774 // the destination. Give up.
3775 if (psymval->is_section_symbol())
3776 continue;
3777
3778 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
3779 addend, view_address + offset);
3780 } // End of iterating relocs in a section
3781 } // End of Target_aarch64::scan_reloc_section_for_stubs
3782
3783
3784 // Scan an input section for stub generation.
3785
3786 template<int size, bool big_endian>
3787 void
3788 Target_aarch64<size, big_endian>::scan_section_for_stubs(
3789 const Relocate_info<size, big_endian>* relinfo,
3790 unsigned int sh_type,
3791 const unsigned char* prelocs,
3792 size_t reloc_count,
3793 Output_section* output_section,
3794 bool needs_special_offset_handling,
3795 const unsigned char* view,
3796 Address view_address,
3797 section_size_type view_size)
3798 {
3799 gold_assert(sh_type == elfcpp::SHT_RELA);
3800 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
3801 relinfo,
3802 prelocs,
3803 reloc_count,
3804 output_section,
3805 needs_special_offset_handling,
3806 view,
3807 view_address,
3808 view_size);
3809 }
3810
3811
3812 // Relocate a single stub.
3813
3814 template<int size, bool big_endian>
3815 void Target_aarch64<size, big_endian>::
3816 relocate_stub(The_reloc_stub* stub,
3817 const The_relocate_info*,
3818 Output_section*,
3819 unsigned char* view,
3820 Address address,
3821 section_size_type)
3822 {
3823 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
3824 typedef typename The_reloc_functions::Status The_reloc_functions_status;
3825 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
3826
3827 Insntype* ip = reinterpret_cast<Insntype*>(view);
3828 int insn_number = stub->insn_num();
3829 const uint32_t* insns = stub->insns();
3830 // Check that the insns really are this stub's insns.
3831 for (int i = 0; i < insn_number; ++i)
3832 {
3833 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
3834 gold_assert(((uint32_t)insn == insns[i]));
3835 }
3836
3837 Address dest = stub->destination_address();
3838
3839 switch(stub->type())
3840 {
3841 case ST_ADRP_BRANCH:
3842 {
3843 // 1st reloc is ADR_PREL_PG_HI21
3844 The_reloc_functions_status status =
3845 The_reloc_functions::adrp(view, dest, address);
3846 // An error should never arise in the above step. If so, please
3847 // check 'aarch64_valid_for_adrp_p'.
3848 gold_assert(status == The_reloc_functions::STATUS_OKAY);
3849
3850 // 2nd reloc is ADD_ABS_LO12_NC
3851 const AArch64_reloc_property* arp =
3852 aarch64_reloc_property_table->get_reloc_property(
3853 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
3854 gold_assert(arp != NULL);
3855 status = The_reloc_functions::template
3856 rela_general<32>(view + 4, dest, 0, arp);
3857 // An error should never arise; this is an "_NC" relocation.
3858 gold_assert(status == The_reloc_functions::STATUS_OKAY);
3859 }
3860 break;
3861
3862 case ST_LONG_BRANCH_ABS:
3863 // 1st reloc is R_AARCH64_PREL64, at offset 8
3864 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
3865 break;
3866
3867 case ST_LONG_BRANCH_PCREL:
3868 {
3869 // "PC" calculation is the 2nd insn in the stub.
3870 uint64_t offset = dest - (address + 4);
3871 // The 64-bit offset is stored in insn slots 4 and 5 (bytes 16-23).
3872 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
3873 }
3874 break;
3875
3876 default:
3877 gold_unreachable();
3878 }
3879 }
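
// Worked example for the ST_ADRP_BRANCH case above (addresses made up for
// illustration): with the stub at address 0x400000 and dest == 0x10403018,
// the ADRP at the start of the stub receives Page(dest) - Page(address)
// == 0x10003000, so its 21-bit immediate becomes 0x10003, and the following
// ADD receives the low 12 bits of dest, 0x018.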
3880
3881
3882 // A class to handle the PLT data.
3883 // This is an abstract base class that handles most of the linker details
3884 // but does not know the actual contents of PLT entries. The derived
3885 // classes below fill in those details.
3886
3887 template<int size, bool big_endian>
3888 class Output_data_plt_aarch64 : public Output_section_data
3889 {
3890 public:
3891 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
3892 Reloc_section;
3893 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
3894
3895 Output_data_plt_aarch64(Layout* layout,
3896 uint64_t addralign,
3897 Output_data_got_aarch64<size, big_endian>* got,
3898 Output_data_space* got_plt,
3899 Output_data_space* got_irelative)
3900 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
3901 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
3902 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
3903 { this->init(layout); }
3904
3905 // Initialize the PLT section.
3906 void
3907 init(Layout* layout);
3908
3909 // Add an entry to the PLT.
3910 void
3911 add_entry(Symbol_table*, Layout*, Symbol* gsym);
3912
3913 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
3914 unsigned int
3915 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
3916 Sized_relobj_file<size, big_endian>* relobj,
3917 unsigned int local_sym_index);
3918
3919 // Add the relocation for a PLT entry.
3920 void
3921 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
3922 unsigned int got_offset);
3923
3924 // Add the reserved TLSDESC_PLT entry to the PLT.
3925 void
3926 reserve_tlsdesc_entry(unsigned int got_offset)
3927 { this->tlsdesc_got_offset_ = got_offset; }
3928
3929 // Return true if a TLSDESC_PLT entry has been reserved.
3930 bool
3931 has_tlsdesc_entry() const
3932 { return this->tlsdesc_got_offset_ != -1U; }
3933
3934 // Return the GOT offset for the reserved TLSDESC_PLT entry.
3935 unsigned int
3936 get_tlsdesc_got_offset() const
3937 { return this->tlsdesc_got_offset_; }
3938
3939 // Return the PLT offset of the reserved TLSDESC_PLT entry.
3940 unsigned int
3941 get_tlsdesc_plt_offset() const
3942 {
3943 return (this->first_plt_entry_offset() +
3944 (this->count_ + this->irelative_count_)
3945 * this->get_plt_entry_size());
3946 }
3947
3948 // Return the .rela.plt section data.
3949 Reloc_section*
3950 rela_plt()
3951 { return this->rel_; }
3952
3953 // Return where the TLSDESC relocations should go.
3954 Reloc_section*
3955 rela_tlsdesc(Layout*);
3956
3957 // Return where the IRELATIVE relocations should go in the PLT
3958 // relocations.
3959 Reloc_section*
3960 rela_irelative(Symbol_table*, Layout*);
3961
3962 // Return whether we created a section for IRELATIVE relocations.
3963 bool
3964 has_irelative_section() const
3965 { return this->irelative_rel_ != NULL; }
3966
3967 // Return the number of PLT entries.
3968 unsigned int
3969 entry_count() const
3970 { return this->count_ + this->irelative_count_; }
3971
3972 // Return the offset of the first non-reserved PLT entry.
3973 unsigned int
3974 first_plt_entry_offset() const
3975 { return this->do_first_plt_entry_offset(); }
3976
3977 // Return the size of a PLT entry.
3978 unsigned int
3979 get_plt_entry_size() const
3980 { return this->do_get_plt_entry_size(); }
3981
3982 // Return the reserved tlsdesc entry size.
3983 unsigned int
3984 get_plt_tlsdesc_entry_size() const
3985 { return this->do_get_plt_tlsdesc_entry_size(); }
3986
3987 // Return the PLT address to use for a global symbol.
3988 uint64_t
3989 address_for_global(const Symbol*);
3990
3991 // Return the PLT address to use for a local symbol.
3992 uint64_t
3993 address_for_local(const Relobj*, unsigned int symndx);
3994
3995 protected:
3996 // Fill in the first PLT entry.
3997 void
3998 fill_first_plt_entry(unsigned char* pov,
3999 Address got_address,
4000 Address plt_address)
4001 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4002
4003 // Fill in a normal PLT entry.
4004 void
4005 fill_plt_entry(unsigned char* pov,
4006 Address got_address,
4007 Address plt_address,
4008 unsigned int got_offset,
4009 unsigned int plt_offset)
4010 {
4011 this->do_fill_plt_entry(pov, got_address, plt_address,
4012 got_offset, plt_offset);
4013 }
4014
4015 // Fill in the reserved TLSDESC PLT entry.
4016 void
4017 fill_tlsdesc_entry(unsigned char* pov,
4018 Address gotplt_address,
4019 Address plt_address,
4020 Address got_base,
4021 unsigned int tlsdesc_got_offset,
4022 unsigned int plt_offset)
4023 {
4024 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4025 tlsdesc_got_offset, plt_offset);
4026 }
4027
4028 virtual unsigned int
4029 do_first_plt_entry_offset() const = 0;
4030
4031 virtual unsigned int
4032 do_get_plt_entry_size() const = 0;
4033
4034 virtual unsigned int
4035 do_get_plt_tlsdesc_entry_size() const = 0;
4036
4037 virtual void
4038 do_fill_first_plt_entry(unsigned char* pov,
4039 Address got_addr,
4040 Address plt_addr) = 0;
4041
4042 virtual void
4043 do_fill_plt_entry(unsigned char* pov,
4044 Address got_address,
4045 Address plt_address,
4046 unsigned int got_offset,
4047 unsigned int plt_offset) = 0;
4048
4049 virtual void
4050 do_fill_tlsdesc_entry(unsigned char* pov,
4051 Address gotplt_address,
4052 Address plt_address,
4053 Address got_base,
4054 unsigned int tlsdesc_got_offset,
4055 unsigned int plt_offset) = 0;
4056
4057 void
4058 do_adjust_output_section(Output_section* os);
4059
4060 // Write to a map file.
4061 void
4062 do_print_to_mapfile(Mapfile* mapfile) const
4063 { mapfile->print_output_data(this, _("** PLT")); }
4064
4065 private:
4066 // Set the final size.
4067 void
4068 set_final_data_size();
4069
4070 // Write out the PLT data.
4071 void
4072 do_write(Output_file*);
4073
4074 // The reloc section.
4075 Reloc_section* rel_;
4076
4077 // The TLSDESC relocs, if necessary. These must follow the regular
4078 // PLT relocs.
4079 Reloc_section* tlsdesc_rel_;
4080
4081 // The IRELATIVE relocs, if necessary. These must follow the
4082 // regular PLT relocations.
4083 Reloc_section* irelative_rel_;
4084
4085 // The .got section.
4086 Output_data_got_aarch64<size, big_endian>* got_;
4087
4088 // The .got.plt section.
4089 Output_data_space* got_plt_;
4090
4091 // The part of the .got.plt section used for IRELATIVE relocs.
4092 Output_data_space* got_irelative_;
4093
4094 // The number of PLT entries.
4095 unsigned int count_;
4096
4097 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4098 // follow the regular PLT entries.
4099 unsigned int irelative_count_;
4100
4101 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4102 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4103 // indicates an offset is not allocated.
4104 unsigned int tlsdesc_got_offset_;
4105 };
4106
4107 // Initialize the PLT section.
4108
4109 template<int size, bool big_endian>
4110 void
4111 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4112 {
4113 this->rel_ = new Reloc_section(false);
4114 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4115 elfcpp::SHF_ALLOC, this->rel_,
4116 ORDER_DYNAMIC_PLT_RELOCS, false);
4117 }
4118
4119 template<int size, bool big_endian>
4120 void
4121 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4122 Output_section* os)
4123 {
4124 os->set_entsize(this->get_plt_entry_size());
4125 }
4126
4127 // Add an entry to the PLT.
4128
4129 template<int size, bool big_endian>
4130 void
4131 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4132 Layout* layout, Symbol* gsym)
4133 {
4134 gold_assert(!gsym->has_plt_offset());
4135
4136 unsigned int* pcount;
4137 unsigned int plt_reserved;
4138 Output_section_data_build* got;
4139
4140 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4141 && gsym->can_use_relative_reloc(false))
4142 {
4143 pcount = &this->irelative_count_;
4144 plt_reserved = 0;
4145 got = this->got_irelative_;
4146 }
4147 else
4148 {
4149 pcount = &this->count_;
4150 plt_reserved = this->first_plt_entry_offset();
4151 got = this->got_plt_;
4152 }
4153
4154 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4155 + plt_reserved);
4156
4157 ++*pcount;
4158
4159 section_offset_type got_offset = got->current_data_size();
4160
4161 // Every PLT entry needs a GOT entry which points back to the PLT
4162 // entry (this will be changed by the dynamic linker, normally
4163 // lazily when the function is called).
4164 got->set_current_data_size(got_offset + size / 8);
4165
4166 // Every PLT entry needs a reloc.
4167 this->add_relocation(symtab, layout, gsym, got_offset);
4168
4169 // Note that we don't need to save the symbol. The contents of the
4170 // PLT are independent of which symbols are used. The symbols only
4171 // appear in the relocations.
4172 }
4173
4174 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4175 // the PLT offset.
4176
4177 template<int size, bool big_endian>
4178 unsigned int
4179 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4180 Symbol_table* symtab,
4181 Layout* layout,
4182 Sized_relobj_file<size, big_endian>* relobj,
4183 unsigned int local_sym_index)
4184 {
4185 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4186 ++this->irelative_count_;
4187
4188 section_offset_type got_offset = this->got_irelative_->current_data_size();
4189
4190 // Every PLT entry needs a GOT entry which points back to the PLT
4191 // entry.
4192 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4193
4194 // Every PLT entry needs a reloc.
4195 Reloc_section* rela = this->rela_irelative(symtab, layout);
4196 rela->add_symbolless_local_addend(relobj, local_sym_index,
4197 elfcpp::R_AARCH64_IRELATIVE,
4198 this->got_irelative_, got_offset, 0);
4199
4200 return plt_offset;
4201 }
4202
4203 // Add the relocation for a PLT entry.
4204
4205 template<int size, bool big_endian>
4206 void
4207 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4208 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4209 {
4210 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4211 && gsym->can_use_relative_reloc(false))
4212 {
4213 Reloc_section* rela = this->rela_irelative(symtab, layout);
4214 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4215 this->got_irelative_, got_offset, 0);
4216 }
4217 else
4218 {
4219 gsym->set_needs_dynsym_entry();
4220 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4221 got_offset, 0);
4222 }
4223 }
4224
4225 // Return where the TLSDESC relocations should go, creating it if
4226 // necessary. These follow the JUMP_SLOT relocations.
4227
4228 template<int size, bool big_endian>
4229 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4230 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4231 {
4232 if (this->tlsdesc_rel_ == NULL)
4233 {
4234 this->tlsdesc_rel_ = new Reloc_section(false);
4235 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4236 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4237 ORDER_DYNAMIC_PLT_RELOCS, false);
4238 gold_assert(this->tlsdesc_rel_->output_section()
4239 == this->rel_->output_section());
4240 }
4241 return this->tlsdesc_rel_;
4242 }
4243
4244 // Return where the IRELATIVE relocations should go in the PLT. These
4245 // follow the JUMP_SLOT and the TLSDESC relocations.
4246
4247 template<int size, bool big_endian>
4248 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4249 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4250 Layout* layout)
4251 {
4252 if (this->irelative_rel_ == NULL)
4253 {
4254 // Make sure we have a place for the TLSDESC relocations, in
4255 // case we see any later on.
4256 this->rela_tlsdesc(layout);
4257 this->irelative_rel_ = new Reloc_section(false);
4258 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4259 elfcpp::SHF_ALLOC, this->irelative_rel_,
4260 ORDER_DYNAMIC_PLT_RELOCS, false);
4261 gold_assert(this->irelative_rel_->output_section()
4262 == this->rel_->output_section());
4263
4264 if (parameters->doing_static_link())
4265 {
4266 // A statically linked executable will only have a .rela.plt
4267 // section to hold R_AARCH64_IRELATIVE relocs for
4268 // STT_GNU_IFUNC symbols. The library will use these
4269 // symbols to locate the IRELATIVE relocs at program startup
4270 // time.
4271 symtab->define_in_output_data("__rela_iplt_start", NULL,
4272 Symbol_table::PREDEFINED,
4273 this->irelative_rel_, 0, 0,
4274 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4275 elfcpp::STV_HIDDEN, 0, false, true);
4276 symtab->define_in_output_data("__rela_iplt_end", NULL,
4277 Symbol_table::PREDEFINED,
4278 this->irelative_rel_, 0, 0,
4279 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4280 elfcpp::STV_HIDDEN, 0, true, true);
4281 }
4282 }
4283 return this->irelative_rel_;
4284 }
4285
4286 // Return the PLT address to use for a global symbol.
4287
4288 template<int size, bool big_endian>
4289 uint64_t
4290 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4291 const Symbol* gsym)
4292 {
4293 uint64_t offset = 0;
4294 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4295 && gsym->can_use_relative_reloc(false))
4296 offset = (this->first_plt_entry_offset() +
4297 this->count_ * this->get_plt_entry_size());
4298 return this->address() + offset + gsym->plt_offset();
4299 }
4300
4301 // Return the PLT address to use for a local symbol. These are always
4302 // IRELATIVE relocs.
4303
4304 template<int size, bool big_endian>
4305 uint64_t
4306 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4307 const Relobj* object,
4308 unsigned int r_sym)
4309 {
4310 return (this->address()
4311 + this->first_plt_entry_offset()
4312 + this->count_ * this->get_plt_entry_size()
4313 + object->local_plt_offset(r_sym));
4314 }
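
// For example, assuming the standard 64-bit layout defined further below
// (32-byte PLT header, 16-byte entries), with count_ == 4 regular entries,
// the first IRELATIVE entry for a local IFUNC symbol whose local_plt_offset()
// is 0 would sit at this->address() + 32 + 4 * 16 == this->address() + 96.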
4315
4316 // Set the final size.
4317
4318 template<int size, bool big_endian>
4319 void
4320 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4321 {
4322 unsigned int count = this->count_ + this->irelative_count_;
4323 unsigned int extra_size = 0;
4324 if (this->has_tlsdesc_entry())
4325 extra_size += this->get_plt_tlsdesc_entry_size();
4326 this->set_data_size(this->first_plt_entry_offset()
4327 + count * this->get_plt_entry_size()
4328 + extra_size);
4329 }
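
// For example, with the standard 64-bit sizes defined below (32-byte PLT
// header, 16-byte entries, 32-byte TLSDESC entry), a PLT holding 3 regular
// entries plus a reserved TLSDESC entry occupies 32 + 3 * 16 + 32 == 112
// bytes. The actual values always come from the virtual hooks above.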
4330
4331 template<int size, bool big_endian>
4332 class Output_data_plt_aarch64_standard :
4333 public Output_data_plt_aarch64<size, big_endian>
4334 {
4335 public:
4336 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4337 Output_data_plt_aarch64_standard(
4338 Layout* layout,
4339 Output_data_got_aarch64<size, big_endian>* got,
4340 Output_data_space* got_plt,
4341 Output_data_space* got_irelative)
4342 : Output_data_plt_aarch64<size, big_endian>(layout,
4343 size == 32 ? 4 : 8,
4344 got, got_plt,
4345 got_irelative)
4346 { }
4347
4348 protected:
4349 // Return the offset of the first non-reserved PLT entry.
4350 virtual unsigned int
4351 do_first_plt_entry_offset() const
4352 { return this->first_plt_entry_size; }
4353
4354 // Return the size of a PLT entry
4355 virtual unsigned int
4356 do_get_plt_entry_size() const
4357 { return this->plt_entry_size; }
4358
4359 // Return the size of a tlsdesc entry
4360 virtual unsigned int
4361 do_get_plt_tlsdesc_entry_size() const
4362 { return this->plt_tlsdesc_entry_size; }
4363
4364 virtual void
4365 do_fill_first_plt_entry(unsigned char* pov,
4366 Address got_address,
4367 Address plt_address);
4368
4369 virtual void
4370 do_fill_plt_entry(unsigned char* pov,
4371 Address got_address,
4372 Address plt_address,
4373 unsigned int got_offset,
4374 unsigned int plt_offset);
4375
4376 virtual void
4377 do_fill_tlsdesc_entry(unsigned char* pov,
4378 Address gotplt_address,
4379 Address plt_address,
4380 Address got_base,
4381 unsigned int tlsdesc_got_offset,
4382 unsigned int plt_offset);
4383
4384 private:
4385 // The size of the first PLT entry.
4386 static const int first_plt_entry_size = 32;
4387 // The size of a regular PLT entry.
4388 static const int plt_entry_size = 16;
4389 // The size of the reserved PLT TLSDESC entry.
4390 static const int plt_tlsdesc_entry_size = 32;
4391 // Template for the first PLT entry.
4392 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4393 // Template for subsequent PLT entries.
4394 static const uint32_t plt_entry[plt_entry_size / 4];
4395 // The reserved TLSDESC entry in the PLT for an executable.
4396 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4397 };
4398
4399 // The first entry in the PLT for an executable.
4400
4401 template<>
4402 const uint32_t
4403 Output_data_plt_aarch64_standard<32, false>::
4404 first_plt_entry[first_plt_entry_size / 4] =
4405 {
4406 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4407 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4408 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4409 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4410 0xd61f0220, /* br x17 */
4411 0xd503201f, /* nop */
4412 0xd503201f, /* nop */
4413 0xd503201f, /* nop */
4414 };
4415
4416
4417 template<>
4418 const uint32_t
4419 Output_data_plt_aarch64_standard<32, true>::
4420 first_plt_entry[first_plt_entry_size / 4] =
4421 {
4422 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4423 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4424 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4425 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4426 0xd61f0220, /* br x17 */
4427 0xd503201f, /* nop */
4428 0xd503201f, /* nop */
4429 0xd503201f, /* nop */
4430 };
4431
4432
4433 template<>
4434 const uint32_t
4435 Output_data_plt_aarch64_standard<64, false>::
4436 first_plt_entry[first_plt_entry_size / 4] =
4437 {
4438 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4439 0x90000010, /* adrp x16, PLT_GOT+16 */
4440 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4441 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4442 0xd61f0220, /* br x17 */
4443 0xd503201f, /* nop */
4444 0xd503201f, /* nop */
4445 0xd503201f, /* nop */
4446 };
4447
4448
4449 template<>
4450 const uint32_t
4451 Output_data_plt_aarch64_standard<64, true>::
4452 first_plt_entry[first_plt_entry_size / 4] =
4453 {
4454 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4455 0x90000010, /* adrp x16, PLT_GOT+16 */
4456 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4457 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4458 0xd61f0220, /* br x17 */
4459 0xd503201f, /* nop */
4460 0xd503201f, /* nop */
4461 0xd503201f, /* nop */
4462 };
4463
4464
4465 template<>
4466 const uint32_t
4467 Output_data_plt_aarch64_standard<32, false>::
4468 plt_entry[plt_entry_size / 4] =
4469 {
4470 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4471 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4472 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4473 0xd61f0220, /* br x17. */
4474 };
4475
4476
4477 template<>
4478 const uint32_t
4479 Output_data_plt_aarch64_standard<32, true>::
4480 plt_entry[plt_entry_size / 4] =
4481 {
4482 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4483 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4484 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4485 0xd61f0220, /* br x17. */
4486 };
4487
4488
4489 template<>
4490 const uint32_t
4491 Output_data_plt_aarch64_standard<64, false>::
4492 plt_entry[plt_entry_size / 4] =
4493 {
4494 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4495 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4496 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4497 0xd61f0220, /* br x17. */
4498 };
4499
4500
4501 template<>
4502 const uint32_t
4503 Output_data_plt_aarch64_standard<64, true>::
4504 plt_entry[plt_entry_size / 4] =
4505 {
4506 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4507 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4508 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4509 0xd61f0220, /* br x17. */
4510 };
4511
4512
4513 template<int size, bool big_endian>
4514 void
4515 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4516 unsigned char* pov,
4517 Address got_address,
4518 Address plt_address)
4519 {
4520 // PLT0 of the small PLT looks like this in ELF64 -
4521 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4522 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4523 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4524 // symbol resolver
4525 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4526 // GOTPLT entry for this.
4527 // br x17
4528 // PLT0 will be slightly different in ELF32 due to different got entry
4529 // size.
4530 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4531 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4532
4533 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4534 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4535 // FIXME: This only works for 64bit
4536 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4537 gotplt_2nd_ent, plt_address + 4);
4538
4539 // Fill in R_AARCH64_LDST8_LO12
4540 elfcpp::Swap<32, big_endian>::writeval(
4541 pov + 8,
4542 ((this->first_plt_entry[2] & 0xffc003ff)
4543 | ((gotplt_2nd_ent & 0xff8) << 7)));
4544
4545 // Fill in R_AARCH64_ADD_ABS_LO12
4546 elfcpp::Swap<32, big_endian>::writeval(
4547 pov + 12,
4548 ((this->first_plt_entry[3] & 0xffc003ff)
4549 | ((gotplt_2nd_ent & 0xfff) << 10)));
4550 }
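
// A concrete illustration of the masking above (the address is made up):
// if gotplt_2nd_ent ends in 0x010, the 64-bit LDR's scaled imm12 field gets
// (0x010 & 0xff8) << 7 == 0x800, i.e. imm12 == 2 (2 * 8 == 0x10), and the
// ADD gets (0x010 & 0xfff) << 10 == 0x4000, i.e. imm12 == 0x10.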
4551
4552
4553 // Subsequent entries in the PLT for an executable.
4554 // FIXME: This only works for 64bit
4555
4556 template<int size, bool big_endian>
4557 void
4558 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4559 unsigned char* pov,
4560 Address got_address,
4561 Address plt_address,
4562 unsigned int got_offset,
4563 unsigned int plt_offset)
4564 {
4565 memcpy(pov, this->plt_entry, this->plt_entry_size);
4566
4567 Address gotplt_entry_address = got_address + got_offset;
4568 Address plt_entry_address = plt_address + plt_offset;
4569
4570 // Fill in R_AARCH64_PCREL_ADR_HI21
4571 AArch64_relocate_functions<size, big_endian>::adrp(
4572 pov,
4573 gotplt_entry_address,
4574 plt_entry_address);
4575
4576 // Fill in R_AARCH64_LDST64_ABS_LO12
4577 elfcpp::Swap<32, big_endian>::writeval(
4578 pov + 4,
4579 ((this->plt_entry[1] & 0xffc003ff)
4580 | ((gotplt_entry_address & 0xff8) << 7)));
4581
4582 // Fill in R_AARCH64_ADD_ABS_LO12
4583 elfcpp::Swap<32, big_endian>::writeval(
4584 pov + 8,
4585 ((this->plt_entry[2] & 0xffc003ff)
4586 | ((gotplt_entry_address & 0xfff) <<10)));
4587
4588 }
4589
4590
4591 template<>
4592 const uint32_t
4593 Output_data_plt_aarch64_standard<32, false>::
4594 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4595 {
4596 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4597 0x90000002, /* adrp x2, 0 */
4598 0x90000003, /* adrp x3, 0 */
4599 0xb9400042, /* ldr w2, [w2, #0] */
4600 0x11000063, /* add w3, w3, 0 */
4601 0xd61f0040, /* br x2 */
4602 0xd503201f, /* nop */
4603 0xd503201f, /* nop */
4604 };
4605
4606 template<>
4607 const uint32_t
4608 Output_data_plt_aarch64_standard<32, true>::
4609 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4610 {
4611 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4612 0x90000002, /* adrp x2, 0 */
4613 0x90000003, /* adrp x3, 0 */
4614 0xb9400042, /* ldr w2, [w2, #0] */
4615 0x11000063, /* add w3, w3, 0 */
4616 0xd61f0040, /* br x2 */
4617 0xd503201f, /* nop */
4618 0xd503201f, /* nop */
4619 };
4620
4621 template<>
4622 const uint32_t
4623 Output_data_plt_aarch64_standard<64, false>::
4624 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4625 {
4626 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4627 0x90000002, /* adrp x2, 0 */
4628 0x90000003, /* adrp x3, 0 */
4629 0xf9400042, /* ldr x2, [x2, #0] */
4630 0x91000063, /* add x3, x3, 0 */
4631 0xd61f0040, /* br x2 */
4632 0xd503201f, /* nop */
4633 0xd503201f, /* nop */
4634 };
4635
4636 template<>
4637 const uint32_t
4638 Output_data_plt_aarch64_standard<64, true>::
4639 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4640 {
4641 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4642 0x90000002, /* adrp x2, 0 */
4643 0x90000003, /* adrp x3, 0 */
4644 0xf9400042, /* ldr x2, [x2, #0] */
4645 0x91000063, /* add x3, x3, 0 */
4646 0xd61f0040, /* br x2 */
4647 0xd503201f, /* nop */
4648 0xd503201f, /* nop */
4649 };
4650
4651 template<int size, bool big_endian>
4652 void
4653 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4654 unsigned char* pov,
4655 Address gotplt_address,
4656 Address plt_address,
4657 Address got_base,
4658 unsigned int tlsdesc_got_offset,
4659 unsigned int plt_offset)
4660 {
4661 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4662
4663 // move DT_TLSDESC_GOT address into x2
4664 // move .got.plt address into x3
4665 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4666 Address plt_entry_address = plt_address + plt_offset;
4667
4668 // R_AARCH64_ADR_PREL_PG_HI21
4669 AArch64_relocate_functions<size, big_endian>::adrp(
4670 pov + 4,
4671 tlsdesc_got_entry,
4672 plt_entry_address + 4);
4673
4674 // R_AARCH64_ADR_PREL_PG_HI21
4675 AArch64_relocate_functions<size, big_endian>::adrp(
4676 pov + 8,
4677 gotplt_address,
4678 plt_entry_address + 8);
4679
4680 // R_AARCH64_LDST64_ABS_LO12
4681 elfcpp::Swap<32, big_endian>::writeval(
4682 pov + 12,
4683 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4684 | ((tlsdesc_got_entry & 0xff8) << 7)));
4685
4686 // R_AARCH64_ADD_ABS_LO12
4687 elfcpp::Swap<32, big_endian>::writeval(
4688 pov + 16,
4689 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4690 | ((gotplt_address & 0xfff) << 10)));
4691 }
4692
4693 // Write out the PLT. This uses the hand-coded instructions above,
4694 // and adjusts them as needed. This is specified by the AArch64 ABI.
4695
4696 template<int size, bool big_endian>
4697 void
4698 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4699 {
4700 const off_t offset = this->offset();
4701 const section_size_type oview_size =
4702 convert_to_section_size_type(this->data_size());
4703 unsigned char* const oview = of->get_output_view(offset, oview_size);
4704
4705 const off_t got_file_offset = this->got_plt_->offset();
4706 gold_assert(got_file_offset + this->got_plt_->data_size()
4707 == this->got_irelative_->offset());
4708
4709 const section_size_type got_size =
4710 convert_to_section_size_type(this->got_plt_->data_size()
4711 + this->got_irelative_->data_size());
4712 unsigned char* const got_view = of->get_output_view(got_file_offset,
4713 got_size);
4714
4715 unsigned char* pov = oview;
4716
4717 // The base address of the .plt section.
4718 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
4719 // The base address of the PLT portion of the .got section.
4720 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
4721 = this->got_plt_->address();
4722
4723 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
4724 pov += this->first_plt_entry_offset();
4725
4726 // The first three entries in .got.plt are reserved.
4727 unsigned char* got_pov = got_view;
4728 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
4729 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4730
4731 unsigned int plt_offset = this->first_plt_entry_offset();
4732 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
4733 const unsigned int count = this->count_ + this->irelative_count_;
4734 for (unsigned int plt_index = 0;
4735 plt_index < count;
4736 ++plt_index,
4737 pov += this->get_plt_entry_size(),
4738 got_pov += size / 8,
4739 plt_offset += this->get_plt_entry_size(),
4740 got_offset += size / 8)
4741 {
4742 // Set and adjust the PLT entry itself.
4743 this->fill_plt_entry(pov, gotplt_address, plt_address,
4744 got_offset, plt_offset);
4745
4746 // Set the entry in the GOT, which points to plt0.
4747 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
4748 }
4749
4750 if (this->has_tlsdesc_entry())
4751 {
4752 // Set and adjust the reserved TLSDESC PLT entry.
4753 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
4754 // The base address of the .got section.
4755 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
4756 this->got_->address();
4757 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4758 tlsdesc_got_offset, plt_offset);
4759 pov += this->get_plt_tlsdesc_entry_size();
4760 }
4761
4762 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
4763 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
4764
4765 of->write_output_view(offset, oview_size, oview);
4766 of->write_output_view(got_file_offset, got_size, got_view);
4767 }
4768
4769 // Describes how to update the immediate field of an instruction.
4770 struct AArch64_howto
4771 {
4772 // The immediate field mask.
4773 elfcpp::Elf_Xword dst_mask;
4774
4775 // The bit offset at which to insert the relocation immediate.
4776 int doffset;
4777
4778 // The second part offset, if the immediate field has two parts.
4779 // -1 if the immediate field has only one part.
4780 int doffset2;
4781 };
4782
4783 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
4784 {
4785 {0, -1, -1}, // DATA
4786 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
4787 {0xffffe0, 5, -1}, // LD [23:5]-imm19
4788 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
4789 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
4790 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
4791 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
4792 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
4793 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
4794 {0x3ffffff, 0, -1}, // B [25:0]-imm26
4795 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
4796 };
4797
4798 // AArch64 relocate function class
4799
4800 template<int size, bool big_endian>
4801 class AArch64_relocate_functions
4802 {
4803 public:
4804 typedef enum
4805 {
4806 STATUS_OKAY, // No error during relocation.
4807 STATUS_OVERFLOW, // Relocation overflow.
4808 STATUS_BAD_RELOC, // Relocation cannot be applied.
4809 } Status;
4810
4811 typedef AArch64_relocate_functions<size, big_endian> This;
4812 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4813 typedef Relocate_info<size, big_endian> The_relocate_info;
4814 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
4815 typedef Reloc_stub<size, big_endian> The_reloc_stub;
4816 typedef Stub_table<size, big_endian> The_stub_table;
4817 typedef elfcpp::Rela<size, big_endian> The_rela;
4818 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
4819
4820 // Return the page address of the address.
4821 // Page(address) = address & ~0xFFF
4822
4823 static inline AArch64_valtype
4824 Page(Address address)
4825 {
4826 return (address & (~static_cast<Address>(0xFFF)));
4827 }
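
// For example, Page(0x412345) == 0x412000 and Page(0x411fff) == 0x411000;
// any two addresses on the same 4KB page map to the same value.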
4828
4829 private:
4830 // Update instruction (pointed by view) with selected bits (immed).
4831 // val = (val & ~dst_mask) | (immed << doffset)
4832
4833 template<int valsize>
4834 static inline void
4835 update_view(unsigned char* view,
4836 AArch64_valtype immed,
4837 elfcpp::Elf_Xword doffset,
4838 elfcpp::Elf_Xword dst_mask)
4839 {
4840 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
4841 Valtype* wv = reinterpret_cast<Valtype*>(view);
4842 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
4843
4844 // Clear immediate fields.
4845 val &= ~dst_mask;
4846 elfcpp::Swap<valsize, big_endian>::writeval(wv,
4847 static_cast<Valtype>(val | (immed << doffset)));
4848 }
4849
4850 // Update two parts of an instruction (pointed by view) with selected
4851 // bits (immed1 and immed2).
4852 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
4853
4854 template<int valsize>
4855 static inline void
4856 update_view_two_parts(
4857 unsigned char* view,
4858 AArch64_valtype immed1,
4859 AArch64_valtype immed2,
4860 elfcpp::Elf_Xword doffset1,
4861 elfcpp::Elf_Xword doffset2,
4862 elfcpp::Elf_Xword dst_mask)
4863 {
4864 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
4865 Valtype* wv = reinterpret_cast<Valtype*>(view);
4866 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
4867 val &= ~dst_mask;
4868 elfcpp::Swap<valsize, big_endian>::writeval(wv,
4869 static_cast<Valtype>(val | (immed1 << doffset1) |
4870 (immed2 << doffset2)));
4871 }
4872
4873 // Update adr or adrp instruction with immed.
4874 // In adr and adrp: [30:29] immlo [23:5] immhi
4875
4876 static inline void
4877 update_adr(unsigned char* view, AArch64_valtype immed)
4878 {
4879 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
4880 This::template update_view_two_parts<32>(
4881 view,
4882 immed & 0x3,
4883 (immed & 0x1ffffc) >> 2,
4884 29,
4885 5,
4886 dst_mask);
4887 }
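
// For example, an immediate of 0x12345 is split into immlo == 0x1 (bits
// [1:0], placed at [30:29]) and immhi == 0x48d1 (bits [20:2], placed at
// [23:5]).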
4888
4889 // Update movz/movn instruction with bits immed.
4890 // Set instruction to movz if is_movz is true, otherwise set instruction
4891 // to movn.
4892
4893 static inline void
4894 update_movnz(unsigned char* view,
4895 AArch64_valtype immed,
4896 bool is_movz)
4897 {
4898 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
4899 Valtype* wv = reinterpret_cast<Valtype*>(view);
4900 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
4901
4902 const elfcpp::Elf_Xword doffset =
4903 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
4904 const elfcpp::Elf_Xword dst_mask =
4905 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
4906
4907 // Clear immediate fields and opc code.
4908 val &= ~(dst_mask | (0x3 << 29));
4909
4910 // Set instruction to movz or movn.
4911 // movz: [30:29] is 10 movn: [30:29] is 00
4912 if (is_movz)
4913 val |= (0x2 << 29);
4914
4915 elfcpp::Swap<32, big_endian>::writeval(wv,
4916 static_cast<Valtype>(val | (immed << doffset)));
4917 }
4918
4919 // Update selected bits in text.
4920
4921 template<int valsize>
4922 static inline typename This::Status
4923 reloc_common(unsigned char* view, Address x,
4924 const AArch64_reloc_property* reloc_property)
4925 {
4926 // Select bits from X.
4927 Address immed = reloc_property->select_x_value(x);
4928
4929 // Update view.
4930 const AArch64_reloc_property::Reloc_inst inst =
4931 reloc_property->reloc_inst();
4932 // Data relocations and instructions whose immediate field has two
4933 // parts must not go through this helper.
4934 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
4935 aarch64_howto[inst].doffset != -1);
4936 This::template update_view<valsize>(view, immed,
4937 aarch64_howto[inst].doffset,
4938 aarch64_howto[inst].dst_mask);
4939
4940 // Do check overflow or alignment if needed.
4941 return (reloc_property->checkup_x_value(x)
4942 ? This::STATUS_OKAY
4943 : This::STATUS_OVERFLOW);
4944 }
4945
4946 public:
4947
4948 // Construct a B insn. Note that although we group it here with the other
4949 // relocation operations, there is actually no 'relocation' involved.
4950 static inline void
4951 construct_b(unsigned char* view, unsigned int branch_offset)
4952 {
4953 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
4954 26, 0, 0xffffffff);
4955 }
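
// For example, construct_b(view, 8) writes 0x14000002: opcode 0x05 in the
// top six bits (an unconditional B) and imm26 == 2, i.e. a forward branch
// of 8 bytes.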
4956
4957 // Do a simple rela relocation at unaligned addresses.
4958
4959 template<int valsize>
4960 static inline typename This::Status
4961 rela_ua(unsigned char* view,
4962 const Sized_relobj_file<size, big_endian>* object,
4963 const Symbol_value<size>* psymval,
4964 AArch64_valtype addend,
4965 const AArch64_reloc_property* reloc_property)
4966 {
4967 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
4968 Valtype;
4969 typename elfcpp::Elf_types<size>::Elf_Addr x =
4970 psymval->value(object, addend);
4971 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
4972 static_cast<Valtype>(x));
4973 return (reloc_property->checkup_x_value(x)
4974 ? This::STATUS_OKAY
4975 : This::STATUS_OVERFLOW);
4976 }
4977
4978 // Do a simple pc-relative relocation at unaligned addresses.
4979
4980 template<int valsize>
4981 static inline typename This::Status
4982 pcrela_ua(unsigned char* view,
4983 const Sized_relobj_file<size, big_endian>* object,
4984 const Symbol_value<size>* psymval,
4985 AArch64_valtype addend,
4986 Address address,
4987 const AArch64_reloc_property* reloc_property)
4988 {
4989 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
4990 Valtype;
4991 Address x = psymval->value(object, addend) - address;
4992 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
4993 static_cast<Valtype>(x));
4994 return (reloc_property->checkup_x_value(x)
4995 ? This::STATUS_OKAY
4996 : This::STATUS_OVERFLOW);
4997 }
4998
4999 // Do a simple rela relocation at aligned addresses.
5000
5001 template<int valsize>
5002 static inline typename This::Status
5003 rela(
5004 unsigned char* view,
5005 const Sized_relobj_file<size, big_endian>* object,
5006 const Symbol_value<size>* psymval,
5007 AArch64_valtype addend,
5008 const AArch64_reloc_property* reloc_property)
5009 {
5010 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5011 Valtype* wv = reinterpret_cast<Valtype*>(view);
5012 Address x = psymval->value(object, addend);
5013 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x));
5014 return (reloc_property->checkup_x_value(x)
5015 ? This::STATUS_OKAY
5016 : This::STATUS_OVERFLOW);
5017 }
5018
5019 // Do relocate. Update selected bits in text.
5020 // new_val = (val & ~dst_mask) | (immed << doffset)
5021
5022 template<int valsize>
5023 static inline typename This::Status
5024 rela_general(unsigned char* view,
5025 const Sized_relobj_file<size, big_endian>* object,
5026 const Symbol_value<size>* psymval,
5027 AArch64_valtype addend,
5028 const AArch64_reloc_property* reloc_property)
5029 {
5030 // Calculate relocation.
5031 Address x = psymval->value(object, addend);
5032 return This::template reloc_common<valsize>(view, x, reloc_property);
5033 }
5034
5035 // Do relocate. Update selected bits in text.
5036 // new val = (val & ~dst_mask) | (immed << doffset)
5037
5038 template<int valsize>
5039 static inline typename This::Status
5040 rela_general(
5041 unsigned char* view,
5042 AArch64_valtype s,
5043 AArch64_valtype addend,
5044 const AArch64_reloc_property* reloc_property)
5045 {
5046 // Calculate relocation.
5047 Address x = s + addend;
5048 return This::template reloc_common<valsize>(view, x, reloc_property);
5049 }
5050
5051 // Do address relative relocate. Update selected bits in text.
5052 // new val = (val & ~dst_mask) | (immed << doffset)
5053
5054 template<int valsize>
5055 static inline typename This::Status
5056 pcrela_general(
5057 unsigned char* view,
5058 const Sized_relobj_file<size, big_endian>* object,
5059 const Symbol_value<size>* psymval,
5060 AArch64_valtype addend,
5061 Address address,
5062 const AArch64_reloc_property* reloc_property)
5063 {
5064 // Calculate relocation.
5065 Address x = psymval->value(object, addend) - address;
5066 return This::template reloc_common<valsize>(view, x, reloc_property);
5067 }
5068
5069
5070 // Calculate (S + A) - address, update adr instruction.
5071
5072 static inline typename This::Status
5073 adr(unsigned char* view,
5074 const Sized_relobj_file<size, big_endian>* object,
5075 const Symbol_value<size>* psymval,
5076 Address addend,
5077 Address address,
5078 const AArch64_reloc_property* /* reloc_property */)
5079 {
5080 AArch64_valtype x = psymval->value(object, addend) - address;
5081 // Pick bits [20:0] of X.
5082 AArch64_valtype immed = x & 0x1fffff;
5083 update_adr(view, immed);
5084 // Check -2^20 <= X < 2^20
5085 return (size == 64 && Bits<21>::has_overflow((x))
5086 ? This::STATUS_OVERFLOW
5087 : This::STATUS_OKAY);
5088 }
5089
5090 // Calculate PG(S+A) - PG(address), update adrp instruction.
5091 // R_AARCH64_ADR_PREL_PG_HI21
5092
5093 static inline typename This::Status
5094 adrp(
5095 unsigned char* view,
5096 Address sa,
5097 Address address)
5098 {
5099 AArch64_valtype x = This::Page(sa) - This::Page(address);
5100 // Pick [32:12] of X.
5101 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5102 update_adr(view, immed);
5103 // Check -2^32 <= X < 2^32
5104 return (size == 64 && Bits<33>::has_overflow((x))
5105 ? This::STATUS_OVERFLOW
5106 : This::STATUS_OKAY);
5107 }
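
// For example (addresses made up), with sa == 0x500123 and
// address == 0x4ff456: Page(sa) - Page(address) == 0x1000, so the ADRP
// immediate becomes 0x1 and the instruction materializes 0x500000 at run
// time.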
5108
5109 // Calculate PG(S+A) - PG(address), update adrp instruction.
5110 // R_AARCH64_ADR_PREL_PG_HI21
5111
5112 static inline typename This::Status
5113 adrp(unsigned char* view,
5114 const Sized_relobj_file<size, big_endian>* object,
5115 const Symbol_value<size>* psymval,
5116 Address addend,
5117 Address address,
5118 const AArch64_reloc_property* reloc_property)
5119 {
5120 Address sa = psymval->value(object, addend);
5121 AArch64_valtype x = This::Page(sa) - This::Page(address);
5122 // Pick [32:12] of X.
5123 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5124 update_adr(view, immed);
5125 return (reloc_property->checkup_x_value(x)
5126 ? This::STATUS_OKAY
5127 : This::STATUS_OVERFLOW);
5128 }
5129
5130 // Update mov[n/z] instruction. Check overflow if needed.
5131 // If X >= 0, set the instruction to movz and its immediate value to the
5132 // selected bits of X.
5133 // If X < 0, set the instruction to movn and its immediate value to
5134 // NOT (selected bits of X).
5135
5136 static inline typename This::Status
5137 movnz(unsigned char* view,
5138 AArch64_valtype x,
5139 const AArch64_reloc_property* reloc_property)
5140 {
5141 // Select bits from X.
5142 Address immed;
5143 bool is_movz;
5144 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5145 if (static_cast<SignedW>(x) >= 0)
5146 {
5147 immed = reloc_property->select_x_value(x);
5148 is_movz = true;
5149 }
5150 else
5151 {
5152 immed = reloc_property->select_x_value(~x);
5153 is_movz = false;
5154 }
5155
5156 // Update movnz instruction.
5157 update_movnz(view, immed, is_movz);
5158
5159 // Do check overflow or alignment if needed.
5160 return (reloc_property->checkup_x_value(x)
5161 ? This::STATUS_OKAY
5162 : This::STATUS_OVERFLOW);
5163 }
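
// For example (using R_AARCH64_MOVW_SABS_G0, which selects bits [15:0],
// purely for illustration): X == -2 gives ~X == 1, so the instruction is
// rewritten as movn with immediate 1, which materializes -2; X == 5 would
// instead become movz with immediate 5.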
5164
5165 static inline bool
5166 maybe_apply_stub(unsigned int,
5167 const The_relocate_info*,
5168 const The_rela&,
5169 unsigned char*,
5170 Address,
5171 const Sized_symbol<size>*,
5172 const Symbol_value<size>*,
5173 const Sized_relobj_file<size, big_endian>*,
5174 section_size_type);
5175
5176 }; // End of AArch64_relocate_functions
5177
5178
5179 // For a certain relocation type (usually jump/branch), test whether the
5180 // destination needs a stub to be reached. If so, re-route the destination
5181 // of the original instruction to the stub; note that, at this point, the
5182 // stub has already been generated.
5183
5184 template<int size, bool big_endian>
5185 bool
5186 AArch64_relocate_functions<size, big_endian>::
5187 maybe_apply_stub(unsigned int r_type,
5188 const The_relocate_info* relinfo,
5189 const The_rela& rela,
5190 unsigned char* view,
5191 Address address,
5192 const Sized_symbol<size>* gsym,
5193 const Symbol_value<size>* psymval,
5194 const Sized_relobj_file<size, big_endian>* object,
5195 section_size_type current_group_size)
5196 {
5197 if (parameters->options().relocatable())
5198 return false;
5199
5200 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5201 Address branch_target = psymval->value(object, 0) + addend;
5202 int stub_type =
5203 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5204 if (stub_type == ST_NONE)
5205 return false;
5206
5207 const The_aarch64_relobj* aarch64_relobj =
5208 static_cast<const The_aarch64_relobj*>(object);
5209 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5210 gold_assert(stub_table != NULL);
5211
5212 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5213 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5214 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5215 gold_assert(stub != NULL);
5216
5217 Address new_branch_target = stub_table->address() + stub->offset();
5218 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5219 new_branch_target - address;
5220 const AArch64_reloc_property* arp =
5221 aarch64_reloc_property_table->get_reloc_property(r_type);
5222 gold_assert(arp != NULL);
5223 typename This::Status status = This::template
5224 rela_general<32>(view, branch_offset, 0, arp);
5225 if (status != This::STATUS_OKAY)
5226 gold_error(_("Stub is too far away, try a smaller value "
5227 "for '--stub-group-size'. The current value is 0x%lx."),
5228 static_cast<unsigned long>(current_group_size));
5229 return true;
5230 }
5231
5232
5233 // Group input sections for stub generation.
5234 //
5235 // We group input sections in an output section so that the total size,
5236 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5237 // unless the only input section in a group is already bigger than GROUP_SIZE.
5238 // A stub table is then created to follow the last input section in the
5239 // group; that is, for each group one stub table is created and placed
5240 // after the last input section of that group. If STUBS_ALWAYS_AFTER_BRANCH
5241 // is false, we further extend the group after the stub table.
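//
// (Editorial rationale, not from the original source: a branch anywhere in a
// group may have to be redirected to the group's stub table, so the branch
// itself must be able to reach that table.  GROUP_SIZE is therefore kept
// within the range of the branch instructions being fixed up; see do_relax
// below, where the default is derived from MAX_BRANCH_OFFSET.)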
5242
5243 template<int size, bool big_endian>
5244 void
5245 Target_aarch64<size, big_endian>::group_sections(
5246 Layout* layout,
5247 section_size_type group_size,
5248 bool stubs_always_after_branch,
5249 const Task* task)
5250 {
5251 // Group input sections and insert stub table
5252 Layout::Section_list section_list;
5253 layout->get_executable_sections(&section_list);
5254 for (Layout::Section_list::const_iterator p = section_list.begin();
5255 p != section_list.end();
5256 ++p)
5257 {
5258 AArch64_output_section<size, big_endian>* output_section =
5259 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5260 output_section->group_sections(group_size, stubs_always_after_branch,
5261 this, task);
5262 }
5263 }
5264
5265
5266 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5267 // section of RELOBJ.
5268
5269 template<int size, bool big_endian>
5270 AArch64_input_section<size, big_endian>*
5271 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5272 Relobj* relobj, unsigned int shndx) const
5273 {
5274 Section_id sid(relobj, shndx);
5275 typename AArch64_input_section_map::const_iterator p =
5276 this->aarch64_input_section_map_.find(sid);
5277 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5278 }
5279
5280
5281 // Make a new AArch64_input_section object.
5282
5283 template<int size, bool big_endian>
5284 AArch64_input_section<size, big_endian>*
5285 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5286 Relobj* relobj, unsigned int shndx)
5287 {
5288 Section_id sid(relobj, shndx);
5289
5290 AArch64_input_section<size, big_endian>* input_section =
5291 new AArch64_input_section<size, big_endian>(relobj, shndx);
5292 input_section->init();
5293
5294 // Register new AArch64_input_section in map for look-up.
5295 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5296 this->aarch64_input_section_map_.insert(
5297 std::make_pair(sid, input_section));
5298
5299 // Make sure that we have not already created another AArch64_input_section
5300 // for this input section.
5301 gold_assert(ins.second);
5302
5303 return input_section;
5304 }
5305
5306
5307 // Relaxation hook. This is where we do stub generation.
5308
5309 template<int size, bool big_endian>
5310 bool
5311 Target_aarch64<size, big_endian>::do_relax(
5312 int pass,
5313 const Input_objects* input_objects,
5314 Symbol_table* symtab,
5315 Layout* layout,
5316 const Task* task)
5317 {
5318 gold_assert(!parameters->options().relocatable());
5319 if (pass == 1)
5320 {
5321 // We don't handle negative stub_group_size right now.
5322 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5323 if (this->stub_group_size_ == 1)
5324 {
5325 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5326 // will fail to link. The user will have to relink with an explicit
5327 // group size option.
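// (Editorial arithmetic note: 4096 entries * 4 bytes = 16 KiB, so the
// default group size works out to MAX_BRANCH_OFFSET minus 16 KiB of
// stub headroom.)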
5328 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5329 4096 * 4;
5330 }
5331 group_sections(layout, this->stub_group_size_, true, task);
5332 }
5333 else
5334 {
5335 // If this is not the first pass, addresses and file offsets have
5336 // been reset at this point; set them here.
5337 for (Stub_table_iterator sp = this->stub_tables_.begin();
5338 sp != this->stub_tables_.end(); ++sp)
5339 {
5340 The_stub_table* stt = *sp;
5341 The_aarch64_input_section* owner = stt->owner();
5342 off_t off = align_address(owner->original_size(),
5343 stt->addralign());
5344 stt->set_address_and_file_offset(owner->address() + off,
5345 owner->offset() + off);
5346 }
5347 }
5348
5349 // Scan relocs for relocation stubs
5350 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5351 op != input_objects->relobj_end();
5352 ++op)
5353 {
5354 The_aarch64_relobj* aarch64_relobj =
5355 static_cast<The_aarch64_relobj*>(*op);
5356 // Lock the object so we can read from it. This is only called
5357 // single-threaded from Layout::finalize, so it is OK to lock.
5358 Task_lock_obj<Object> tl(task, aarch64_relobj);
5359 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5360 }
5361
5362 bool any_stub_table_changed = false;
5363 for (Stub_table_iterator siter = this->stub_tables_.begin();
5364 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5365 {
5366 The_stub_table* stub_table = *siter;
5367 if (stub_table->update_data_size_changed_p())
5368 {
5369 The_aarch64_input_section* owner = stub_table->owner();
5370 uint64_t address = owner->address();
5371 off_t offset = owner->offset();
5372 owner->reset_address_and_file_offset();
5373 owner->set_address_and_file_offset(address, offset);
5374
5375 any_stub_table_changed = true;
5376 }
5377 }
5378
5379 // Continue relaxation only if at least one stub table changed size.
5380 bool continue_relaxation = any_stub_table_changed;
5381 if (!continue_relaxation)
5382 for (Stub_table_iterator sp = this->stub_tables_.begin();
5383 (sp != this->stub_tables_.end());
5384 ++sp)
5385 (*sp)->finalize_stubs();
5386
5387 return continue_relaxation;
5388 }
5389
5390
5391 // Make a new Stub_table.
5392
5393 template<int size, bool big_endian>
5394 Stub_table<size, big_endian>*
5395 Target_aarch64<size, big_endian>::new_stub_table(
5396 AArch64_input_section<size, big_endian>* owner)
5397 {
5398 Stub_table<size, big_endian>* stub_table =
5399 new Stub_table<size, big_endian>(owner);
5400 stub_table->set_address(align_address(
5401 owner->address() + owner->data_size(), 8));
5402 stub_table->set_file_offset(owner->offset() + owner->data_size());
5403 stub_table->finalize_data_size();
5404
5405 this->stub_tables_.push_back(stub_table);
5406
5407 return stub_table;
5408 }
5409
5410
5411 template<int size, bool big_endian>
5412 uint64_t
5413 Target_aarch64<size, big_endian>::do_reloc_addend(
5414 void* arg, unsigned int r_type, uint64_t) const
5415 {
5416 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5417 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5418 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5419 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5420 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5421 gold_assert(psymval->is_tls_symbol());
5422 // The value of a TLS symbol is the offset in the TLS segment.
5423 return psymval->value(ti.object, 0);
5424 }
5425
5426 // Return the number of entries in the PLT.
5427
5428 template<int size, bool big_endian>
5429 unsigned int
5430 Target_aarch64<size, big_endian>::plt_entry_count() const
5431 {
5432 if (this->plt_ == NULL)
5433 return 0;
5434 return this->plt_->entry_count();
5435 }
5436
5437 // Return the offset of the first non-reserved PLT entry.
5438
5439 template<int size, bool big_endian>
5440 unsigned int
5441 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5442 {
5443 return this->plt_->first_plt_entry_offset();
5444 }
5445
5446 // Return the size of each PLT entry.
5447
5448 template<int size, bool big_endian>
5449 unsigned int
5450 Target_aarch64<size, big_endian>::plt_entry_size() const
5451 {
5452 return this->plt_->get_plt_entry_size();
5453 }
5454
5455 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5456
5457 template<int size, bool big_endian>
5458 void
5459 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5460 Symbol_table* symtab, Layout* layout)
5461 {
5462 if (this->tls_base_symbol_defined_)
5463 return;
5464
5465 Output_segment* tls_segment = layout->tls_segment();
5466 if (tls_segment != NULL)
5467 {
5468 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5469 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5470 Symbol_table::PREDEFINED,
5471 tls_segment, 0, 0,
5472 elfcpp::STT_TLS,
5473 elfcpp::STB_LOCAL,
5474 elfcpp::STV_HIDDEN, 0,
5475 Symbol::SEGMENT_START,
5476 true);
5477 }
5478 this->tls_base_symbol_defined_ = true;
5479 }
5480
5481 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5482
5483 template<int size, bool big_endian>
5484 void
5485 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5486 Symbol_table* symtab, Layout* layout)
5487 {
5488 if (this->plt_ == NULL)
5489 this->make_plt_section(symtab, layout);
5490
5491 if (!this->plt_->has_tlsdesc_entry())
5492 {
5493 // Allocate the TLSDESC_GOT entry.
5494 Output_data_got_aarch64<size, big_endian>* got =
5495 this->got_section(symtab, layout);
5496 unsigned int got_offset = got->add_constant(0);
5497
5498 // Allocate the TLSDESC_PLT entry.
5499 this->plt_->reserve_tlsdesc_entry(got_offset);
5500 }
5501 }
5502
5503 // Create a GOT entry for the TLS module index.
5504
5505 template<int size, bool big_endian>
5506 unsigned int
5507 Target_aarch64<size, big_endian>::got_mod_index_entry(
5508 Symbol_table* symtab, Layout* layout,
5509 Sized_relobj_file<size, big_endian>* object)
5510 {
5511 if (this->got_mod_index_offset_ == -1U)
5512 {
5513 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5514 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5515 Output_data_got_aarch64<size, big_endian>* got =
5516 this->got_section(symtab, layout);
5517 unsigned int got_offset = got->add_constant(0);
5518 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5519 got_offset, 0);
5520 got->add_constant(0);
5521 this->got_mod_index_offset_ = got_offset;
5522 }
5523 return this->got_mod_index_offset_;
5524 }
5525
5526 // Optimize the TLS relocation type based on what we know about the
5527 // symbol. IS_FINAL is true if the final address of this symbol is
5528 // known at link time.
5529
5530 template<int size, bool big_endian>
5531 tls::Tls_optimization
5532 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5533 int r_type)
5534 {
5535 // If we are generating a shared library, then we can't do anything
5536 // in the linker.
5537 if (parameters->options().shared())
5538 return tls::TLSOPT_NONE;
5539
5540 switch (r_type)
5541 {
5542 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5543 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5544 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5545 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5546 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5547 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5548 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5549 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5550 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5551 case elfcpp::R_AARCH64_TLSDESC_LDR:
5552 case elfcpp::R_AARCH64_TLSDESC_ADD:
5553 case elfcpp::R_AARCH64_TLSDESC_CALL:
5554 // These are General-Dynamic relocs, which permit fully general TLS
5555 // access. Since we know that we are generating an executable,
5556 // we can convert this to Initial-Exec. If we also know that
5557 // this is a local symbol, we can further switch to Local-Exec.
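// (Editorial illustration, not from the original source: a typical GD
// sequence of roughly
//   adrp x0, :tlsgd:var
//   add  x0, x0, #:tlsgd_lo12:var
//   bl   __tls_get_addr
// can be relaxed for Local-Exec so that the thread-pointer offset is
// materialized directly, e.g. via movz/movk of a TPREL value, and the
// call to __tls_get_addr is eliminated.)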
5558 if (is_final)
5559 return tls::TLSOPT_TO_LE;
5560 return tls::TLSOPT_TO_IE;
5561
5562 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5563 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5564 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5565 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5566 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5567 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5568 // These are Local-Dynamic, which refer to local symbols in the
5569 // dynamic TLS block. Since we know that we are generating an
5570 // executable, we can switch to Local-Exec.
5571 return tls::TLSOPT_TO_LE;
5572
5573 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5574 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5575 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5576 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5577 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5578 // These are Initial-Exec relocs which get the thread offset
5579 // from the GOT. If we know that the symbol resolves locally (its
5580 // final value is known), we can switch to Local-Exec, which encodes
5581 // the thread offset directly in the instruction.
5582 if (is_final)
5583 return tls::TLSOPT_TO_LE;
5584 return tls::TLSOPT_NONE;
5585
5586 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5587 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5588 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5589 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5590 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5591 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5592 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5593 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5594 // When we already have Local-Exec, there is nothing further we
5595 // can do.
5596 return tls::TLSOPT_NONE;
5597
5598 default:
5599 gold_unreachable();
5600 }
5601 }
5602
5603 // Returns true if this relocation type could be that of a function pointer.
5604
5605 template<int size, bool big_endian>
5606 inline bool
5607 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5608 unsigned int r_type)
5609 {
5610 switch (r_type)
5611 {
5612 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5613 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5614 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5615 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5616 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5617 {
5618 return true;
5619 }
5620 }
5621 return false;
5622 }
5623
5624 // For safe ICF, scan a relocation for a local symbol to check if it
5625 // corresponds to a function pointer being taken. In that case mark
5626 // the function whose pointer was taken as not foldable.
5627
5628 template<int size, bool big_endian>
5629 inline bool
5630 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5631 Symbol_table* ,
5632 Layout* ,
5633 Target_aarch64<size, big_endian>* ,
5634 Sized_relobj_file<size, big_endian>* ,
5635 unsigned int ,
5636 Output_section* ,
5637 const elfcpp::Rela<size, big_endian>& ,
5638 unsigned int r_type,
5639 const elfcpp::Sym<size, big_endian>&)
5640 {
5641 // When building a shared library, do not fold any local symbols.
5642 return (parameters->options().shared()
5643 || possible_function_pointer_reloc(r_type));
5644 }
5645
5646 // For safe ICF, scan a relocation for a global symbol to check if it
5647 // corresponds to a function pointer being taken. In that case mark
5648 // the function whose pointer was taken as not foldable.
5649
5650 template<int size, bool big_endian>
5651 inline bool
5652 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5653 Symbol_table* ,
5654 Layout* ,
5655 Target_aarch64<size, big_endian>* ,
5656 Sized_relobj_file<size, big_endian>* ,
5657 unsigned int ,
5658 Output_section* ,
5659 const elfcpp::Rela<size, big_endian>& ,
5660 unsigned int r_type,
5661 Symbol* gsym)
5662 {
5663 // When building a shared library, do not fold symbols whose visibility
5664 // is hidden, internal or protected.
5665 return ((parameters->options().shared()
5666 && (gsym->visibility() == elfcpp::STV_INTERNAL
5667 || gsym->visibility() == elfcpp::STV_PROTECTED
5668 || gsym->visibility() == elfcpp::STV_HIDDEN))
5669 || possible_function_pointer_reloc(r_type));
5670 }
5671
5672 // Report an unsupported relocation against a local symbol.
5673
5674 template<int size, bool big_endian>
5675 void
5676 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5677 Sized_relobj_file<size, big_endian>* object,
5678 unsigned int r_type)
5679 {
5680 gold_error(_("%s: unsupported reloc %u against local symbol"),
5681 object->name().c_str(), r_type);
5682 }
5683
5684 // We are about to emit a dynamic relocation of type R_TYPE. If the
5685 // dynamic linker does not support it, issue an error.
5686
5687 template<int size, bool big_endian>
5688 void
5689 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5690 unsigned int r_type)
5691 {
5692 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5693
5694 switch (r_type)
5695 {
5696 // These are the relocation types supported by glibc for AARCH64.
5697 case elfcpp::R_AARCH64_NONE:
5698 case elfcpp::R_AARCH64_COPY:
5699 case elfcpp::R_AARCH64_GLOB_DAT:
5700 case elfcpp::R_AARCH64_JUMP_SLOT:
5701 case elfcpp::R_AARCH64_RELATIVE:
5702 case elfcpp::R_AARCH64_TLS_DTPREL64:
5703 case elfcpp::R_AARCH64_TLS_DTPMOD64:
5704 case elfcpp::R_AARCH64_TLS_TPREL64:
5705 case elfcpp::R_AARCH64_TLSDESC:
5706 case elfcpp::R_AARCH64_IRELATIVE:
5707 case elfcpp::R_AARCH64_ABS32:
5708 case elfcpp::R_AARCH64_ABS64:
5709 return;
5710
5711 default:
5712 break;
5713 }
5714
5715 // This prevents us from issuing more than one error per reloc
5716 // section. But we can still wind up issuing more than one
5717 // error per object file.
5718 if (this->issued_non_pic_error_)
5719 return;
5720 gold_assert(parameters->options().output_is_position_independent());
5721 object->error(_("requires unsupported dynamic reloc; "
5722 "recompile with -fPIC"));
5723 this->issued_non_pic_error_ = true;
5724 return;
5725 }
5726
5727 // Return whether we need to make a PLT entry for a relocation of the
5728 // given type against a STT_GNU_IFUNC symbol.
5729
5730 template<int size, bool big_endian>
5731 bool
5732 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
5733 Sized_relobj_file<size, big_endian>* object,
5734 unsigned int r_type)
5735 {
5736 const AArch64_reloc_property* arp =
5737 aarch64_reloc_property_table->get_reloc_property(r_type);
5738 gold_assert(arp != NULL);
5739
5740 int flags = arp->reference_flags();
5741 if (flags & Symbol::TLS_REF)
5742 {
5743 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
5744 object->name().c_str(), arp->name().c_str());
5745 return false;
5746 }
5747 return flags != 0;
5748 }
5749
5750 // Scan a relocation for a local symbol.
5751
5752 template<int size, bool big_endian>
5753 inline void
5754 Target_aarch64<size, big_endian>::Scan::local(
5755 Symbol_table* symtab,
5756 Layout* layout,
5757 Target_aarch64<size, big_endian>* target,
5758 Sized_relobj_file<size, big_endian>* object,
5759 unsigned int data_shndx,
5760 Output_section* output_section,
5761 const elfcpp::Rela<size, big_endian>& rela,
5762 unsigned int r_type,
5763 const elfcpp::Sym<size, big_endian>& lsym,
5764 bool is_discarded)
5765 {
5766 if (is_discarded)
5767 return;
5768
5769 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
5770 Reloc_section;
5771 Output_data_got_aarch64<size, big_endian>* got =
5772 target->got_section(symtab, layout);
5773 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5774
5775 // A local STT_GNU_IFUNC symbol may require a PLT entry.
5776 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
5777 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
5778 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
5779
5780 switch (r_type)
5781 {
5782 case elfcpp::R_AARCH64_ABS32:
5783 case elfcpp::R_AARCH64_ABS16:
5784 if (parameters->options().output_is_position_independent())
5785 {
5786 gold_error(_("%s: unsupported reloc %u in position independent link."),
5787 object->name().c_str(), r_type);
5788 }
5789 break;
5790
5791 case elfcpp::R_AARCH64_ABS64:
5792 // If building a shared library or PIE, we need to mark this as a dynamic
5793 // relocation, so that the dynamic loader can relocate it.
5794 if (parameters->options().output_is_position_independent())
5795 {
5796 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
5797 rela_dyn->add_local_relative(object, r_sym,
5798 elfcpp::R_AARCH64_RELATIVE,
5799 output_section,
5800 data_shndx,
5801 rela.get_r_offset(),
5802 rela.get_r_addend(),
5803 is_ifunc);
5804 }
5805 break;
5806
5807 case elfcpp::R_AARCH64_PREL64:
5808 case elfcpp::R_AARCH64_PREL32:
5809 case elfcpp::R_AARCH64_PREL16:
5810 break;
5811
5812 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
5813 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
5814 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
5815 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
5816 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
5817 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
5818 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
5819 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
5820 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
5821 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
5822 break;
5823
5824 // Control flow, pc-relative. We don't need to do anything for a relative
5825 // addressing relocation against a local symbol if it does not reference
5826 // the GOT.
5827 case elfcpp::R_AARCH64_TSTBR14:
5828 case elfcpp::R_AARCH64_CONDBR19:
5829 case elfcpp::R_AARCH64_JUMP26:
5830 case elfcpp::R_AARCH64_CALL26:
5831 break;
5832
5833 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5834 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5835 {
5836 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5837 optimize_tls_reloc(!parameters->options().shared(), r_type);
5838 if (tlsopt == tls::TLSOPT_TO_LE)
5839 break;
5840
5841 layout->set_has_static_tls();
5842 // Create a GOT entry for the tp-relative offset.
5843 if (!parameters->doing_static_link())
5844 {
5845 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
5846 target->rela_dyn_section(layout),
5847 elfcpp::R_AARCH64_TLS_TPREL64);
5848 }
5849 else if (!object->local_has_got_offset(r_sym,
5850 GOT_TYPE_TLS_OFFSET))
5851 {
5852 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
5853 unsigned int got_offset =
5854 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
5855 const elfcpp::Elf_Xword addend = rela.get_r_addend();
5856 gold_assert(addend == 0);
5857 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
5858 object, r_sym);
5859 }
5860 }
5861 break;
5862
5863 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5864 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5865 {
5866 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5867 optimize_tls_reloc(!parameters->options().shared(), r_type);
5868 if (tlsopt == tls::TLSOPT_TO_LE)
5869 {
5870 layout->set_has_static_tls();
5871 break;
5872 }
5873 gold_assert(tlsopt == tls::TLSOPT_NONE);
5874
5875 got->add_local_pair_with_rel(object, r_sym, data_shndx,
5876 GOT_TYPE_TLS_PAIR,
5877 target->rela_dyn_section(layout),
5878 elfcpp::R_AARCH64_TLS_DTPMOD64);
5879 }
5880 break;
5881
5882 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5883 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5884 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5885 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5886 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5887 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5888 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5889 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5890 {
5891 layout->set_has_static_tls();
5892 bool output_is_shared = parameters->options().shared();
5893 if (output_is_shared)
5894 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
5895 object->name().c_str(), r_type);
5896 }
5897 break;
5898
5899 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5900 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5901 {
5902 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5903 optimize_tls_reloc(!parameters->options().shared(), r_type);
5904 if (tlsopt == tls::TLSOPT_NONE)
5905 {
5906 // Create a GOT entry for the module index.
5907 target->got_mod_index_entry(symtab, layout, object);
5908 }
5909 else if (tlsopt != tls::TLSOPT_TO_LE)
5910 unsupported_reloc_local(object, r_type);
5911 }
5912 break;
5913
5914 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5915 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5916 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5917 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5918 break;
5919
5920 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5921 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5922 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5923 {
5924 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
5925 optimize_tls_reloc(!parameters->options().shared(), r_type);
5926 target->define_tls_base_symbol(symtab, layout);
5927 if (tlsopt == tls::TLSOPT_NONE)
5928 {
5929 // Create reserved PLT and GOT entries for the resolver.
5930 target->reserve_tlsdesc_entries(symtab, layout);
5931
5932 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
5933 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
5934 // entry needs to be in an area in .got.plt, not .got. Call
5935 // got_section to make sure the section has been created.
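// (Editorial note: in the TLSDESC scheme the two GOT words hold the
// resolver function pointer and its argument; the dynamic linker fills
// them in when it processes the R_AARCH64_TLSDESC relocation.)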
5936 target->got_section(symtab, layout);
5937 Output_data_got<size, big_endian>* got =
5938 target->got_tlsdesc_section();
5939 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5940 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
5941 {
5942 unsigned int got_offset = got->add_constant(0);
5943 got->add_constant(0);
5944 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
5945 got_offset);
5946 Reloc_section* rt = target->rela_tlsdesc_section(layout);
5947 // We store the arguments we need in a vector, and use
5948 // the index into the vector as the parameter to pass
5949 // to the target specific routines.
5950 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
5951 void* arg = reinterpret_cast<void*>(intarg);
5952 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
5953 got, got_offset, 0);
5954 }
5955 }
5956 else if (tlsopt != tls::TLSOPT_TO_LE)
5957 unsupported_reloc_local(object, r_type);
5958 }
5959 break;
5960
5961 case elfcpp::R_AARCH64_TLSDESC_CALL:
5962 break;
5963
5964 default:
5965 unsupported_reloc_local(object, r_type);
5966 }
5967 }
5968
5969
5970 // Report an unsupported relocation against a global symbol.
5971
5972 template<int size, bool big_endian>
5973 void
5974 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
5975 Sized_relobj_file<size, big_endian>* object,
5976 unsigned int r_type,
5977 Symbol* gsym)
5978 {
5979 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
5980 object->name().c_str(), r_type, gsym->demangled_name().c_str());
5981 }
5982
5983 template<int size, bool big_endian>
5984 inline void
5985 Target_aarch64<size, big_endian>::Scan::global(
5986 Symbol_table* symtab,
5987 Layout* layout,
5988 Target_aarch64<size, big_endian>* target,
5989 Sized_relobj_file<size, big_endian>* object,
5990 unsigned int data_shndx,
5991 Output_section* output_section,
5992 const elfcpp::Rela<size, big_endian>& rela,
5993 unsigned int r_type,
5994 Symbol* gsym)
5995 {
5996 // A STT_GNU_IFUNC symbol may require a PLT entry.
5997 if (gsym->type() == elfcpp::STT_GNU_IFUNC
5998 && this->reloc_needs_plt_for_ifunc(object, r_type))
5999 target->make_plt_entry(symtab, layout, gsym);
6000
6001 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6002 Reloc_section;
6003 const AArch64_reloc_property* arp =
6004 aarch64_reloc_property_table->get_reloc_property(r_type);
6005 gold_assert(arp != NULL);
6006
6007 switch (r_type)
6008 {
6009 case elfcpp::R_AARCH64_ABS16:
6010 case elfcpp::R_AARCH64_ABS32:
6011 case elfcpp::R_AARCH64_ABS64:
6012 {
6013 // Make a PLT entry if necessary.
6014 if (gsym->needs_plt_entry())
6015 {
6016 target->make_plt_entry(symtab, layout, gsym);
6017 // Since this is not a PC-relative relocation, we may be
6018 // taking the address of a function. In that case we need to
6019 // set the entry in the dynamic symbol table to the address of
6020 // the PLT entry.
6021 if (gsym->is_from_dynobj() && !parameters->options().shared())
6022 gsym->set_needs_dynsym_value();
6023 }
6024 // Make a dynamic relocation if necessary.
6025 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6026 {
6027 if (!parameters->options().output_is_position_independent()
6028 && gsym->may_need_copy_reloc())
6029 {
6030 target->copy_reloc(symtab, layout, object,
6031 data_shndx, output_section, gsym, rela);
6032 }
6033 else if (r_type == elfcpp::R_AARCH64_ABS64
6034 && gsym->type() == elfcpp::STT_GNU_IFUNC
6035 && gsym->can_use_relative_reloc(false)
6036 && !gsym->is_from_dynobj()
6037 && !gsym->is_undefined()
6038 && !gsym->is_preemptible())
6039 {
6040 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6041 // symbol. This makes a function address in a PIE executable
6042 // match the address in a shared library that it links against.
6043 Reloc_section* rela_dyn =
6044 target->rela_irelative_section(layout);
6045 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6046 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6047 output_section, object,
6048 data_shndx,
6049 rela.get_r_offset(),
6050 rela.get_r_addend());
6051 }
6052 else if (r_type == elfcpp::R_AARCH64_ABS64
6053 && gsym->can_use_relative_reloc(false))
6054 {
6055 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6056 rela_dyn->add_global_relative(gsym,
6057 elfcpp::R_AARCH64_RELATIVE,
6058 output_section,
6059 object,
6060 data_shndx,
6061 rela.get_r_offset(),
6062 rela.get_r_addend(),
6063 false);
6064 }
6065 else
6066 {
6067 check_non_pic(object, r_type);
6068 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6069 rela_dyn = target->rela_dyn_section(layout);
6070 rela_dyn->add_global(
6071 gsym, r_type, output_section, object,
6072 data_shndx, rela.get_r_offset(), rela.get_r_addend());
6073 }
6074 }
6075 }
6076 break;
6077
6078 case elfcpp::R_AARCH64_PREL16:
6079 case elfcpp::R_AARCH64_PREL32:
6080 case elfcpp::R_AARCH64_PREL64:
6081 // This is used to fill the GOT absolute address.
6082 if (gsym->needs_plt_entry())
6083 {
6084 target->make_plt_entry(symtab, layout, gsym);
6085 }
6086 break;
6087
6088 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6089 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6090 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6091 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6092 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6093 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6094 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6095 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6096 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6097 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6098 {
6099 if (gsym->needs_plt_entry())
6100 target->make_plt_entry(symtab, layout, gsym);
6101 // Make a dynamic relocation if necessary.
6102 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6103 {
6104 if (parameters->options().output_is_executable()
6105 && gsym->may_need_copy_reloc())
6106 {
6107 target->copy_reloc(symtab, layout, object,
6108 data_shndx, output_section, gsym, rela);
6109 }
6110 }
6111 break;
6112 }
6113
6114 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6115 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6116 {
6117 // This pair of relocations is used to access a specific GOT entry.
6118 // Note that a GOT entry holds the *address* of a symbol.
6119 // The symbol therefore requires a GOT entry.
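// (Editorial illustration, not from the original source — a typical
// compiler-generated sequence using this pair:
//   adrp x0, :got:sym               // R_AARCH64_ADR_GOT_PAGE
//   ldr  x0, [x0, #:got_lo12:sym]   // R_AARCH64_LD64_GOT_LO12_NC
// which loads the address of 'sym' from its GOT slot.)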
6120 Output_data_got_aarch64<size, big_endian>* got =
6121 target->got_section(symtab, layout);
6122 if (gsym->final_value_is_known())
6123 {
6124 // For a STT_GNU_IFUNC symbol we want the PLT address.
6125 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6126 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6127 else
6128 got->add_global(gsym, GOT_TYPE_STANDARD);
6129 }
6130 else
6131 {
6132 // If this symbol is not fully resolved, we need to add a dynamic
6133 // relocation for it.
6134 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6135
6136 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6137 //
6138 // 1) The symbol may be defined in some other module.
6139 // 2) We are building a shared library and this is a protected
6140 // symbol; using GLOB_DAT means that the dynamic linker can use
6141 // the address of the PLT in the main executable when appropriate
6142 // so that function address comparisons work.
6143 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6144 // again so that function address comparisons work.
6145 if (gsym->is_from_dynobj()
6146 || gsym->is_undefined()
6147 || gsym->is_preemptible()
6148 || (gsym->visibility() == elfcpp::STV_PROTECTED
6149 && parameters->options().shared())
6150 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6151 && parameters->options().output_is_position_independent()))
6152 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6153 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6154 else
6155 {
6156 // For a STT_GNU_IFUNC symbol we want to write the PLT
6157 // offset into the GOT, so that function pointer
6158 // comparisons work correctly.
6159 bool is_new;
6160 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6161 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6162 else
6163 {
6164 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6165 // Tell the dynamic linker to use the PLT address
6166 // when resolving relocations.
6167 if (gsym->is_from_dynobj()
6168 && !parameters->options().shared())
6169 gsym->set_needs_dynsym_value();
6170 }
6171 if (is_new)
6172 {
6173 rela_dyn->add_global_relative(
6174 gsym, elfcpp::R_AARCH64_RELATIVE,
6175 got,
6176 gsym->got_offset(GOT_TYPE_STANDARD),
6177 0,
6178 false);
6179 }
6180 }
6181 }
6182 break;
6183 }
6184
6185 case elfcpp::R_AARCH64_TSTBR14:
6186 case elfcpp::R_AARCH64_CONDBR19:
6187 case elfcpp::R_AARCH64_JUMP26:
6188 case elfcpp::R_AARCH64_CALL26:
6189 {
6190 if (gsym->final_value_is_known())
6191 break;
6192
6193 if (gsym->is_defined()
6194 && !gsym->is_from_dynobj()
6195 && !gsym->is_preemptible())
6196 break;
6197
6198 // Make plt entry for function call.
6199 target->make_plt_entry(symtab, layout, gsym);
6200 break;
6201 }
6202
6203 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6204 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6205 {
6206 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6207 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6208 if (tlsopt == tls::TLSOPT_TO_LE)
6209 {
6210 layout->set_has_static_tls();
6211 break;
6212 }
6213 gold_assert(tlsopt == tls::TLSOPT_NONE);
6214
6215 // General dynamic.
6216 Output_data_got_aarch64<size, big_endian>* got =
6217 target->got_section(symtab, layout);
6218 // Create 2 consecutive entries for module index and offset.
6219 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6220 target->rela_dyn_section(layout),
6221 elfcpp::R_AARCH64_TLS_DTPMOD64,
6222 elfcpp::R_AARCH64_TLS_DTPREL64);
6223 }
6224 break;
6225
6226 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6227 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6228 {
6229 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6230 optimize_tls_reloc(!parameters->options().shared(), r_type);
6231 if (tlsopt == tls::TLSOPT_NONE)
6232 {
6233 // Create a GOT entry for the module index.
6234 target->got_mod_index_entry(symtab, layout, object);
6235 }
6236 else if (tlsopt != tls::TLSOPT_TO_LE)
6237 unsupported_reloc_local(object, r_type);
6238 }
6239 break;
6240
6241 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6242 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6243 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6244 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6245 break;
6246
6247 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6248 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6249 {
6250 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6251 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6252 if (tlsopt == tls::TLSOPT_TO_LE)
6253 break;
6254
6255 layout->set_has_static_tls();
6256 // Create a GOT entry for the tp-relative offset.
6257 Output_data_got_aarch64<size, big_endian>* got
6258 = target->got_section(symtab, layout);
6259 if (!parameters->doing_static_link())
6260 {
6261 got->add_global_with_rel(
6262 gsym, GOT_TYPE_TLS_OFFSET,
6263 target->rela_dyn_section(layout),
6264 elfcpp::R_AARCH64_TLS_TPREL64);
6265 }
6266 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6267 {
6268 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6269 unsigned int got_offset =
6270 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6271 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6272 gold_assert(addend == 0);
6273 got->add_static_reloc(got_offset,
6274 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6275 }
6276 }
6277 break;
6278
6279 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6280 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6281 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6282 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6283 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6284 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6285 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6286 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6287 layout->set_has_static_tls();
6288 if (parameters->options().shared())
6289 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6290 object->name().c_str(), r_type);
6291 break;
6292
6293 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6294 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6295 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6296 {
6297 target->define_tls_base_symbol(symtab, layout);
6298 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6299 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6300 if (tlsopt == tls::TLSOPT_NONE)
6301 {
6302 // Create reserved PLT and GOT entries for the resolver.
6303 target->reserve_tlsdesc_entries(symtab, layout);
6304
6305 // Create a double GOT entry with an R_AARCH64_TLSDESC
6306 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6307 // entry needs to be in an area in .got.plt, not .got. Call
6308 // got_section to make sure the section has been created.
6309 target->got_section(symtab, layout);
6310 Output_data_got<size, big_endian>* got =
6311 target->got_tlsdesc_section();
6312 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6313 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6314 elfcpp::R_AARCH64_TLSDESC, 0);
6315 }
6316 else if (tlsopt == tls::TLSOPT_TO_IE)
6317 {
6318 // Create a GOT entry for the tp-relative offset.
6319 Output_data_got<size, big_endian>* got
6320 = target->got_section(symtab, layout);
6321 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6322 target->rela_dyn_section(layout),
6323 elfcpp::R_AARCH64_TLS_TPREL64);
6324 }
6325 else if (tlsopt != tls::TLSOPT_TO_LE)
6326 unsupported_reloc_global(object, r_type, gsym);
6327 }
6328 break;
6329
6330 case elfcpp::R_AARCH64_TLSDESC_CALL:
6331 break;
6332
6333 default:
6334 gold_error(_("%s: unsupported reloc type in global scan"),
6335 aarch64_reloc_property_table->
6336 reloc_name_in_error_message(r_type).c_str());
6337 }
6338 return;
6339 } // End of Scan::global
6340
6341
6342 // Create the PLT section.
6343 template<int size, bool big_endian>
6344 void
6345 Target_aarch64<size, big_endian>::make_plt_section(
6346 Symbol_table* symtab, Layout* layout)
6347 {
6348 if (this->plt_ == NULL)
6349 {
6350 // Create the GOT section first.
6351 this->got_section(symtab, layout);
6352
6353 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6354 this->got_irelative_);
6355
6356 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6357 (elfcpp::SHF_ALLOC
6358 | elfcpp::SHF_EXECINSTR),
6359 this->plt_, ORDER_PLT, false);
6360
6361 // Make the sh_info field of .rela.plt point to .plt.
6362 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6363 rela_plt_os->set_info_section(this->plt_->output_section());
6364 }
6365 }
6366
6367 // Return the section for TLSDESC relocations.
6368
6369 template<int size, bool big_endian>
6370 typename Target_aarch64<size, big_endian>::Reloc_section*
6371 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6372 {
6373 return this->plt_section()->rela_tlsdesc(layout);
6374 }
6375
6376 // Create a PLT entry for a global symbol.
6377
6378 template<int size, bool big_endian>
6379 void
6380 Target_aarch64<size, big_endian>::make_plt_entry(
6381 Symbol_table* symtab,
6382 Layout* layout,
6383 Symbol* gsym)
6384 {
6385 if (gsym->has_plt_offset())
6386 return;
6387
6388 if (this->plt_ == NULL)
6389 this->make_plt_section(symtab, layout);
6390
6391 this->plt_->add_entry(symtab, layout, gsym);
6392 }
6393
6394 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6395
6396 template<int size, bool big_endian>
6397 void
6398 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6399 Symbol_table* symtab, Layout* layout,
6400 Sized_relobj_file<size, big_endian>* relobj,
6401 unsigned int local_sym_index)
6402 {
6403 if (relobj->local_has_plt_offset(local_sym_index))
6404 return;
6405 if (this->plt_ == NULL)
6406 this->make_plt_section(symtab, layout);
6407 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6408 relobj,
6409 local_sym_index);
6410 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6411 }
6412
6413 template<int size, bool big_endian>
6414 void
6415 Target_aarch64<size, big_endian>::gc_process_relocs(
6416 Symbol_table* symtab,
6417 Layout* layout,
6418 Sized_relobj_file<size, big_endian>* object,
6419 unsigned int data_shndx,
6420 unsigned int sh_type,
6421 const unsigned char* prelocs,
6422 size_t reloc_count,
6423 Output_section* output_section,
6424 bool needs_special_offset_handling,
6425 size_t local_symbol_count,
6426 const unsigned char* plocal_symbols)
6427 {
6428 if (sh_type == elfcpp::SHT_REL)
6429 {
6430 return;
6431 }
6432
6433 gold::gc_process_relocs<
6434 size, big_endian,
6435 Target_aarch64<size, big_endian>,
6436 elfcpp::SHT_RELA,
6437 typename Target_aarch64<size, big_endian>::Scan,
6438 typename Target_aarch64<size, big_endian>::Relocatable_size_for_reloc>(
6439 symtab,
6440 layout,
6441 this,
6442 object,
6443 data_shndx,
6444 prelocs,
6445 reloc_count,
6446 output_section,
6447 needs_special_offset_handling,
6448 local_symbol_count,
6449 plocal_symbols);
6450 }
6451
6452 // Scan relocations for a section.
6453
6454 template<int size, bool big_endian>
6455 void
6456 Target_aarch64<size, big_endian>::scan_relocs(
6457 Symbol_table* symtab,
6458 Layout* layout,
6459 Sized_relobj_file<size, big_endian>* object,
6460 unsigned int data_shndx,
6461 unsigned int sh_type,
6462 const unsigned char* prelocs,
6463 size_t reloc_count,
6464 Output_section* output_section,
6465 bool needs_special_offset_handling,
6466 size_t local_symbol_count,
6467 const unsigned char* plocal_symbols)
6468 {
6469 if (sh_type == elfcpp::SHT_REL)
6470 {
6471 gold_error(_("%s: unsupported REL reloc section"),
6472 object->name().c_str());
6473 return;
6474 }
6475 gold::scan_relocs<size, big_endian, Target_aarch64, elfcpp::SHT_RELA, Scan>(
6476 symtab,
6477 layout,
6478 this,
6479 object,
6480 data_shndx,
6481 prelocs,
6482 reloc_count,
6483 output_section,
6484 needs_special_offset_handling,
6485 local_symbol_count,
6486 plocal_symbols);
6487 }
6488
6489 // Return the value to use for a dynamic symbol which requires special
6490 // treatment. This is how we support equality comparisons of function
6491 // pointers across shared library boundaries, as described in the
6492 // processor specific ABI supplement.
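// (Editorial note: concretely, when a non-PIC executable takes the address
// of a function defined in a shared object, the address of the executable's
// PLT entry is used as the canonical address everywhere, which is why the
// dynamic symbol value is set to the PLT address below.)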
6493
6494 template<int size, bool big_endian>
6495 uint64_t
6496 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6497 {
6498 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6499 return this->plt_address_for_global(gsym);
6500 }
6501
6502
6503 // Finalize the sections.
6504
6505 template<int size, bool big_endian>
6506 void
6507 Target_aarch64<size, big_endian>::do_finalize_sections(
6508 Layout* layout,
6509 const Input_objects*,
6510 Symbol_table* symtab)
6511 {
6512 const Reloc_section* rel_plt = (this->plt_ == NULL
6513 ? NULL
6514 : this->plt_->rela_plt());
6515 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6516 this->rela_dyn_, true, false);
6517
6518 // Emit any relocs we saved in an attempt to avoid generating COPY
6519 // relocs.
6520 if (this->copy_relocs_.any_saved_relocs())
6521 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6522
6523 // Fill in some more dynamic tags.
6524 Output_data_dynamic* const odyn = layout->dynamic_data();
6525 if (odyn != NULL)
6526 {
6527 if (this->plt_ != NULL
6528 && this->plt_->output_section() != NULL
6529 && this->plt_->has_tlsdesc_entry())
6530 {
6531 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6532 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6533 this->got_->finalize_data_size();
6534 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6535 this->plt_, plt_offset);
6536 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6537 this->got_, got_offset);
6538 }
6539 }
6540
6541 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6542 // the .got.plt section.
6543 Symbol* sym = this->global_offset_table_;
6544 if (sym != NULL)
6545 {
6546 uint64_t data_size = this->got_plt_->current_data_size();
6547 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6548
6549 // If the .got section is more than 0x8000 bytes, we add
6550 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16
6551 // bit relocations have a greater chance of working.
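// (Editorial note: a signed 16-bit offset only reaches [-0x8000, 0x7fff]
// around the base symbol; biasing _GLOBAL_OFFSET_TABLE_ by 0x8000
// re-centres it so a full 0x10000 bytes of the table stay in range.)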
6552 if (data_size >= 0x8000)
6553 symtab->get_sized_symbol<size>(sym)->set_value(
6554 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6555 }
6556
6557 if (parameters->doing_static_link()
6558 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6559 {
6560 // If linking statically, make sure that the __rela_iplt symbols
6561 // were defined if necessary, even if we didn't create a PLT.
6562 static const Define_symbol_in_segment syms[] =
6563 {
6564 {
6565 "__rela_iplt_start", // name
6566 elfcpp::PT_LOAD, // segment_type
6567 elfcpp::PF_W, // segment_flags_set
6568 elfcpp::PF(0), // segment_flags_clear
6569 0, // value
6570 0, // size
6571 elfcpp::STT_NOTYPE, // type
6572 elfcpp::STB_GLOBAL, // binding
6573 elfcpp::STV_HIDDEN, // visibility
6574 0, // nonvis
6575 Symbol::SEGMENT_START, // offset_from_base
6576 true // only_if_ref
6577 },
6578 {
6579 "__rela_iplt_end", // name
6580 elfcpp::PT_LOAD, // segment_type
6581 elfcpp::PF_W, // segment_flags_set
6582 elfcpp::PF(0), // segment_flags_clear
6583 0, // value
6584 0, // size
6585 elfcpp::STT_NOTYPE, // type
6586 elfcpp::STB_GLOBAL, // binding
6587 elfcpp::STV_HIDDEN, // visibility
6588 0, // nonvis
6589 Symbol::SEGMENT_START, // offset_from_base
6590 true // only_if_ref
6591 }
6592 };
6593
6594 symtab->define_symbols(layout, 2, syms,
6595 layout->script_options()->saw_sections_clause());
6596 }
6597
6598 return;
6599 }
6600
6601 // Perform a relocation.
6602
6603 template<int size, bool big_endian>
6604 inline bool
6605 Target_aarch64<size, big_endian>::Relocate::relocate(
6606 const Relocate_info<size, big_endian>* relinfo,
6607 Target_aarch64<size, big_endian>* target,
6608 Output_section* ,
6609 size_t relnum,
6610 const elfcpp::Rela<size, big_endian>& rela,
6611 unsigned int r_type,
6612 const Sized_symbol<size>* gsym,
6613 const Symbol_value<size>* psymval,
6614 unsigned char* view,
6615 typename elfcpp::Elf_types<size>::Elf_Addr address,
6616 section_size_type /* view_size */)
6617 {
6618 if (view == NULL)
6619 return true;
6620
6621 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6622
6623 const AArch64_reloc_property* reloc_property =
6624 aarch64_reloc_property_table->get_reloc_property(r_type);
6625
6626 if (reloc_property == NULL)
6627 {
6628 std::string reloc_name =
6629 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
6630 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6631 _("cannot relocate %s in object file"),
6632 reloc_name.c_str());
6633 return true;
6634 }
6635
6636 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
6637
6638 // Pick the value to use for symbols defined in the PLT.
6639 Symbol_value<size> symval;
6640 if (gsym != NULL
6641 && gsym->use_plt_offset(reloc_property->reference_flags()))
6642 {
6643 symval.set_output_value(target->plt_address_for_global(gsym));
6644 psymval = &symval;
6645 }
6646 else if (gsym == NULL && psymval->is_ifunc_symbol())
6647 {
6648 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6649 if (object->local_has_plt_offset(r_sym))
6650 {
6651 symval.set_output_value(target->plt_address_for_local(object, r_sym));
6652 psymval = &symval;
6653 }
6654 }
6655
6656 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6657
6658 // Get the GOT offset if needed.
6659 // For aarch64, the GOT pointer points to the start of the GOT section.
6660 bool have_got_offset = false;
6661 int got_offset = 0;
6662 int got_base = (target->got_ != NULL
6663 ? (target->got_->current_data_size() >= 0x8000
6664 ? 0x8000 : 0)
6665 : 0);
6666 switch (r_type)
6667 {
6668 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
6669 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
6670 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
6671 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
6672 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
6673 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
6674 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
6675 case elfcpp::R_AARCH64_GOTREL64:
6676 case elfcpp::R_AARCH64_GOTREL32:
6677 case elfcpp::R_AARCH64_GOT_LD_PREL19:
6678 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
6679 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6680 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6681 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6682 if (gsym != NULL)
6683 {
6684 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
6685 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
6686 }
6687 else
6688 {
6689 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6690 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
6691 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
6692 - got_base);
6693 }
6694 have_got_offset = true;
6695 break;
6696
6697 default:
6698 break;
6699 }
6700
6701 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
6702 typename elfcpp::Elf_types<size>::Elf_Addr value;
6703 switch (r_type)
6704 {
6705 case elfcpp::R_AARCH64_NONE:
6706 break;
6707
6708 case elfcpp::R_AARCH64_ABS64:
6709 reloc_status = Reloc::template rela_ua<64>(
6710 view, object, psymval, addend, reloc_property);
6711 break;
6712
6713 case elfcpp::R_AARCH64_ABS32:
6714 reloc_status = Reloc::template rela_ua<32>(
6715 view, object, psymval, addend, reloc_property);
6716 break;
6717
6718 case elfcpp::R_AARCH64_ABS16:
6719 reloc_status = Reloc::template rela_ua<16>(
6720 view, object, psymval, addend, reloc_property);
6721 break;
6722
6723 case elfcpp::R_AARCH64_PREL64:
6724 reloc_status = Reloc::template pcrela_ua<64>(
6725 view, object, psymval, addend, address, reloc_property);
6726 break;
6727
6728 case elfcpp::R_AARCH64_PREL32:
6729 reloc_status = Reloc::template pcrela_ua<32>(
6730 view, object, psymval, addend, address, reloc_property);
6731 break;
6732
6733 case elfcpp::R_AARCH64_PREL16:
6734 reloc_status = Reloc::template pcrela_ua<16>(
6735 view, object, psymval, addend, address, reloc_property);
6736 break;
6737
6738 case elfcpp::R_AARCH64_LD_PREL_LO19:
6739 reloc_status = Reloc::template pcrela_general<32>(
6740 view, object, psymval, addend, address, reloc_property);
6741 break;
6742
6743 case elfcpp::R_AARCH64_ADR_PREL_LO21:
6744 reloc_status = Reloc::adr(view, object, psymval, addend,
6745 address, reloc_property);
6746 break;
6747
6748 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
6749 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
6750 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
6751 reloc_property);
6752 break;
6753
6754 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
6755 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
6756 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
6757 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
6758 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
6759 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
6760 reloc_status = Reloc::template rela_general<32>(
6761 view, object, psymval, addend, reloc_property);
6762 break;
6763
6764 case elfcpp::R_AARCH64_CALL26:
6765 if (this->skip_call_tls_get_addr_)
6766 {
6767 // Double check that the TLSGD insn has been optimized away.
6768 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
6769 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
6770 reinterpret_cast<Insntype*>(view));
6771 gold_assert((insn & 0xff000000) == 0x91000000);
6772
6773 reloc_status = Reloc::STATUS_OKAY;
6774 this->skip_call_tls_get_addr_ = false;
6775 // Return false to stop further processing of this reloc.
6776 return false;
6777 }
6778 // Fallthrough
6779 case elfcpp::R_AARCH64_JUMP26:
6780 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
6781 gsym, psymval, object,
6782 target->stub_group_size_))
6783 break;
6784 // Fallthrough
6785 case elfcpp::R_AARCH64_TSTBR14:
6786 case elfcpp::R_AARCH64_CONDBR19:
6787 reloc_status = Reloc::template pcrela_general<32>(
6788 view, object, psymval, addend, address, reloc_property);
6789 break;
6790
6791 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6792 gold_assert(have_got_offset);
6793 value = target->got_->address() + got_base + got_offset;
6794 reloc_status = Reloc::adrp(view, value + addend, address);
6795 break;
6796
6797 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6798 gold_assert(have_got_offset);
6799 value = target->got_->address() + got_base + got_offset;
6800 reloc_status = Reloc::template rela_general<32>(
6801 view, value, addend, reloc_property);
6802 break;
6803
6804 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6805 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6806 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6807 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6808 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6809 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6810 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6811 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6812 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6813 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6814 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6815 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6816 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6817 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6818 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6819 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6820 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6821 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6822 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6823 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6824 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6825 case elfcpp::R_AARCH64_TLSDESC_CALL:
6826 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
6827 gsym, psymval, view, address);
6828 break;
6829
6830 // These are dynamic relocations, which are unexpected when linking.
6831 case elfcpp::R_AARCH64_COPY:
6832 case elfcpp::R_AARCH64_GLOB_DAT:
6833 case elfcpp::R_AARCH64_JUMP_SLOT:
6834 case elfcpp::R_AARCH64_RELATIVE:
6835 case elfcpp::R_AARCH64_IRELATIVE:
6836 case elfcpp::R_AARCH64_TLS_DTPREL64:
6837 case elfcpp::R_AARCH64_TLS_DTPMOD64:
6838 case elfcpp::R_AARCH64_TLS_TPREL64:
6839 case elfcpp::R_AARCH64_TLSDESC:
6840 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6841 _("unexpected reloc %u in object file"),
6842 r_type);
6843 break;
6844
6845 default:
6846 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6847 _("unsupported reloc %s"),
6848 reloc_property->name().c_str());
6849 break;
6850 }
6851
6852 // Report any errors.
6853 switch (reloc_status)
6854 {
6855 case Reloc::STATUS_OKAY:
6856 break;
6857 case Reloc::STATUS_OVERFLOW:
6858 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6859 _("relocation overflow in %s"),
6860 reloc_property->name().c_str());
6861 break;
6862 case Reloc::STATUS_BAD_RELOC:
6863 gold_error_at_location(
6864 relinfo,
6865 relnum,
6866 rela.get_r_offset(),
6867 _("unexpected opcode while processing relocation %s"),
6868 reloc_property->name().c_str());
6869 break;
6870 default:
6871 gold_unreachable();
6872 }
6873
6874 return true;
6875 }
6876
6877
6878 template<int size, bool big_endian>
6879 inline
6880 typename AArch64_relocate_functions<size, big_endian>::Status
6881 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
6882 const Relocate_info<size, big_endian>* relinfo,
6883 Target_aarch64<size, big_endian>* target,
6884 size_t relnum,
6885 const elfcpp::Rela<size, big_endian>& rela,
6886 unsigned int r_type, const Sized_symbol<size>* gsym,
6887 const Symbol_value<size>* psymval,
6888 unsigned char* view,
6889 typename elfcpp::Elf_types<size>::Elf_Addr address)
6890 {
6891 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
6892 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
6893
6894 Output_segment* tls_segment = relinfo->layout->tls_segment();
6895 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6896 const AArch64_reloc_property* reloc_property =
6897 aarch64_reloc_property_table->get_reloc_property(r_type);
6898 gold_assert(reloc_property != NULL);
6899
6900 const bool is_final = (gsym == NULL
6901 ? !parameters->options().shared()
6902 : gsym->final_value_is_known());
6903 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6904 optimize_tls_reloc(is_final, r_type);
6905
6906 Sized_relobj_file<size, big_endian>* object = relinfo->object;
6907 int tls_got_offset_type;
6908 switch (r_type)
6909 {
6910 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6911 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
6912 {
6913 if (tlsopt == tls::TLSOPT_TO_LE)
6914 {
6915 if (tls_segment == NULL)
6916 {
6917 gold_assert(parameters->errors()->error_count() > 0
6918 || issue_undefined_symbol_error(gsym));
6919 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
6920 }
6921 return tls_gd_to_le(relinfo, target, rela, r_type, view,
6922 psymval);
6923 }
6924 else if (tlsopt == tls::TLSOPT_NONE)
6925 {
6926 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
6927 // First, get the address of the GOT entry.
6928 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
6929 if (gsym != NULL)
6930 {
6931 gold_assert(gsym->has_got_offset(tls_got_offset_type));
6932 got_entry_address = target->got_->address() +
6933 gsym->got_offset(tls_got_offset_type);
6934 }
6935 else
6936 {
6937 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6938 gold_assert(
6939 object->local_has_got_offset(r_sym, tls_got_offset_type));
6940 got_entry_address = target->got_->address() +
6941 object->local_got_offset(r_sym, tls_got_offset_type);
6942 }
6943
6944 // Relocate the GOT entry address into the adrp/add pair.
6945 switch (r_type)
6946 {
6947 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6948 return aarch64_reloc_funcs::adrp(
6949 view, got_entry_address + addend, address);
6950
6951 break;
6952
6953 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6954 return aarch64_reloc_funcs::template rela_general<32>(
6955 view, got_entry_address, addend, reloc_property);
6956 break;
6957
6958 default:
6959 gold_unreachable();
6960 }
6961 }
6962 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
6963 _("unsupported gd_to_ie relaxation on %u"),
6964 r_type);
6965 }
6966 break;
6967
6968 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6969 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
6970 {
6971 if (tlsopt == tls::TLSOPT_TO_LE)
6972 {
6973 if (tls_segment == NULL)
6974 {
6975 gold_assert(parameters->errors()->error_count() > 0
6976 || issue_undefined_symbol_error(gsym));
6977 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
6978 }
6979 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
6980 psymval);
6981 }
6982
6983 gold_assert(tlsopt == tls::TLSOPT_NONE);
6984 // Relocate the field with the offset of the GOT entry for
6985 // the module index.
6986 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
6987 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
6988 target->got_->address());
6989
6990 switch (r_type)
6991 {
6992 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6993 return aarch64_reloc_funcs::adrp(
6994 view, got_entry_address + addend, address);
6995 break;
6996
6997 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6998 return aarch64_reloc_funcs::template rela_general<32>(
6999 view, got_entry_address, addend, reloc_property);
7000 break;
7001
7002 default:
7003 gold_unreachable();
7004 }
7005 }
7006 break;
7007
7008 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7009 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7010 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7011 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7012 {
7013 AArch64_address value = psymval->value(object, 0);
7014 if (tlsopt == tls::TLSOPT_TO_LE)
7015 {
7016 if (tls_segment == NULL)
7017 {
7018 gold_assert(parameters->errors()->error_count() > 0
7019 || issue_undefined_symbol_error(gsym));
7020 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7021 }
7022 }
7023 switch (r_type)
7024 {
7025 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7026 return aarch64_reloc_funcs::movnz(view, value + addend,
7027 reloc_property);
7028 break;
7029
7030 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7031 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7032 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7033 return aarch64_reloc_funcs::template rela_general<32>(
7034 view, value, addend, reloc_property);
7035 break;
7036
7037 default:
7038 gold_unreachable();
7039 }
7040 // We should never reach here.
7041 }
7042 break;
7043
7044 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7045 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7046 {
7047 if (tlsopt == tls::TLSOPT_TO_LE)
7048 {
7049 if (tls_segment == NULL)
7050 {
7051 gold_assert(parameters->errors()->error_count() > 0
7052 || issue_undefined_symbol_error(gsym));
7053 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7054 }
7055 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7056 psymval);
7057 }
7058 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7059
7060 // First, get the address of the GOT entry.
7061 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7062 if (gsym != NULL)
7063 {
7064 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7065 got_entry_address = target->got_->address() +
7066 gsym->got_offset(tls_got_offset_type);
7067 }
7068 else
7069 {
7070 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7071 gold_assert(
7072 object->local_has_got_offset(r_sym, tls_got_offset_type));
7073 got_entry_address = target->got_->address() +
7074 object->local_got_offset(r_sym, tls_got_offset_type);
7075 }
7076 // Relocate the GOT entry address into the adrp/ldr pair.
7077 switch (r_type)
7078 {
7079 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7080 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7081 address);
7082 break;
7083 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7084 return aarch64_reloc_funcs::template rela_general<32>(
7085 view, got_entry_address, addend, reloc_property);
7086 default:
7087 gold_unreachable();
7088 }
7089 }
7090 // We shall never reach here.
7091 break;
7092
7093 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7094 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7095 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7096 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7097 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7098 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7099 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7100 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7101 {
7102 gold_assert(tls_segment != NULL);
7103 AArch64_address value = psymval->value(object, 0);
7104
7105 if (!parameters->options().shared())
7106 {
7107 AArch64_address aligned_tcb_size =
7108 align_address(target->tcb_size(),
7109 tls_segment->maximum_alignment());
7110 value += aligned_tcb_size;
7111 switch (r_type)
7112 {
7113 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7114 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7115 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7116 return aarch64_reloc_funcs::movnz(view, value + addend,
7117 reloc_property);
7118 default:
7119 return aarch64_reloc_funcs::template
7120 rela_general<32>(view,
7121 value,
7122 addend,
7123 reloc_property);
7124 }
7125 }
7126 else
7127 gold_error(_("%s: unsupported reloc %u "
7128 "in non-static TLSLE mode."),
7129 object->name().c_str(), r_type);
7130 }
7131 break;
7132
7133 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7134 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7135 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7136 case elfcpp::R_AARCH64_TLSDESC_CALL:
7137 {
7138 if (tlsopt == tls::TLSOPT_TO_LE)
7139 {
7140 if (tls_segment == NULL)
7141 {
7142 gold_assert(parameters->errors()->error_count() > 0
7143 || issue_undefined_symbol_error(gsym));
7144 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7145 }
7146 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7147 view, psymval);
7148 }
7149 else
7150 {
7151 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7152 ? GOT_TYPE_TLS_OFFSET
7153 : GOT_TYPE_TLS_DESC);
7154 unsigned int got_tlsdesc_offset = 0;
7155 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7156 && tlsopt == tls::TLSOPT_NONE)
7157 {
7158 // The GOT entries for TLSDESC live in the .got.tlsdesc portion of
7159 // the .got.plt section, but the offset recorded for the symbol is
7160 // relative to the start of .got.tlsdesc, so adjust it here.
7161 got_tlsdesc_offset = (target->got_->data_size()
7162 + target->got_plt_section()->data_size());
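// (I.e., the .got.tlsdesc entries sit right after .got and .got.plt,
// so adding their two sizes gives the offset of .got.tlsdesc from
// got_->address().)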
7163 }
7164 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7165 if (gsym != NULL)
7166 {
7167 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7168 got_entry_address = target->got_->address()
7169 + got_tlsdesc_offset
7170 + gsym->got_offset(tls_got_offset_type);
7171 }
7172 else
7173 {
7174 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7175 gold_assert(
7176 object->local_has_got_offset(r_sym, tls_got_offset_type));
7177 got_entry_address = target->got_->address() +
7178 got_tlsdesc_offset +
7179 object->local_got_offset(r_sym, tls_got_offset_type);
7180 }
7181 if (tlsopt == tls::TLSOPT_TO_IE)
7182 {
7183 if (tls_segment == NULL)
7184 {
7185 gold_assert(parameters->errors()->error_count() > 0
7186 || issue_undefined_symbol_error(gsym));
7187 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7188 }
7189 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7190 view, psymval, got_entry_address,
7191 address);
7192 }
7193
7194 // Now do tlsdesc relocation.
7195 switch (r_type)
7196 {
7197 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7198 return aarch64_reloc_funcs::adrp(view,
7199 got_entry_address + addend,
7200 address);
7201 break;
7202 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7203 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7204 return aarch64_reloc_funcs::template rela_general<32>(
7205 view, got_entry_address, addend, reloc_property);
7206 break;
7207 case elfcpp::R_AARCH64_TLSDESC_CALL:
7208 return aarch64_reloc_funcs::STATUS_OKAY;
7209 break;
7210 default:
7211 gold_unreachable();
7212 }
7213 }
7214 }
7215 break;
7216
7217 default:
7218 gold_error(_("%s: unsupported TLS reloc %u."),
7219 object->name().c_str(), r_type);
7220 }
7221 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7222 } // End of relocate_tls.
7223
7224
7225 template<int size, bool big_endian>
7226 inline
7227 typename AArch64_relocate_functions<size, big_endian>::Status
7228 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7229 const Relocate_info<size, big_endian>* relinfo,
7230 Target_aarch64<size, big_endian>* target,
7231 const elfcpp::Rela<size, big_endian>& rela,
7232 unsigned int r_type,
7233 unsigned char* view,
7234 const Symbol_value<size>* psymval)
7235 {
7236 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7237 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7238 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7239
7240 Insntype* ip = reinterpret_cast<Insntype*>(view);
7241 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7242 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7243 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7244
7245 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7246 {
7247 // This is the 2nd reloc of the pair; the optimization should already
7248 // have been done when the 1st reloc was processed.
7249 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7250 return aarch64_reloc_funcs::STATUS_OKAY;
7251 }
7252
7253 // The original sequence is -
7254 // 90000000 adrp x0, 0 <main>
7255 // 91000000 add x0, x0, #0x0
7256 // 94000000 bl 0 <__tls_get_addr>
7257 // optimized to sequence -
7258 // d53bd040 mrs x0, tpidr_el0
7259 // 91400000 add x0, x0, #0x0, lsl #12
7260 // 91000000 add x0, x0, #0x0
7261
7262 // Unlike tls_ie_to_le, we rewrite all 3 insns in one call, when we
7263 // encounter the first relocation "R_AARCH64_TLSGD_ADR_PAGE21", because we
7264 // also have to change "bl __tls_get_addr", which has no corresponding TLS
7265 // relocation type. So before proceeding, make sure the compiler emitted
7266 // exactly the expected sequence.
7267 if(!(insn1 == 0x90000000 // adrp x0,0
7268 && insn2 == 0x91000000 // add x0, x0, #0x0
7269 && insn3 == 0x94000000)) // bl 0
7270 {
7271 // Ideally we would give up the gd_to_le relaxation and fall back to a
7272 // normal GD access. However, the relaxation decision was made early,
7273 // during the scan stage, where no GOT entry was allocated for this
7274 // symbol, so all we can do is report an error now.
7275 gold_error(_("unexpected reloc insn sequence while relaxing "
7276 "tls gd to le for reloc %u."), r_type);
7277 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7278 }
7279
7280 // Write new insns.
7281 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7282 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7283 insn3 = 0x91000000; // add x0, x0, #0x0
7284 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7285 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7286 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7287
7288 // Calculate tprel value.
7289 Output_segment* tls_segment = relinfo->layout->tls_segment();
7290 gold_assert(tls_segment != NULL);
7291 AArch64_address value = psymval->value(relinfo->object, 0);
7292 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7293 AArch64_address aligned_tcb_size =
7294 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7295 AArch64_address x = value + aligned_tcb_size;
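// "x" is the symbol's offset from the thread pointer: on AArch64 the TCB
// (rounded up to the TLS segment's alignment) sits between TP and the
// executable's TLS block, hence the aligned_tcb_size adjustment.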
7296
7297 // After new insns are written, apply TLSLE relocs.
7298 const AArch64_reloc_property* rp1 =
7299 aarch64_reloc_property_table->get_reloc_property(
7300 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7301 const AArch64_reloc_property* rp2 =
7302 aarch64_reloc_property_table->get_reloc_property(
7303 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7304 gold_assert(rp1 != NULL && rp2 != NULL);
7305
7306 typename aarch64_reloc_funcs::Status s1 =
7307 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7308 x,
7309 addend,
7310 rp1);
7311 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7312 return s1;
7313
7314 typename aarch64_reloc_funcs::Status s2 =
7315 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7316 x,
7317 addend,
7318 rp2);
7319
7320 this->skip_call_tls_get_addr_ = true;
7321 return s2;
7322 } // End of tls_gd_to_le
7323
7324
7325 template<int size, bool big_endian>
7326 inline
7327 typename AArch64_relocate_functions<size, big_endian>::Status
7328 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7329 const Relocate_info<size, big_endian>* relinfo,
7330 Target_aarch64<size, big_endian>* target,
7331 const elfcpp::Rela<size, big_endian>& rela,
7332 unsigned int r_type,
7333 unsigned char* view,
7334 const Symbol_value<size>* psymval)
7335 {
7336 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7337 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7338 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7339
7340 Insntype* ip = reinterpret_cast<Insntype*>(view);
7341 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7342 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7343 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7344
7345 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7346 {
7347 // This is the 2nd reloc of the pair; the optimization should already
7348 // have been done when the 1st reloc was processed.
7349 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7350 return aarch64_reloc_funcs::STATUS_OKAY;
7351 }
7352
7353 // The original sequence is -
7354 // 90000000 adrp x0, 0 <main>
7355 // 91000000 add x0, x0, #0x0
7356 // 94000000 bl 0 <__tls_get_addr>
7357 // optimized to sequence -
7358 // d53bd040 mrs x0, tpidr_el0
7359 // 91400000 add x0, x0, #0x0, lsl #12
7360 // 91000000 add x0, x0, #0x0
7361
7362 // Unlike tls_ie_to_le, we rewrite all 3 insns in one call, when we
7363 // encounter the first relocation "R_AARCH64_TLSLD_ADR_PAGE21", because we
7364 // also have to change "bl __tls_get_addr", which has no corresponding TLS
7365 // relocation type. So before proceeding, make sure the compiler emitted
7366 // exactly the expected sequence.
7367 if(!(insn1 == 0x90000000 // adrp x0,0
7368 && insn2 == 0x91000000 // add x0, x0, #0x0
7369 && insn3 == 0x94000000)) // bl 0
7370 {
7371 // Ideally we would give up the ld_to_le relaxation and fall back to a
7372 // normal LD access. However, the relaxation decision was made early,
7373 // during the scan stage, where no GOT entry was allocated for this
7374 // symbol, so all we can do is report an error now.
7375 gold_error(_("unexpected reloc insn sequence while relaxing "
7376 "tls ld to le for reloc %u."), r_type);
7377 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7378 }
7379
7380 // Write new insns.
7381 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7382 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7383 insn3 = 0x91000000; // add x0, x0, #0x0
7384 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7385 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7386 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7387
7388 // Calculate tprel value.
7389 Output_segment* tls_segment = relinfo->layout->tls_segment();
7390 gold_assert(tls_segment != NULL);
7391 AArch64_address value = psymval->value(relinfo->object, 0);
7392 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7393 AArch64_address aligned_tcb_size =
7394 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7395 AArch64_address x = value + aligned_tcb_size;
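// As in tls_gd_to_le, "x" is the symbol's offset from the thread pointer
// (the aligned TCB size plus the symbol's offset within the TLS block).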
7396
7397 // After new insns are written, apply TLSLE relocs.
7398 const AArch64_reloc_property* rp1 =
7399 aarch64_reloc_property_table->get_reloc_property(
7400 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7401 const AArch64_reloc_property* rp2 =
7402 aarch64_reloc_property_table->get_reloc_property(
7403 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7404 gold_assert(rp1 != NULL && rp2 != NULL);
7405
7406 typename aarch64_reloc_funcs::Status s1 =
7407 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7408 x,
7409 addend,
7410 rp1);
7411 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7412 return s1;
7413
7414 typename aarch64_reloc_funcs::Status s2 =
7415 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7416 x,
7417 addend,
7418 rp2);
7419
7420 this->skip_call_tls_get_addr_ = true;
7421 return s2;
7422
7423 } // End of tls_ld_to_le
7424
7425 template<int size, bool big_endian>
7426 inline
7427 typename AArch64_relocate_functions<size, big_endian>::Status
7428 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7429 const Relocate_info<size, big_endian>* relinfo,
7430 Target_aarch64<size, big_endian>* target,
7431 const elfcpp::Rela<size, big_endian>& rela,
7432 unsigned int r_type,
7433 unsigned char* view,
7434 const Symbol_value<size>* psymval)
7435 {
7436 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7437 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7438 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7439
7440 AArch64_address value = psymval->value(relinfo->object, 0);
7441 Output_segment* tls_segment = relinfo->layout->tls_segment();
7442 AArch64_address aligned_tcb_address =
7443 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7444 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7445 AArch64_address x = value + addend + aligned_tcb_address;
7446 // "x" is the offset to tp, we can only do this if x is within
7447 // range [0, 2^32-1]
7448 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7449 {
7450 gold_error(_("TLS variable referred by reloc %u is too far from TP."),
7451 r_type);
7452 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7453 }
7454
7455 Insntype* ip = reinterpret_cast<Insntype*>(view);
7456 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7457 unsigned int regno;
7458 Insntype newinsn;
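// In both cases the 16-bit immediate goes in bits [20:5] of the insn:
// 0xd2a00000 encodes "movz Xd, #imm16, lsl #16" (bits [31:16] of the TP
// offset) and 0xf2800000 encodes "movk Xd, #imm16" (bits [15:0]).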
7459 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7460 {
7461 // Generate movz.
7462 regno = (insn & 0x1f);
7463 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7464 }
7465 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7466 {
7467 // Generate movk.
7468 regno = (insn & 0x1f);
7469 gold_assert(regno == ((insn >> 5) & 0x1f));
7470 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7471 }
7472 else
7473 gold_unreachable();
7474
7475 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7476 return aarch64_reloc_funcs::STATUS_OKAY;
7477 } // End of tls_ie_to_le
7478
7479
7480 template<int size, bool big_endian>
7481 inline
7482 typename AArch64_relocate_functions<size, big_endian>::Status
7483 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7484 const Relocate_info<size, big_endian>* relinfo,
7485 Target_aarch64<size, big_endian>* target,
7486 const elfcpp::Rela<size, big_endian>& rela,
7487 unsigned int r_type,
7488 unsigned char* view,
7489 const Symbol_value<size>* psymval)
7490 {
7491 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7492 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7493 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7494
7495 // TLSDESC-GD sequence is like:
7496 // adrp x0, :tlsdesc:v1
7497 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7498 // add x0, x0, :tlsdesc_lo12:v1
7499 // .tlsdesccall v1
7500 // blr x1
7501 // After desc_gd_to_le optimization, the sequence will be like:
7502 // movz x0, #0x0, lsl #16
7503 // movk x0, #0x10
7504 // nop
7505 // nop
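// (The immediates above are only an example: the movz materializes bits
// [31:16] of the variable's TP offset and the movk fills in bits [15:0],
// so the whole 32-bit offset ends up in x0 without any GOT access.)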
7506
7507 // Calculate tprel value.
7508 Output_segment* tls_segment = relinfo->layout->tls_segment();
7509 gold_assert(tls_segment != NULL);
7510 Insntype* ip = reinterpret_cast<Insntype*>(view);
7511 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7512 AArch64_address value = psymval->value(relinfo->object, addend);
7513 AArch64_address aligned_tcb_size =
7514 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7515 AArch64_address x = value + aligned_tcb_size;
7516 // x is the offset from TP; we can only do this relaxation if x is within
7517 // the range [0, 2^32-1]. If x is out of range, report an error and exit.
7518 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7519 {
7520 gold_error(_("TLS variable referred by reloc %u is too far from TP. "
7521 "We Can't do gd_to_le relaxation.\n"), r_type);
7522 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7523 }
7524 Insntype newinsn;
7525 switch (r_type)
7526 {
7527 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7528 case elfcpp::R_AARCH64_TLSDESC_CALL:
7529 // Change to nop
7530 newinsn = 0xd503201f;
7531 break;
7532
7533 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7534 // Change to movz.
7535 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7536 break;
7537
7538 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7539 // Change to movk.
7540 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7541 break;
7542
7543 default:
7544 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7545 r_type);
7546 gold_unreachable();
7547 }
7548 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7549 return aarch64_reloc_funcs::STATUS_OKAY;
7550 } // End of tls_desc_gd_to_le
7551
7552
7553 template<int size, bool big_endian>
7554 inline
7555 typename AArch64_relocate_functions<size, big_endian>::Status
7556 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7557 const Relocate_info<size, big_endian>* /* relinfo */,
7558 Target_aarch64<size, big_endian>* /* target */,
7559 const elfcpp::Rela<size, big_endian>& rela,
7560 unsigned int r_type,
7561 unsigned char* view,
7562 const Symbol_value<size>* /* psymval */,
7563 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7564 typename elfcpp::Elf_types<size>::Elf_Addr address)
7565 {
7566 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7567 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7568
7569 // TLSDESC-GD sequence is like:
7570 // adrp x0, :tlsdesc:v1
7571 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7572 // add x0, x0, :tlsdesc_lo12:v1
7573 // .tlsdesccall v1
7574 // blr x1
7575 // After desc_gd_to_ie optimization, the sequence will be like:
7576 // adrp x0, :tlsie:v1
7577 // ldr x0, [x0, :tlsie_lo12:v1]
7578 // nop
7579 // nop
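// That is, instead of calling the TLS descriptor resolver, the rewritten
// adrp/ldr pair loads the TP offset directly from the IE GOT entry; the
// LD64 case below also retargets the ldr's destination register to x0.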
7580
7581 Insntype* ip = reinterpret_cast<Insntype*>(view);
7582 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7583 Insntype newinsn;
7584 switch (r_type)
7585 {
7586 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7587 case elfcpp::R_AARCH64_TLSDESC_CALL:
7588 // Change to nop
7589 newinsn = 0xd503201f;
7590 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7591 break;
7592
7593 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7594 {
7595 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7596 address);
7597 }
7598 break;
7599
7600 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7601 {
7602 // Set ldr target register to be x0.
7603 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7604 insn &= 0xffffffe0;
7605 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
7606 // Do relocation.
7607 const AArch64_reloc_property* reloc_property =
7608 aarch64_reloc_property_table->get_reloc_property(
7609 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7610 return aarch64_reloc_funcs::template rela_general<32>(
7611 view, got_entry_address, addend, reloc_property);
7612 }
7613 break;
7614
7615 default:
7616 gold_error(_("Don't support tlsdesc gd_to_ie optimization on reloc %u"),
7617 r_type);
7618 gold_unreachable();
7619 }
7620 return aarch64_reloc_funcs::STATUS_OKAY;
7621 } // End of tls_desc_gd_to_ie
7622
7623 // Relocate section data.
7624
7625 template<int size, bool big_endian>
7626 void
7627 Target_aarch64<size, big_endian>::relocate_section(
7628 const Relocate_info<size, big_endian>* relinfo,
7629 unsigned int sh_type,
7630 const unsigned char* prelocs,
7631 size_t reloc_count,
7632 Output_section* output_section,
7633 bool needs_special_offset_handling,
7634 unsigned char* view,
7635 typename elfcpp::Elf_types<size>::Elf_Addr address,
7636 section_size_type view_size,
7637 const Reloc_symbol_changes* reloc_symbol_changes)
7638 {
7639 gold_assert(sh_type == elfcpp::SHT_RELA);
7640 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
7641 gold::relocate_section<size, big_endian, Target_aarch64, elfcpp::SHT_RELA,
7642 AArch64_relocate, gold::Default_comdat_behavior>(
7643 relinfo,
7644 this,
7645 prelocs,
7646 reloc_count,
7647 output_section,
7648 needs_special_offset_handling,
7649 view,
7650 address,
7651 view_size,
7652 reloc_symbol_changes);
7653 }
7654
7655 // Return the size of a relocation while scanning during a relocatable
7656 // link.
7657
7658 template<int size, bool big_endian>
7659 unsigned int
7660 Target_aarch64<size, big_endian>::Relocatable_size_for_reloc::
7661 get_size_for_reloc(
7662 unsigned int ,
7663 Relobj* )
7664 {
7665 // We will never support SHT_REL relocations.
7666 gold_unreachable();
7667 return 0;
7668 }
7669
7670 // Scan the relocs during a relocatable link.
7671
7672 template<int size, bool big_endian>
7673 void
7674 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
7675 Symbol_table* symtab,
7676 Layout* layout,
7677 Sized_relobj_file<size, big_endian>* object,
7678 unsigned int data_shndx,
7679 unsigned int sh_type,
7680 const unsigned char* prelocs,
7681 size_t reloc_count,
7682 Output_section* output_section,
7683 bool needs_special_offset_handling,
7684 size_t local_symbol_count,
7685 const unsigned char* plocal_symbols,
7686 Relocatable_relocs* rr)
7687 {
7688 gold_assert(sh_type == elfcpp::SHT_RELA);
7689
7690 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
7691 Relocatable_size_for_reloc> Scan_relocatable_relocs;
7692
7693 gold::scan_relocatable_relocs<size, big_endian, elfcpp::SHT_RELA,
7694 Scan_relocatable_relocs>(
7695 symtab,
7696 layout,
7697 object,
7698 data_shndx,
7699 prelocs,
7700 reloc_count,
7701 output_section,
7702 needs_special_offset_handling,
7703 local_symbol_count,
7704 plocal_symbols,
7705 rr);
7706 }
7707
7708 // Relocate a section during a relocatable link.
7709
7710 template<int size, bool big_endian>
7711 void
7712 Target_aarch64<size, big_endian>::relocate_relocs(
7713 const Relocate_info<size, big_endian>* relinfo,
7714 unsigned int sh_type,
7715 const unsigned char* prelocs,
7716 size_t reloc_count,
7717 Output_section* output_section,
7718 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
7719 const Relocatable_relocs* rr,
7720 unsigned char* view,
7721 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
7722 section_size_type view_size,
7723 unsigned char* reloc_view,
7724 section_size_type reloc_view_size)
7725 {
7726 gold_assert(sh_type == elfcpp::SHT_RELA);
7727
7728 gold::relocate_relocs<size, big_endian, elfcpp::SHT_RELA>(
7729 relinfo,
7730 prelocs,
7731 reloc_count,
7732 output_section,
7733 offset_in_output_section,
7734 rr,
7735 view,
7736 view_address,
7737 view_size,
7738 reloc_view,
7739 reloc_view_size);
7740 }
7741
7742
7743 // Return whether this is a 3-insn erratum sequence.
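// For erratum 843419 the flagged pattern is: insn1 is an adrp at page
// offset 0xff8 or 0xffc (both checked by the caller), insn2 is a
// single-register load/store or a store pair, and insn3 is a load/store
// (unsigned immediate) whose base register is the adrp's destination.
// For illustration only (register numbers arbitrary):
//   adrp x0, sym          // at page offset 0xff8 or 0xffc
//   ldr  x1, [x2, #8]     // single-register load/store, or a store pair
//   ldr  x3, [x0, #16]    // ld/st (unsigned imm) based on the adrp dest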
7744
7745 template<int size, bool big_endian>
7746 bool
7747 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
7748 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
7749 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
7750 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
7751 {
7752 unsigned rt1, rt2;
7753 bool load, pair;
7754
7755 // The 2nd insn is a single-register load or store, or a register-pair
7756 // store.
7757 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
7758 && (!pair || (pair && !load)))
7759 {
7760 // The 3rd insn is a load or store instruction from the "Load/store
7761 // register (unsigned immediate)" encoding class, using Rn as the
7762 // base address register.
7763 if (Insn_utilities::aarch64_ldst_uimm(insn3)
7764 && (Insn_utilities::aarch64_rn(insn3)
7765 == Insn_utilities::aarch64_rd(insn1)))
7766 return true;
7767 }
7768 return false;
7769 }
7770
7771
7772 // Return whether this is a 835769 sequence.
7773 // (Similarly implemented as in elfnn-aarch64.c.)
7774
7775 template<int size, bool big_endian>
7776 bool
7777 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
7778 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
7779 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
7780 {
7781 uint32_t rt;
7782 uint32_t rt2;
7783 uint32_t rn;
7784 uint32_t rm;
7785 uint32_t ra;
7786 bool pair;
7787 bool load;
7788
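// For illustration only (register numbers arbitrary): a pair such as
//   ldr  x6, [x10]
//   madd x7, x8, x9, x12
// has no register dependency between the load and the multiply-accumulate,
// so it is treated as an erratum 835769 candidate and gets a stub.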
7789 if (Insn_utilities::aarch64_mlxl(insn2)
7790 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
7791 {
7792 /* Any SIMD memory op is independent of the subsequent MLA
7793 by definition of the erratum. */
7794 if (Insn_utilities::aarch64_bit(insn1, 26))
7795 return true;
7796
7797 /* If not SIMD, check for integer memory ops and MLA relationship. */
7798 rn = Insn_utilities::aarch64_rn(insn2);
7799 ra = Insn_utilities::aarch64_ra(insn2);
7800 rm = Insn_utilities::aarch64_rm(insn2);
7801
7802 /* If this is a load and there's a true (RAW) dependency, we are safe
7803 and this is not an erratum sequence. */
7804 if (load &&
7805 (rt == rn || rt == rm || rt == ra
7806 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
7807 return false;
7808
7809 /* We conservatively put out stubs for all other cases (including
7810 writebacks). */
7811 return true;
7812 }
7813
7814 return false;
7815 }
7816
7817
7818 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
7819
7820 template<int size, bool big_endian>
7821 void
7822 Target_aarch64<size, big_endian>::create_erratum_stub(
7823 AArch64_relobj<size, big_endian>* relobj,
7824 unsigned int shndx,
7825 section_size_type erratum_insn_offset,
7826 Address erratum_address,
7827 typename Insn_utilities::Insntype erratum_insn,
7828 int erratum_type)
7829 {
7830 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
7831 The_stub_table* stub_table = relobj->stub_table(shndx);
7832 gold_assert(stub_table != NULL);
7833 if (stub_table->find_erratum_stub(relobj,
7834 shndx,
7835 erratum_insn_offset) == NULL)
7836 {
7837 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
7838 The_erratum_stub* stub = new The_erratum_stub(
7839 relobj, erratum_type, shndx, erratum_insn_offset);
7840 stub->set_erratum_insn(erratum_insn);
7841 stub->set_erratum_address(erratum_address);
7842 // For erratum ST_E_843419 and ST_E_835769, the destination address is
7843 // always the insn immediately following the erratum insn.
7844 stub->set_destination_address(erratum_address + BPI);
7845 stub_table->add_erratum_stub(stub);
7846 }
7847 }
7848
7849
7850 // Scan for erratum 835769 in section SHNDX over the range
7851 // [output_address + span_start, output_address + span_end). Note that we
7852 // do not share code with the scan_erratum_843419_span function, because for
7853 // 843419 we only need to scan the last few insns of each page, whereas for
7854 // 835769 we must scan every insn.
7855
7856 template<int size, bool big_endian>
7857 void
7858 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
7859 AArch64_relobj<size, big_endian>* relobj,
7860 unsigned int shndx,
7861 const section_size_type span_start,
7862 const section_size_type span_end,
7863 unsigned char* input_view,
7864 Address output_address)
7865 {
7866 typedef typename Insn_utilities::Insntype Insntype;
7867
7868 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
7869
7870 // Adjust output_address and input_view to the start of the span.
7871 output_address += span_start;
7872 input_view += span_start;
7873
7874 section_size_type span_length = span_end - span_start;
7875 section_size_type offset = 0;
7876 for (offset = 0; offset + BPI < span_length; offset += BPI)
7877 {
7878 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
7879 Insntype insn1 = ip[0];
7880 Insntype insn2 = ip[1];
7881 if (is_erratum_835769_sequence(insn1, insn2))
7882 {
7883 Insntype erratum_insn = insn2;
7884 // "span_start + offset" is the offset for insn1. So for insn2, it is
7885 // "span_start + offset + BPI".
7886 section_size_type erratum_insn_offset = span_start + offset + BPI;
7887 Address erratum_address = output_address + offset + BPI;
7888 gold_warning(_("Erratum 835769 found and fixed at \"%s\", "
7889 "section %d, offset 0x%08x."),
7890 relobj->name().c_str(), shndx,
7891 (unsigned int)(span_start + offset));
7892
7893 this->create_erratum_stub(relobj, shndx,
7894 erratum_insn_offset, erratum_address,
7895 erratum_insn, ST_E_835769);
7896 offset += BPI; // Also skip the multiply-accumulate insn.
7897 }
7898 }
7899 } // End of "Target_aarch64::scan_erratum_835769_span".
7900
7901
7902 // Scan for erratum 843419 in section SHNDX over the range
7903 // [output_address + span_start, output_address + span_end).
7904
7905 template<int size, bool big_endian>
7906 void
7907 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
7908 AArch64_relobj<size, big_endian>* relobj,
7909 unsigned int shndx,
7910 const section_size_type span_start,
7911 const section_size_type span_end,
7912 unsigned char* input_view,
7913 Address output_address)
7914 {
7915 typedef typename Insn_utilities::Insntype Insntype;
7916
7917 // Adjust output_address and input_view to the start of the span.
7918 output_address += span_start;
7919 input_view += span_start;
7920
7921 if ((output_address & 0x03) != 0)
7922 return;
7923
7924 section_size_type offset = 0;
7925 section_size_type span_length = span_end - span_start;
7926 // The first insn of the sequence must be at page offset 0xFF8 or 0xFFC.
7927 unsigned int page_offset = output_address & 0xFFF;
7928 // Make sure the starting position, that is "output_address + offset",
7929 // is at page offset 0xff8 or 0xffc.
7930 if (page_offset < 0xff8)
7931 offset = 0xff8 - page_offset;
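// (output_address is 4-byte aligned, checked above, so a page_offset at
// or above 0xff8 is already 0xff8 or 0xffc and needs no adjustment.)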
7932 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
7933 {
7934 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
7935 Insntype insn1 = ip[0];
7936 if (Insn_utilities::is_adrp(insn1))
7937 {
7938 Insntype insn2 = ip[1];
7939 Insntype insn3 = ip[2];
7940 Insntype erratum_insn;
7941 unsigned insn_offset;
7942 bool do_report = false;
7943 if (is_erratum_843419_sequence(insn1, insn2, insn3))
7944 {
7945 do_report = true;
7946 erratum_insn = insn3;
7947 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
7948 }
7949 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
7950 {
7951 // Optionally there can be one insn between insn2 and insn3.
7952 Insntype insn_opt = ip[2];
7953 // And insn_opt must not be a branch.
7954 if (!Insn_utilities::aarch64_b(insn_opt)
7955 && !Insn_utilities::aarch64_bl(insn_opt)
7956 && !Insn_utilities::aarch64_blr(insn_opt)
7957 && !Insn_utilities::aarch64_br(insn_opt))
7958 {
7959 // And insn_opt must not write to the dest reg of insn1. However,
7960 // we do a conservative scan, which means we may fix/report
7961 // more than necessary, but it doesn't hurt.
7962
7963 Insntype insn4 = ip[3];
7964 if (is_erratum_843419_sequence(insn1, insn2, insn4))
7965 {
7966 do_report = true;
7967 erratum_insn = insn4;
7968 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
7969 }
7970 }
7971 }
7972 if (do_report)
7973 {
7974 gold_warning(_("Erratum 843419 found and fixed at \"%s\", "
7975 "section %d, offset 0x%08x."),
7976 relobj->name().c_str(), shndx,
7977 (unsigned int)(span_start + offset));
7978 unsigned int erratum_insn_offset =
7979 span_start + offset + insn_offset;
7980 Address erratum_address =
7981 output_address + offset + insn_offset;
7982 create_erratum_stub(relobj, shndx,
7983 erratum_insn_offset, erratum_address,
7984 erratum_insn, ST_E_843419);
7985 }
7986 }
7987
7988 // Advance to next candidate instruction. We only consider instruction
7989 // sequences starting at a page offset of 0xff8 or 0xffc.
7990 page_offset = (output_address + offset) & 0xfff;
7991 if (page_offset == 0xff8)
7992 offset += 4;
7993 else // (page_offset == 0xffc), we move to next page's 0xff8.
7994 offset += 0xffc;
7995 }
7996 } // End of "Target_aarch64::scan_erratum_843419_span".
7997
7998
7999 // The selector for aarch64 object files.
8000
8001 template<int size, bool big_endian>
8002 class Target_selector_aarch64 : public Target_selector
8003 {
8004 public:
8005 Target_selector_aarch64();
8006
8007 virtual Target*
8008 do_instantiate_target()
8009 { return new Target_aarch64<size, big_endian>(); }
8010 };
8011
8012 template<>
8013 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8014 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8015 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8016 { }
8017
8018 template<>
8019 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8020 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8021 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8022 { }
8023
8024 template<>
8025 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8026 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8027 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8028 { }
8029
8030 template<>
8031 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8032 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8033 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8034 { }
8035
8036 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8037 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8038 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8039 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8040
8041 } // End anonymous namespace.