1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2023 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static bool
114 is_mrs_tpidr_el0(const Insntype insn)
115 { return (insn & 0xFFFFFFE0) == 0xd53bd040; }
116
117 static unsigned int
118 aarch64_rm(const Insntype insn)
119 { return aarch64_bits(insn, 16, 5); }
120
121 static unsigned int
122 aarch64_rn(const Insntype insn)
123 { return aarch64_bits(insn, 5, 5); }
124
125 static unsigned int
126 aarch64_rd(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt(const Insntype insn)
131 { return aarch64_bits(insn, 0, 5); }
132
133 static unsigned int
134 aarch64_rt2(const Insntype insn)
135 { return aarch64_bits(insn, 10, 5); }
136
137 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
138 static Insntype
139 aarch64_adr_encode_imm(Insntype adr, int imm21)
140 {
141 gold_assert(is_adr(adr));
142 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
143 const int mask19 = (1 << 19) - 1;
144 const int mask2 = 3;
145 adr &= ~((mask19 << 5) | (mask2 << 29));
146 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
147 return adr;
148 }
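  // Editorial worked example (not part of the original source): ADR splits the
  // 21-bit immediate into immlo = imm21 & 0x3 (bits [30:29]) and
  // immhi = imm21 >> 2 (bits [23:5]).  For imm21 = 0x12345, immlo = 1 and
  // immhi = 0x48d1, so the code above ORs (1 << 29) | (0x48d1 << 5) into the
  // cleared adr insn.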
149
150 // Retrieve the encoded adrp 33-bit signed imm value. This is the 21-bit
151 // signed imm encoded in the insn, multiplied by 4k (the page size) and
152 // sign-extended to 64 bits, resulting in [-4G, 4G) with the 12 lsbs being 0.
153 static int64_t
154 aarch64_adrp_decode_imm(const Insntype adrp)
155 {
156 const int mask19 = (1 << 19) - 1;
157 const int mask2 = 3;
158 gold_assert(is_adrp(adrp));
159 // 21-bit imm encoded in adrp.
160 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
161 // Retrieve msb of 21-bit-signed imm for sign extension.
162 uint64_t msbt = (imm >> 20) & 1;
163 // Real value is imm multiplied by 4k. Value now has 33-bit information.
164 int64_t value = imm << 12;
165 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
166 // with value.
167 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
168 }
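  // Editorial worked example (not part of the original source): for an adrp
  // whose encoded 21-bit imm is 1, the decoded value is 1 << 12 = 4096 (one
  // page forward); for an encoded imm of 0x100000 (msb set), value becomes
  // 0x100000000 and msbt = 1, so the return expression fills bits 63..33 with
  // ones and the result is -4 GiB.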
169
170 static bool
171 aarch64_b(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x14000000; }
173
174 static bool
175 aarch64_bl(const Insntype insn)
176 { return (insn & 0xFC000000) == 0x94000000; }
177
178 static bool
179 aarch64_blr(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
181
182 static bool
183 aarch64_br(const Insntype insn)
184 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
185
186 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
187 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
188 static bool
189 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
190
191 static bool
192 aarch64_ldst(Insntype insn)
193 { return (insn & 0x0a000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_ex(Insntype insn)
197 { return (insn & 0x3f000000) == 0x08000000; }
198
199 static bool
200 aarch64_ldst_pcrel(Insntype insn)
201 { return (insn & 0x3b000000) == 0x18000000; }
202
203 static bool
204 aarch64_ldst_nap(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28000000; }
206
207 static bool
208 aarch64_ldstp_pi(Insntype insn)
209 { return (insn & 0x3b800000) == 0x28800000; }
210
211 static bool
212 aarch64_ldstp_o(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29000000; }
214
215 static bool
216 aarch64_ldstp_pre(Insntype insn)
217 { return (insn & 0x3b800000) == 0x29800000; }
218
219 static bool
220 aarch64_ldst_ui(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000000; }
222
223 static bool
224 aarch64_ldst_piimm(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000400; }
226
227 static bool
228 aarch64_ldst_u(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000800; }
230
231 static bool
232 aarch64_ldst_preimm(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38000c00; }
234
235 static bool
236 aarch64_ldst_ro(Insntype insn)
237 { return (insn & 0x3b200c00) == 0x38200800; }
238
239 static bool
240 aarch64_ldst_uimm(Insntype insn)
241 { return (insn & 0x3b000000) == 0x39000000; }
242
243 static bool
244 aarch64_ldst_simd_m(Insntype insn)
245 { return (insn & 0xbfbf0000) == 0x0c000000; }
246
247 static bool
248 aarch64_ldst_simd_m_pi(Insntype insn)
249 { return (insn & 0xbfa00000) == 0x0c800000; }
250
251 static bool
252 aarch64_ldst_simd_s(Insntype insn)
253 { return (insn & 0xbf9f0000) == 0x0d000000; }
254
255 static bool
256 aarch64_ldst_simd_s_pi(Insntype insn)
257 { return (insn & 0xbf800000) == 0x0d800000; }
258
259 // Classify an INSN as a load/store. Return true if INSN is a
260 // LD/ST instruction, otherwise return false. For scalar LD/ST instructions
261 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
262 // instructions PAIR is TRUE, RT and RT2 are returned.
263 static bool
264 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
265 bool *pair, bool *load)
266 {
267 uint32_t opcode;
268 unsigned int r;
269 uint32_t opc = 0;
270 uint32_t v = 0;
271 uint32_t opc_v = 0;
272
273 /* Bail out quickly if INSN doesn't fall into the load-store
274 encoding space. */
275 if (!aarch64_ldst (insn))
276 return false;
277
278 *pair = false;
279 *load = false;
280 if (aarch64_ldst_ex (insn))
281 {
282 *rt = aarch64_rt (insn);
283 *rt2 = *rt;
284 if (aarch64_bit (insn, 21) == 1)
285 {
286 *pair = true;
287 *rt2 = aarch64_rt2 (insn);
288 }
289 *load = aarch64_ld (insn);
290 return true;
291 }
292 else if (aarch64_ldst_nap (insn)
293 || aarch64_ldstp_pi (insn)
294 || aarch64_ldstp_o (insn)
295 || aarch64_ldstp_pre (insn))
296 {
297 *pair = true;
298 *rt = aarch64_rt (insn);
299 *rt2 = aarch64_rt2 (insn);
300 *load = aarch64_ld (insn);
301 return true;
302 }
303 else if (aarch64_ldst_pcrel (insn)
304 || aarch64_ldst_ui (insn)
305 || aarch64_ldst_piimm (insn)
306 || aarch64_ldst_u (insn)
307 || aarch64_ldst_preimm (insn)
308 || aarch64_ldst_ro (insn)
309 || aarch64_ldst_uimm (insn))
310 {
311 *rt = aarch64_rt (insn);
312 *rt2 = *rt;
313 if (aarch64_ldst_pcrel (insn))
314 *load = true;
315 opc = aarch64_bits (insn, 22, 2);
316 v = aarch64_bit (insn, 26);
317 opc_v = opc | (v << 2);
318 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
319 || opc_v == 5 || opc_v == 7);
320 return true;
321 }
322 else if (aarch64_ldst_simd_m (insn)
323 || aarch64_ldst_simd_m_pi (insn))
324 {
325 *rt = aarch64_rt (insn);
326 *load = aarch64_bit (insn, 22);
327 opcode = (insn >> 12) & 0xf;
328 switch (opcode)
329 {
330 case 0:
331 case 2:
332 *rt2 = *rt + 3;
333 break;
334
335 case 4:
336 case 6:
337 *rt2 = *rt + 2;
338 break;
339
340 case 7:
341 *rt2 = *rt;
342 break;
343
344 case 8:
345 case 10:
346 *rt2 = *rt + 1;
347 break;
348
349 default:
350 return false;
351 }
352 return true;
353 }
354 else if (aarch64_ldst_simd_s (insn)
355 || aarch64_ldst_simd_s_pi (insn))
356 {
357 *rt = aarch64_rt (insn);
358 r = (insn >> 21) & 1;
359 *load = aarch64_bit (insn, 22);
360 opcode = (insn >> 13) & 0x7;
361 switch (opcode)
362 {
363 case 0:
364 case 2:
365 case 4:
366 *rt2 = *rt + r;
367 break;
368
369 case 1:
370 case 3:
371 case 5:
372 *rt2 = *rt + (r == 0 ? 2 : 3);
373 break;
374
375 case 6:
376 *rt2 = *rt + r;
377 break;
378
379 case 7:
380 *rt2 = *rt + (r == 0 ? 2 : 3);
381 break;
382
383 default:
384 return false;
385 }
386 return true;
387 }
388 return false;
389 } // End of "aarch64_mem_op_p".
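  // Editorial worked example (not part of the original source): for the insn
  // values 0xf9400260 (ldr x0, [x19]) and 0xa9400660 (ldp x0, x1, [x19]),
  // aarch64_mem_op_p returns true; the first sets *load = true, *pair = false,
  // *rt = *rt2 = 0, the second sets *load = true, *pair = true, *rt = 0,
  // *rt2 = 1.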
390
391 // Return true if INSN is mac insn.
392 static bool
393 aarch64_mac(Insntype insn)
394 { return (insn & 0xff000000) == 0x9b000000; }
395
396 // Return true if INSN is multiply-accumulate.
397 // (This is similar to the implementation in elfnn-aarch64.c.)
398 static bool
399 aarch64_mlxl(Insntype insn)
400 {
401 uint32_t op31 = aarch64_op31(insn);
402 if (aarch64_mac(insn)
403 && (op31 == 0 || op31 == 1 || op31 == 5)
404 /* Exclude MUL instructions which are encoded as a multiply-accumulate
405 with RA = XZR. */
406 && aarch64_ra(insn) != AARCH64_ZR)
407 {
408 return true;
409 }
410 return false;
411 }
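  // Editorial worked example (not part of the original source): 0x9b020c20
  // (madd x0, x1, x2, x3) satisfies aarch64_mac and has op31 == 0 and
  // ra == 3, so aarch64_mlxl returns true; 0x9b027c20 (mul x0, x1, x2,
  // i.e. madd with ra == xzr) is excluded and returns false.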
412 }; // End of "AArch64_insn_utilities".
413
414
415 // Insn length in bytes.
416
417 template<bool big_endian>
418 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
419
420
421 // Zero register encoding - 31.
422
423 template<bool big_endian>
424 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
425
426
427 // Output_data_got_aarch64 class.
428
429 template<int size, bool big_endian>
430 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
431 {
432 public:
433 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
434 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
435 : Output_data_got<size, big_endian>(),
436 symbol_table_(symtab), layout_(layout)
437 { }
438
439 // Add a static reloc for the GOT entry at OFFSET. GSYM is a global
440 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
441 // applied in a static link.
442 void
443 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
444 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
445
446
447 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
448 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
449 // relocation that needs to be applied in a static link.
450 void
451 add_static_reloc(unsigned int got_offset, unsigned int r_type,
452 Sized_relobj_file<size, big_endian>* relobj,
453 unsigned int index)
454 {
455 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
456 index));
457 }
458
459
460 protected:
461 // Write out the GOT table.
462 void
463 do_write(Output_file* of) {
464 // The first entry in the GOT is the address of the .dynamic section.
465 gold_assert(this->data_size() >= size / 8);
466 Output_section* dynamic = this->layout_->dynamic_section();
467 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
468 this->replace_constant(0, dynamic_addr);
469 Output_data_got<size, big_endian>::do_write(of);
470
471 // Handling static relocs
472 if (this->static_relocs_.empty())
473 return;
474
475 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
476
477 gold_assert(parameters->doing_static_link());
478 const off_t offset = this->offset();
479 const section_size_type oview_size =
480 convert_to_section_size_type(this->data_size());
481 unsigned char* const oview = of->get_output_view(offset, oview_size);
482
483 Output_segment* tls_segment = this->layout_->tls_segment();
484 gold_assert(tls_segment != NULL);
485
486 AArch64_address aligned_tcb_address =
487 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
488 tls_segment->maximum_alignment());
489
490 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
491 {
492 Static_reloc& reloc(this->static_relocs_[i]);
493 AArch64_address value;
494
495 if (!reloc.symbol_is_global())
496 {
497 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
498 const Symbol_value<size>* psymval =
499 reloc.relobj()->local_symbol(reloc.index());
500
501 // We are doing static linking. Issue an error and skip this
502 // relocation if the symbol is undefined or in a discarded section.
503 bool is_ordinary;
504 unsigned int shndx = psymval->input_shndx(&is_ordinary);
505 if ((shndx == elfcpp::SHN_UNDEF)
506 || (is_ordinary
507 && shndx != elfcpp::SHN_UNDEF
508 && !object->is_section_included(shndx)
509 && !this->symbol_table_->is_section_folded(object, shndx)))
510 {
511 gold_error(_("undefined or discarded local symbol %u from "
512 " object %s in GOT"),
513 reloc.index(), reloc.relobj()->name().c_str());
514 continue;
515 }
516 value = psymval->value(object, 0);
517 }
518 else
519 {
520 const Symbol* gsym = reloc.symbol();
521 gold_assert(gsym != NULL);
522 if (gsym->is_forwarder())
523 gsym = this->symbol_table_->resolve_forwards(gsym);
524
525 // We are doing static linking. Issue an error and skip this
526 // relocation if the symbol is undefined or in a discarded section,
527 // unless it is a weakly undefined symbol.
528 if ((gsym->is_defined_in_discarded_section()
529 || gsym->is_undefined())
530 && !gsym->is_weak_undefined())
531 {
532 gold_error(_("undefined or discarded symbol %s in GOT"),
533 gsym->name());
534 continue;
535 }
536
537 if (!gsym->is_weak_undefined())
538 {
539 const Sized_symbol<size>* sym =
540 static_cast<const Sized_symbol<size>*>(gsym);
541 value = sym->value();
542 }
543 else
544 value = 0;
545 }
546
547 unsigned got_offset = reloc.got_offset();
548 gold_assert(got_offset < oview_size);
549
550 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
551 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
552 Valtype x;
553 switch (reloc.r_type())
554 {
555 case elfcpp::R_AARCH64_TLS_DTPREL64:
556 x = value;
557 break;
558 case elfcpp::R_AARCH64_TLS_TPREL64:
559 x = value + aligned_tcb_address;
560 break;
561 default:
562 gold_unreachable();
563 }
564 elfcpp::Swap<size, big_endian>::writeval(wv, x);
565 }
566
567 of->write_output_view(offset, oview_size, oview);
568 }
569
570 private:
571 // Symbol table of the output object.
572 Symbol_table* symbol_table_;
573 // A pointer to the Layout class, so that we can find the .dynamic
574 // section when we write out the GOT section.
575 Layout* layout_;
576
577 // This class represents dynamic relocations that need to be applied by
578 // gold because we are using TLS relocations in a static link.
579 class Static_reloc
580 {
581 public:
582 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
583 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
584 { this->u_.global.symbol = gsym; }
585
586 Static_reloc(unsigned int got_offset, unsigned int r_type,
587 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
588 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
589 {
590 this->u_.local.relobj = relobj;
591 this->u_.local.index = index;
592 }
593
594 // Return the GOT offset.
595 unsigned int
596 got_offset() const
597 { return this->got_offset_; }
598
599 // Relocation type.
600 unsigned int
601 r_type() const
602 { return this->r_type_; }
603
604 // Whether the symbol is global or not.
605 bool
606 symbol_is_global() const
607 { return this->symbol_is_global_; }
608
609 // For a relocation against a global symbol, the global symbol.
610 Symbol*
611 symbol() const
612 {
613 gold_assert(this->symbol_is_global_);
614 return this->u_.global.symbol;
615 }
616
617 // For a relocation against a local symbol, the defining object.
618 Sized_relobj_file<size, big_endian>*
619 relobj() const
620 {
621 gold_assert(!this->symbol_is_global_);
622 return this->u_.local.relobj;
623 }
624
625 // For a relocation against a local symbol, the local symbol index.
626 unsigned int
627 index() const
628 {
629 gold_assert(!this->symbol_is_global_);
630 return this->u_.local.index;
631 }
632
633 private:
634 // GOT offset of the entry to which this relocation is applied.
635 unsigned int got_offset_;
636 // Type of relocation.
637 unsigned int r_type_;
638 // Whether this relocation is against a global symbol.
639 bool symbol_is_global_;
640 // A global or local symbol.
641 union
642 {
643 struct
644 {
645 // For a global symbol, the symbol itself.
646 Symbol* symbol;
647 } global;
648 struct
649 {
650 // For a local symbol, the object defining the symbol.
651 Sized_relobj_file<size, big_endian>* relobj;
652 // For a local symbol, the symbol index.
653 unsigned int index;
654 } local;
655 } u_;
656 }; // End of inner class Static_reloc
657
658 std::vector<Static_reloc> static_relocs_;
659 }; // End of Output_data_got_aarch64
660
661
662 template<int size, bool big_endian>
663 class AArch64_input_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_output_section;
668
669
670 template<int size, bool big_endian>
671 class AArch64_relobj;
672
673
674 // Stub type enum constants.
675
676 enum
677 {
678 ST_NONE = 0,
679
680 // Using adrp/add pair, 4 insns (including alignment) without mem access,
681 // the fastest stub. This has a limited jump distance, which is tested by
682 // aarch64_valid_for_adrp_p.
683 ST_ADRP_BRANCH = 1,
684
685 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
686 // unlimited in jump distance.
687 ST_LONG_BRANCH_ABS = 2,
688
689 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
690 // mem access, slowest one. Only used in position independent executables.
691 ST_LONG_BRANCH_PCREL = 3,
692
693 // Stub for erratum 843419 handling.
694 ST_E_843419 = 4,
695
696 // Stub for erratum 835769 handling.
697 ST_E_835769 = 5,
698
699 // Number of total stub types.
700 ST_NUMBER = 6
701 };
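// Editorial note (not part of the original source): ST_ADRP_BRANCH,
// ST_LONG_BRANCH_ABS and ST_LONG_BRANCH_PCREL are the reloc-stub types chosen
// by Reloc_stub::stub_type_for_reloc below; ST_E_843419 and ST_E_835769 are
// only created by the erratum scanning code.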
702
703
704 // Struct that wraps insns for a particular stub. All stub templates are
705 // created/initialized as constants by Stub_template_repertoire.
706
707 template<bool big_endian>
708 struct Stub_template
709 {
710 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
711 const int insn_num;
712 };
713
714
715 // Simple singleton class that creates/initializes/stores all types of stub
716 // templates.
717
718 template<bool big_endian>
719 class Stub_template_repertoire
720 {
721 public:
722 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
723
724 // Single static method to get stub template for a given stub type.
725 static const Stub_template<big_endian>*
726 get_stub_template(int type)
727 {
728 static Stub_template_repertoire<big_endian> singleton;
729 return singleton.stub_templates_[type];
730 }
731
732 private:
733 // Constructor - creates/initializes all stub templates.
734 Stub_template_repertoire();
735 ~Stub_template_repertoire()
736 { }
737
738 // Disallowing copy ctor and copy assignment operator.
739 Stub_template_repertoire(Stub_template_repertoire&);
740 Stub_template_repertoire& operator=(Stub_template_repertoire&);
741
742 // Data that stores all insn templates.
743 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
744 }; // End of "class Stub_template_repertoire".
745
746
747 // Constructor - creates/initializes all stub templates.
748
749 template<bool big_endian>
750 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
751 {
752 // Insn array definitions.
753 const static Insntype ST_NONE_INSNS[] = {};
754
755 const static Insntype ST_ADRP_BRANCH_INSNS[] =
756 {
757 0x90000010, /* adrp ip0, X */
758 /* ADR_PREL_PG_HI21(X) */
759 0x91000210, /* add ip0, ip0, :lo12:X */
760 /* ADD_ABS_LO12_NC(X) */
761 0xd61f0200, /* br ip0 */
762 0x00000000, /* alignment padding */
763 };
764
765 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
766 {
767 0x58000050, /* ldr ip0, 0x8 */
768 0xd61f0200, /* br ip0 */
769 0x00000000, /* address field */
770 0x00000000, /* address field */
771 };
772
773 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
774 {
775 0x58000090, /* ldr ip0, 0x10 */
776 0x10000011, /* adr ip1, #0 */
777 0x8b110210, /* add ip0, ip0, ip1 */
778 0xd61f0200, /* br ip0 */
779 0x00000000, /* address field */
780 0x00000000, /* address field */
781 0x00000000, /* alignment padding */
782 0x00000000, /* alignment padding */
783 };
784
785 const static Insntype ST_E_843419_INSNS[] =
786 {
787 0x00000000, /* Placeholder for erratum insn. */
788 0x14000000, /* b <label> */
789 };
790
791 // ST_E_835769 has the same stub template as ST_E_843419
792 // but we reproduce the array here so that the sizeof
793 // expressions in install_insn_template will work.
794 const static Insntype ST_E_835769_INSNS[] =
795 {
796 0x00000000, /* Placeholder for erratum insn. */
797 0x14000000, /* b <label> */
798 };
799
800 #define install_insn_template(T) \
801 const static Stub_template<big_endian> template_##T = { \
802 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
803 this->stub_templates_[T] = &template_##T
804
805 install_insn_template(ST_NONE);
806 install_insn_template(ST_ADRP_BRANCH);
807 install_insn_template(ST_LONG_BRANCH_ABS);
808 install_insn_template(ST_LONG_BRANCH_PCREL);
809 install_insn_template(ST_E_843419);
810 install_insn_template(ST_E_835769);
811
812 #undef install_insn_template
813 }
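// Editorial usage sketch (not part of the original source): callers fetch a
// template by stub type, e.g.
//
//   const Stub_template<big_endian>* t =
//     Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // t->insn_num == 4 and t->insns[2] == 0xd61f0200 (br ip0).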
814
815
816 // Base class for stubs.
817
818 template<int size, bool big_endian>
819 class Stub_base
820 {
821 public:
822 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
823 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
824
825 static const AArch64_address invalid_address =
826 static_cast<AArch64_address>(-1);
827
828 static const section_offset_type invalid_offset =
829 static_cast<section_offset_type>(-1);
830
831 Stub_base(int type)
832 : destination_address_(invalid_address),
833 offset_(invalid_offset),
834 type_(type)
835 {}
836
837 ~Stub_base()
838 {}
839
840 // Get stub type.
841 int
842 type() const
843 { return this->type_; }
844
845 // Get stub template that provides stub insn information.
846 const Stub_template<big_endian>*
847 stub_template() const
848 {
849 return Stub_template_repertoire<big_endian>::
850 get_stub_template(this->type());
851 }
852
853 // Get destination address.
854 AArch64_address
855 destination_address() const
856 {
857 gold_assert(this->destination_address_ != this->invalid_address);
858 return this->destination_address_;
859 }
860
861 // Set destination address.
862 void
863 set_destination_address(AArch64_address address)
864 {
865 gold_assert(address != this->invalid_address);
866 this->destination_address_ = address;
867 }
868
869 // Reset the destination address.
870 void
871 reset_destination_address()
872 { this->destination_address_ = this->invalid_address; }
873
874 // Get offset of code stub. For Reloc_stub, it is the offset from the
875 // beginning of its containing stub table; for Erratum_stub, it is the offset
876 // from the end of reloc_stubs.
877 section_offset_type
878 offset() const
879 {
880 gold_assert(this->offset_ != this->invalid_offset);
881 return this->offset_;
882 }
883
884 // Set stub offset.
885 void
886 set_offset(section_offset_type offset)
887 { this->offset_ = offset; }
888
889 // Return the stub insns.
890 const Insntype*
891 insns() const
892 { return this->stub_template()->insns; }
893
894 // Return num of stub insns.
895 unsigned int
896 insn_num() const
897 { return this->stub_template()->insn_num; }
898
899 // Get size of the stub.
900 int
901 stub_size() const
902 {
903 return this->insn_num() *
904 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
905 }
906
907 // Write stub to output file.
908 void
909 write(unsigned char* view, section_size_type view_size)
910 { this->do_write(view, view_size); }
911
912 protected:
913 // Abstract method to be implemented by sub-classes.
914 virtual void
915 do_write(unsigned char*, section_size_type) = 0;
916
917 private:
918 // The last insn of a stub is a jump to destination insn. This field records
919 // the destination address.
920 AArch64_address destination_address_;
921 // The stub offset. Note this has different interpretations for a
922 // Reloc_stub and an Erratum_stub. For Reloc_stub this is the offset from the
923 // beginning of the containing stub_table, whereas for Erratum_stub, this is
924 // the offset from the end of reloc_stubs.
925 section_offset_type offset_;
926 // Stub type.
927 const int type_;
928 }; // End of "Stub_base".
929
930
931 // Erratum stub class. An erratum stub differs from a reloc stub in that for
932 // each erratum occurrence, we generate an erratum stub. We never share erratum
933 // stubs, whereas for reloc stubs, different branch insns share a single reloc
934 // stub as long as the branch targets are the same. (More to the point, reloc
935 // stubs can be shared because they're used to reach a specific target, whereas
936 // erratum stubs branch back to the original control flow.)
937
938 template<int size, bool big_endian>
939 class Erratum_stub : public Stub_base<size, big_endian>
940 {
941 public:
942 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
943 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
944 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
945 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
946
947 static const int STUB_ADDR_ALIGN;
948
949 static const Insntype invalid_insn = static_cast<Insntype>(-1);
950
951 Erratum_stub(The_aarch64_relobj* relobj, int type,
952 unsigned shndx, unsigned int sh_offset)
953 : Stub_base<size, big_endian>(type), relobj_(relobj),
954 shndx_(shndx), sh_offset_(sh_offset),
955 erratum_insn_(invalid_insn),
956 erratum_address_(this->invalid_address)
957 {}
958
959 ~Erratum_stub() {}
960
961 // Return the object that contains the erratum.
962 The_aarch64_relobj*
963 relobj()
964 { return this->relobj_; }
965
966 // Get section index of the erratum.
967 unsigned int
968 shndx() const
969 { return this->shndx_; }
970
971 // Get section offset of the erratum.
972 unsigned int
973 sh_offset() const
974 { return this->sh_offset_; }
975
976 // Get the erratum insn. This is the insn located at the erratum address.
977 Insntype
978 erratum_insn() const
979 {
980 gold_assert(this->erratum_insn_ != this->invalid_insn);
981 return this->erratum_insn_;
982 }
983
984 // Set the insn at which the erratum occurs.
985 void
986 set_erratum_insn(Insntype insn)
987 { this->erratum_insn_ = insn; }
988
989 // For 843419, the erratum insn is ld/st xt, [xn, #uimm], which may be a
990 // relocation spot; in that case the erratum_insn_ recorded during the scanning
991 // phase is no longer the one we want to write out to the stub, so update
992 // erratum_insn_ with the relocated version. Also note that in this case xn
993 // must not be "PC", so it is safe to move the erratum insn from its original
994 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
995 // insn, which cannot be a relocation spot (an assertion is added regardless).
996 void
997 update_erratum_insn(Insntype insn)
998 {
999 gold_assert(this->erratum_insn_ != this->invalid_insn);
1000 switch (this->type())
1001 {
1002 case ST_E_843419:
1003 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
1004 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
1005 gold_assert(Insn_utilities::aarch64_rd(insn) ==
1006 Insn_utilities::aarch64_rd(this->erratum_insn()));
1007 gold_assert(Insn_utilities::aarch64_rn(insn) ==
1008 Insn_utilities::aarch64_rn(this->erratum_insn()));
1009 // Update plain ld/st insn with relocated insn.
1010 this->erratum_insn_ = insn;
1011 break;
1012 case ST_E_835769:
1013 gold_assert(insn == this->erratum_insn());
1014 break;
1015 default:
1016 gold_unreachable();
1017 }
1018 }
1019
1020
1021 // Return the address where the erratum fix must be applied.
1022 AArch64_address
1023 erratum_address() const
1024 {
1025 gold_assert(this->erratum_address_ != this->invalid_address);
1026 return this->erratum_address_;
1027 }
1028
1029 // Set the address where the erratum fix must be applied.
1030 void
1031 set_erratum_address(AArch64_address addr)
1032 { this->erratum_address_ = addr; }
1033
1034 // Later relaxation passes may alter the recorded erratum and destination
1035 // addresses. Given an up-to-date output section address for shndx_ in
1036 // relobj_, we can derive the erratum address and the destination address.
1037 void
1038 update_erratum_address(AArch64_address output_section_addr)
1039 {
1040 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1041 AArch64_address updated_addr = output_section_addr + this->sh_offset_;
1042 this->set_erratum_address(updated_addr);
1043 this->set_destination_address(updated_addr + BPI);
1044 }
1045
1046 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1047 // sh_offset). We do not include 'type' in the calculation, because there is
1048 // at most one stub type at (obj, shndx, sh_offset).
1049 bool
1050 operator<(const Erratum_stub<size, big_endian>& k) const
1051 {
1052 if (this == &k)
1053 return false;
1054 // We group stubs by relobj.
1055 if (this->relobj_ != k.relobj_)
1056 return this->relobj_ < k.relobj_;
1057 // Then by section index.
1058 if (this->shndx_ != k.shndx_)
1059 return this->shndx_ < k.shndx_;
1060 // Lastly by section offset.
1061 return this->sh_offset_ < k.sh_offset_;
1062 }
1063
1064 void
1065 invalidate_erratum_stub()
1066 {
1067 gold_assert(this->erratum_insn_ != invalid_insn);
1068 this->erratum_insn_ = invalid_insn;
1069 }
1070
1071 bool
1072 is_invalidated_erratum_stub()
1073 { return this->erratum_insn_ == invalid_insn; }
1074
1075 protected:
1076 virtual void
1077 do_write(unsigned char*, section_size_type);
1078
1079 private:
1080 // The object that needs to be fixed.
1081 The_aarch64_relobj* relobj_;
1082 // The shndx in the object that needs to be fixed.
1083 const unsigned int shndx_;
1084 // The section offset in the object that needs to be fixed.
1085 const unsigned int sh_offset_;
1086 // The insn to be fixed.
1087 Insntype erratum_insn_;
1088 // The address of the above insn.
1089 AArch64_address erratum_address_;
1090 }; // End of "Erratum_stub".
1091
1092
1093 // Erratum subclass to wrap additional info needed by 843419. In fixing this
1094 // erratum, we may choose to replace 'adrp' with 'adr'; in that case, we need
1095 // the adrp's code position (two or three insns before the erratum insn itself).
1096
1097 template<int size, bool big_endian>
1098 class E843419_stub : public Erratum_stub<size, big_endian>
1099 {
1100 public:
1101 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1102
1103 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1104 unsigned int shndx, unsigned int sh_offset,
1105 unsigned int adrp_sh_offset)
1106 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1107 adrp_sh_offset_(adrp_sh_offset)
1108 {}
1109
1110 unsigned int
1111 adrp_sh_offset() const
1112 { return this->adrp_sh_offset_; }
1113
1114 private:
1115 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because we
1116 // can obtain it from its parent.)
1117 const unsigned int adrp_sh_offset_;
1118 };
1119
1120
1121 template<int size, bool big_endian>
1122 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1123
1124 // Comparator used in set definition.
1125 template<int size, bool big_endian>
1126 struct Erratum_stub_less
1127 {
1128 bool
1129 operator()(const Erratum_stub<size, big_endian>* s1,
1130 const Erratum_stub<size, big_endian>* s2) const
1131 { return *s1 < *s2; }
1132 };
1133
1134 // Erratum_stub implementation for writing stub to output file.
1135
1136 template<int size, bool big_endian>
1137 void
1138 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1139 {
1140 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1141 const Insntype* insns = this->insns();
1142 uint32_t num_insns = this->insn_num();
1143 Insntype* ip = reinterpret_cast<Insntype*>(view);
1144 // For the currently implemented errata 843419 and 835769, the first insn in
1145 // the stub is always a copy of the problematic insn (for 843419 the mem
1146 // access insn, for 835769 the mac insn), followed by a jump back.
1147 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1148 for (uint32_t i = 1; i < num_insns; ++i)
1149 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1150 }
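// Editorial note (not part of the original source): for an 843419 stub this
// writes, e.g., the relocated "ldr x0, [x1, #8]" recorded as the erratum insn,
// followed by the template's "b <label>" placeholder, whose branch offset is
// filled in later by relocate_erratum_stub().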
1151
1152
1153 // Reloc stub class.
1154
1155 template<int size, bool big_endian>
1156 class Reloc_stub : public Stub_base<size, big_endian>
1157 {
1158 public:
1159 typedef Reloc_stub<size, big_endian> This;
1160 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1161
1162 // Branch range. This is used to calculate the section group size, as well as
1163 // determine whether a stub is needed.
1164 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1165 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1166
1167 // Constants used to determine if an offset fits in the adrp instruction
1168 // encoding.
1169 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1170 static const int MIN_ADRP_IMM = -(1 << 20);
1171
1172 static const int BYTES_PER_INSN = 4;
1173 static const int STUB_ADDR_ALIGN;
1174
1175 // Determine whether the offset fits in the jump/branch instruction.
1176 static bool
1177 aarch64_valid_branch_offset_p(int64_t offset)
1178 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1179
1180 // Determine whether the offset fits in the adrp immediate field.
1181 static bool
1182 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1183 {
1184 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1185 int64_t adrp_imm = Reloc::Page (dest) - Reloc::Page (location);
1186 adrp_imm = adrp_imm < 0 ? ~(~adrp_imm >> 12) : adrp_imm >> 12;
1187 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1188 }
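  // Editorial worked example (not part of the original source), assuming
  // Reloc::Page() rounds its argument down to a 4 KiB page boundary: for
  // location 0x400000 and dest 0x80401000, the page delta is 0x80001000, so
  // adrp_imm = 0x80001 (524289 pages, roughly +2 GiB), which is within
  // [MIN_ADRP_IMM, MAX_ADRP_IMM] and can therefore be reached by ST_ADRP_BRANCH.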
1189
1190 // Determine the stub type for a certain relocation or ST_NONE, if no stub is
1191 // needed.
1192 static int
1193 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1194 AArch64_address target);
1195
1196 Reloc_stub(int type)
1197 : Stub_base<size, big_endian>(type)
1198 { }
1199
1200 ~Reloc_stub()
1201 { }
1202
1203 // The key class used to index the stub instance in the stub table's stub map.
1204 class Key
1205 {
1206 public:
1207 Key(int type, const Symbol* symbol, const Relobj* relobj,
1208 unsigned int r_sym, int32_t addend)
1209 : type_(type), addend_(addend)
1210 {
1211 if (symbol != NULL)
1212 {
1213 this->r_sym_ = Reloc_stub::invalid_index;
1214 this->u_.symbol = symbol;
1215 }
1216 else
1217 {
1218 gold_assert(relobj != NULL && r_sym != invalid_index);
1219 this->r_sym_ = r_sym;
1220 this->u_.relobj = relobj;
1221 }
1222 }
1223
1224 ~Key()
1225 { }
1226
1227 // Return stub type.
1228 int
1229 type() const
1230 { return this->type_; }
1231
1232 // Return the local symbol index or invalid_index.
1233 unsigned int
1234 r_sym() const
1235 { return this->r_sym_; }
1236
1237 // Return the symbol if there is one.
1238 const Symbol*
1239 symbol() const
1240 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1241
1242 // Return the relobj if there is one.
1243 const Relobj*
1244 relobj() const
1245 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1246
1247 // Whether this equals to another key k.
1248 bool
1249 eq(const Key& k) const
1250 {
1251 return ((this->type_ == k.type_)
1252 && (this->r_sym_ == k.r_sym_)
1253 && ((this->r_sym_ != Reloc_stub::invalid_index)
1254 ? (this->u_.relobj == k.u_.relobj)
1255 : (this->u_.symbol == k.u_.symbol))
1256 && (this->addend_ == k.addend_));
1257 }
1258
1259 // Return a hash value.
1260 size_t
1261 hash_value() const
1262 {
1263 size_t name_hash_value = gold::string_hash<char>(
1264 (this->r_sym_ != Reloc_stub::invalid_index)
1265 ? this->u_.relobj->name().c_str()
1266 : this->u_.symbol->name());
1267 // We only have 4 stub types.
1268 size_t stub_type_hash_value = 0x03 & this->type_;
1269 return (name_hash_value
1270 ^ stub_type_hash_value
1271 ^ ((this->r_sym_ & 0x3fff) << 2)
1272 ^ ((this->addend_ & 0xffff) << 16));
1273 }
1274
1275 // Functors for STL associative containers.
1276 struct hash
1277 {
1278 size_t
1279 operator()(const Key& k) const
1280 { return k.hash_value(); }
1281 };
1282
1283 struct equal_to
1284 {
1285 bool
1286 operator()(const Key& k1, const Key& k2) const
1287 { return k1.eq(k2); }
1288 };
1289
1290 private:
1291 // Stub type.
1292 const int type_;
1293 // If this is a local symbol, this is the index in the defining object.
1294 // Otherwise, it is invalid_index for a global symbol.
1295 unsigned int r_sym_;
1296 // If r_sym_ is an invalid index, this points to a global symbol.
1297 // Otherwise, it points to a relobj. We used the unsized and target
1298 // independent Symbol and Relobj classes instead of Sized_symbol<32> and
1299 // Arm_relobj, in order to avoid making the stub class a template
1300 // as most of the stub machinery is endianness-neutral. However, it
1301 // may require a bit of casting done by users of this class.
1302 union
1303 {
1304 const Symbol* symbol;
1305 const Relobj* relobj;
1306 } u_;
1307 // Addend associated with a reloc.
1308 int32_t addend_;
1309 }; // End of inner class Reloc_stub::Key
1310
1311 protected:
1312 // This may be overridden in the child class.
1313 virtual void
1314 do_write(unsigned char*, section_size_type);
1315
1316 private:
1317 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1318 }; // End of Reloc_stub
1319
1320 template<int size, bool big_endian>
1321 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1322
1323 // Write data to output file.
1324
1325 template<int size, bool big_endian>
1326 void
1327 Reloc_stub<size, big_endian>::
1328 do_write(unsigned char* view, section_size_type)
1329 {
1330 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1331 const uint32_t* insns = this->insns();
1332 uint32_t num_insns = this->insn_num();
1333 Insntype* ip = reinterpret_cast<Insntype*>(view);
1334 for (uint32_t i = 0; i < num_insns; ++i)
1335 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1336 }
1337
1338
1339 // Determine the stub type for a certain relocation or ST_NONE, if no stub is
1340 // needed.
1341
1342 template<int size, bool big_endian>
1343 inline int
1344 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1345 unsigned int r_type, AArch64_address location, AArch64_address dest)
1346 {
1347 int64_t branch_offset = 0;
1348 switch(r_type)
1349 {
1350 case elfcpp::R_AARCH64_CALL26:
1351 case elfcpp::R_AARCH64_JUMP26:
1352 branch_offset = dest - location;
1353 break;
1354 default:
1355 gold_unreachable();
1356 }
1357
1358 if (aarch64_valid_branch_offset_p(branch_offset))
1359 return ST_NONE;
1360
1361 if (aarch64_valid_for_adrp_p(location, dest))
1362 return ST_ADRP_BRANCH;
1363
1364 // Always use PC-relative addressing in case of -shared or -pie.
1365 if (parameters->options().output_is_position_independent())
1366 return ST_LONG_BRANCH_PCREL;
1367
1368 // ST_LONG_BRANCH_ABS saves 2 insns per stub compared to ST_LONG_BRANCH_PCREL,
1369 // but it is only applicable to non-shared, non-pie output.
1370 return ST_LONG_BRANCH_ABS;
1371 }
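// Editorial examples (not part of the original source): a R_AARCH64_CALL26
// whose destination is within +/-128 MiB of the call site needs no stub
// (ST_NONE); a destination a few hundred MiB away, still within adrp range,
// gets ST_ADRP_BRANCH; beyond that, a -shared or -pie link gets
// ST_LONG_BRANCH_PCREL and a fixed-address link gets ST_LONG_BRANCH_ABS.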
1372
1373 // A class to hold stubs for the AArch64 target. This contains 2 different
1374 // types of stubs - reloc stubs and erratum stubs.
1375
1376 template<int size, bool big_endian>
1377 class Stub_table : public Output_data
1378 {
1379 public:
1380 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1381 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1382 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1383 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1384 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1385 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1386 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1387 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1388 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1389 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1390 typedef Stub_table<size, big_endian> The_stub_table;
1391 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1392 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1393 Reloc_stub_map;
1394 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1395 typedef Relocate_info<size, big_endian> The_relocate_info;
1396
1397 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1398 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1399
1400 Stub_table(The_aarch64_input_section* owner)
1401 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1402 erratum_stubs_size_(0), prev_data_size_(0)
1403 { }
1404
1405 ~Stub_table()
1406 { }
1407
1408 The_aarch64_input_section*
1409 owner() const
1410 { return owner_; }
1411
1412 // Whether this stub table is empty.
1413 bool
1414 empty() const
1415 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1416
1417 // Return the current data size.
1418 off_t
1419 current_data_size() const
1420 { return this->current_data_size_for_child(); }
1421
1422 // Add a STUB using KEY. The caller is responsible for avoiding addition
1423 // if a STUB with the same key has already been added.
1424 void
1425 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1426
1427 // Add an erratum stub into the erratum stub set. The set is ordered by
1428 // (relobj, shndx, sh_offset).
1429 void
1430 add_erratum_stub(The_erratum_stub* stub);
1431
1432 // Find if such erratum exists for any given (obj, shndx, sh_offset).
1433 The_erratum_stub*
1434 find_erratum_stub(The_aarch64_relobj* a64relobj,
1435 unsigned int shndx, unsigned int sh_offset);
1436
1437 // Find all the errata for a given input section. The return value is a pair
1438 // of iterators [begin, end).
1439 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1440 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1441 unsigned int shndx);
1442
1443 // Compute the erratum stub address.
1444 AArch64_address
1445 erratum_stub_address(The_erratum_stub* stub) const
1446 {
1447 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1448 The_erratum_stub::STUB_ADDR_ALIGN);
1449 r += stub->offset();
1450 return r;
1451 }
1452
1453 // Finalize stubs. No-op here, just for completeness.
1454 void
1455 finalize_stubs()
1456 { }
1457
1458 // Look up a relocation stub using KEY. Return NULL if there is none.
1459 The_reloc_stub*
1460 find_reloc_stub(The_reloc_stub_key& key)
1461 {
1462 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1463 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1464 }
1465
1466 // Relocate reloc stubs in this stub table. This does not relocate erratum stubs.
1467 void
1468 relocate_reloc_stubs(const The_relocate_info*,
1469 The_target_aarch64*,
1470 Output_section*,
1471 unsigned char*,
1472 AArch64_address,
1473 section_size_type);
1474
1475 // Relocate an erratum stub.
1476 void
1477 relocate_erratum_stub(The_erratum_stub*, unsigned char*);
1478
1479 // Update data size at the end of a relaxation pass. Return true if data size
1480 // is different from that of the previous relaxation pass.
1481 bool
1482 update_data_size_changed_p()
1483 {
1484 // No addralign changed here.
1485 off_t s = align_address(this->reloc_stubs_size_,
1486 The_erratum_stub::STUB_ADDR_ALIGN)
1487 + this->erratum_stubs_size_;
1488 bool changed = (s != this->prev_data_size_);
1489 this->prev_data_size_ = s;
1490 return changed;
1491 }
1492
1493 protected:
1494 // Write out section contents.
1495 void
1496 do_write(Output_file*);
1497
1498 // Return the required alignment.
1499 uint64_t
1500 do_addralign() const
1501 {
1502 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1503 The_erratum_stub::STUB_ADDR_ALIGN);
1504 }
1505
1506 // Reset address and file offset.
1507 void
1508 do_reset_address_and_file_offset()
1509 { this->set_current_data_size_for_child(this->prev_data_size_); }
1510
1511 // Set final data size.
1512 void
1513 set_final_data_size()
1514 { this->set_data_size(this->current_data_size()); }
1515
1516 private:
1517 // Relocate one reloc stub.
1518 void
1519 relocate_reloc_stub(The_reloc_stub*,
1520 const The_relocate_info*,
1521 The_target_aarch64*,
1522 Output_section*,
1523 unsigned char*,
1524 AArch64_address,
1525 section_size_type);
1526
1527 private:
1528 // Owner of this stub table.
1529 The_aarch64_input_section* owner_;
1530 // The relocation stubs.
1531 Reloc_stub_map reloc_stubs_;
1532 // The erratum stubs.
1533 Erratum_stub_set erratum_stubs_;
1534 // Size of reloc stubs.
1535 off_t reloc_stubs_size_;
1536 // Size of erratum stubs.
1537 off_t erratum_stubs_size_;
1538 // Data size of this stub table in the previous relaxation pass.
1539 off_t prev_data_size_;
1540 }; // End of Stub_table
1541
1542
1543 // Add an erratum stub into the erratum stub set. The set is ordered by
1544 // (relobj, shndx, sh_offset).
1545
1546 template<int size, bool big_endian>
1547 void
1548 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1549 {
1550 std::pair<Erratum_stub_set_iter, bool> ret =
1551 this->erratum_stubs_.insert(stub);
1552 gold_assert(ret.second);
1553 this->erratum_stubs_size_ = align_address(
1554 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1555 stub->set_offset(this->erratum_stubs_size_);
1556 this->erratum_stubs_size_ += stub->stub_size();
1557 }
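// Editorial note (not part of the original source): with the current 2-insn
// erratum templates each stub occupies 8 bytes, so successive calls hand out
// offsets 0, 8, 16, ... measured from the (aligned) end of the reloc stubs.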
1558
1559
1560 // Find if such erratum exists for given (obj, shndx, sh_offset).
1561
1562 template<int size, bool big_endian>
1563 Erratum_stub<size, big_endian>*
1564 Stub_table<size, big_endian>::find_erratum_stub(
1565 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1566 {
1567 // A dummy object used as key to search in the set.
1568 The_erratum_stub key(a64relobj, ST_NONE,
1569 shndx, sh_offset);
1570 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1571 if (i != this->erratum_stubs_.end())
1572 {
1573 The_erratum_stub* stub(*i);
1574 gold_assert(stub->erratum_insn() != 0);
1575 return stub;
1576 }
1577 return NULL;
1578 }
1579
1580
1581 // Find all the errata for a given input section. The return value is a pair of
1582 // iterators [begin, end).
1583
1584 template<int size, bool big_endian>
1585 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1586 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1587 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1588 The_aarch64_relobj* a64relobj, unsigned int shndx)
1589 {
1590 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1591 Erratum_stub_set_iter start, end;
1592 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1593 start = this->erratum_stubs_.lower_bound(&low_key);
1594 if (start == this->erratum_stubs_.end())
1595 return Result_pair(this->erratum_stubs_.end(),
1596 this->erratum_stubs_.end());
1597 end = start;
1598 while (end != this->erratum_stubs_.end() &&
1599 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1600 ++end;
1601 return Result_pair(start, end);
1602 }
1603
1604
1605 // Add a STUB using KEY. The caller is responsible for avoiding addition
1606 // if a STUB with the same key has already been added.
1607
1608 template<int size, bool big_endian>
1609 void
1610 Stub_table<size, big_endian>::add_reloc_stub(
1611 The_reloc_stub* stub, const The_reloc_stub_key& key)
1612 {
1613 gold_assert(stub->type() == key.type());
1614 this->reloc_stubs_[key] = stub;
1615
1616 // Assign stub offset early. We can do this because we never remove
1617 // reloc stubs and they are in the beginning of the stub table.
1618 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1619 The_reloc_stub::STUB_ADDR_ALIGN);
1620 stub->set_offset(this->reloc_stubs_size_);
1621 this->reloc_stubs_size_ += stub->stub_size();
1622 }
1623
1624
1625 // Relocate an erratum stub.
1626
1627 template<int size, bool big_endian>
1628 void
1629 Stub_table<size, big_endian>::
1630 relocate_erratum_stub(The_erratum_stub* estub,
1631 unsigned char* view)
1632 {
1633 // Just for convenience.
1634 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1635
1636 gold_assert(!estub->is_invalidated_erratum_stub());
1637 AArch64_address stub_address = this->erratum_stub_address(estub);
1638 // The address of "b" in the stub that is to be "relocated".
1639 AArch64_address stub_b_insn_address;
1640 // Branch offset that is to be filled in "b" insn.
1641 int b_offset = 0;
1642 switch (estub->type())
1643 {
1644 case ST_E_843419:
1645 case ST_E_835769:
1646 // The 1st insn of the erratum sequence could be a relocation spot; in
1647 // that case we need to fix it with the relocated insn,
1648 // "estub->erratum_insn()".
1649 elfcpp::Swap<32, big_endian>::writeval(
1650 view + (stub_address - this->address()),
1651 estub->erratum_insn());
1652 // For the erratum, the 2nd insn is a b-insn to be patched
1653 // (relocated).
1654 stub_b_insn_address = stub_address + 1 * BPI;
1655 b_offset = estub->destination_address() - stub_b_insn_address;
1656 AArch64_relocate_functions<size, big_endian>::construct_b(
1657 view + (stub_b_insn_address - this->address()),
1658 ((unsigned int)(b_offset)) & 0xfffffff);
1659 break;
1660 default:
1661 gold_unreachable();
1662 break;
1663 }
1664 estub->invalidate_erratum_stub();
1665 }
1666
1667
1668 // Relocate only reloc stubs in this stub table. This does not relocate erratum
1669 // stubs.
1670
1671 template<int size, bool big_endian>
1672 void
1673 Stub_table<size, big_endian>::
1674 relocate_reloc_stubs(const The_relocate_info* relinfo,
1675 The_target_aarch64* target_aarch64,
1676 Output_section* output_section,
1677 unsigned char* view,
1678 AArch64_address address,
1679 section_size_type view_size)
1680 {
1681 // "view_size" is the total size of the stub_table.
1682 gold_assert(address == this->address() &&
1683 view_size == static_cast<section_size_type>(this->data_size()));
1684 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1685 p != this->reloc_stubs_.end(); ++p)
1686 relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
1687 view, address, view_size);
1688 }
1689
1690
1691 // Relocate one reloc stub. This is a helper for
1692 // Stub_table::relocate_reloc_stubs().
1693
1694 template<int size, bool big_endian>
1695 void
1696 Stub_table<size, big_endian>::
1697 relocate_reloc_stub(The_reloc_stub* stub,
1698 const The_relocate_info* relinfo,
1699 The_target_aarch64* target_aarch64,
1700 Output_section* output_section,
1701 unsigned char* view,
1702 AArch64_address address,
1703 section_size_type view_size)
1704 {
1705 // "offset" is the offset from the beginning of the stub_table.
1706 section_size_type offset = stub->offset();
1707 section_size_type stub_size = stub->stub_size();
1708 // "view_size" is the total size of the stub_table.
1709 gold_assert(offset + stub_size <= view_size);
1710
1711 target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
1712 view + offset, address + offset, view_size);
1713 }
1714
1715
1716 // Write out the stubs to file.
1717
1718 template<int size, bool big_endian>
1719 void
1720 Stub_table<size, big_endian>::do_write(Output_file* of)
1721 {
1722 off_t offset = this->offset();
1723 const section_size_type oview_size =
1724 convert_to_section_size_type(this->data_size());
1725 unsigned char* const oview = of->get_output_view(offset, oview_size);
1726
1727 // Write relocation stubs.
1728 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1729 p != this->reloc_stubs_.end(); ++p)
1730 {
1731 The_reloc_stub* stub = p->second;
1732 AArch64_address address = this->address() + stub->offset();
1733 gold_assert(address ==
1734 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1735 stub->write(oview + stub->offset(), stub->stub_size());
1736 }
1737
1738 // Write erratum stubs.
1739 unsigned int erratum_stub_start_offset =
1740 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1741 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1742 p != this->erratum_stubs_.end(); ++p)
1743 {
1744 The_erratum_stub* stub(*p);
1745 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1746 stub->stub_size());
1747 }
1748
1749 of->write_output_view(this->offset(), oview_size, oview);
1750 }
1751
1752
1753 // AArch64_relobj class.
1754
1755 template<int size, bool big_endian>
1756 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1757 {
1758 public:
1759 typedef AArch64_relobj<size, big_endian> This;
1760 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1761 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1762 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1763 typedef Stub_table<size, big_endian> The_stub_table;
1764 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1765 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1766 typedef std::vector<The_stub_table*> Stub_table_list;
1767 static const AArch64_address invalid_address =
1768 static_cast<AArch64_address>(-1);
1769
1770 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1771 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1772 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1773 stub_tables_()
1774 { }
1775
1776 ~AArch64_relobj()
1777 { }
1778
1779 // Return the stub table of the SHNDX-th section if there is one.
1780 The_stub_table*
1781 stub_table(unsigned int shndx) const
1782 {
1783 gold_assert(shndx < this->stub_tables_.size());
1784 return this->stub_tables_[shndx];
1785 }
1786
1787 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1788 void
1789 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1790 {
1791 gold_assert(shndx < this->stub_tables_.size());
1792 this->stub_tables_[shndx] = stub_table;
1793 }
1794
1795 // Entrance to errata scanning.
1796 void
1797 scan_errata(unsigned int shndx,
1798 const elfcpp::Shdr<size, big_endian>&,
1799 Output_section*, const Symbol_table*,
1800 The_target_aarch64*);
1801
1802 // Scan all relocation sections for stub generation.
1803 void
1804 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1805 const Layout*);
1806
1807 // Whether a section is a scannable text section.
1808 bool
1809 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1810 const Output_section*, const Symbol_table*);
1811
1812 // Convert regular input section with index SHNDX to a relaxed section.
1813 void
1814 convert_input_section_to_relaxed_section(unsigned shndx)
1815 {
1816 // The stubs have relocations and we need to process them after writing
1817 // out the stubs, so relocation must now follow section writes.
1818 this->set_section_offset(shndx, -1ULL);
1819 this->set_relocs_must_follow_section_writes();
1820 }
1821
1822 // Structure for mapping symbol position.
1823 struct Mapping_symbol_position
1824 {
1825 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1826 shndx_(shndx), offset_(offset)
1827 {}
1828
1829 // "<" comparator used in the ordered map container.
1830 bool
1831 operator<(const Mapping_symbol_position& p) const
1832 {
1833 return (this->shndx_ < p.shndx_
1834 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1835 }
1836
1837 // Section index.
1838 unsigned int shndx_;
1839
1840 // Section offset.
1841 AArch64_address offset_;
1842 };
1843
1844 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1845
1846 protected:
1847 // Post constructor setup.
1848 void
1849 do_setup()
1850 {
1851 // Call parent's setup method.
1852 Sized_relobj_file<size, big_endian>::do_setup();
1853
1854 // Initialize look-up tables.
1855 this->stub_tables_.resize(this->shnum());
1856 }
1857
1858 virtual void
1859 do_relocate_sections(
1860 const Symbol_table* symtab, const Layout* layout,
1861 const unsigned char* pshdrs, Output_file* of,
1862 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1863
1864 // Count local symbols and (optionally) record mapping info.
1865 virtual void
1866 do_count_local_symbols(Stringpool_template<char>*,
1867 Stringpool_template<char>*);
1868
1869 private:
1870 // Fix all errata in the object, and for each erratum, relocate corresponding
1871 // erratum stub.
1872 void
1873 fix_errata_and_relocate_erratum_stubs(
1874 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1875
1876 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1877 // applied.
1878 bool
1879 try_fix_erratum_843419_optimized(
1880 The_erratum_stub*, AArch64_address,
1881 typename Sized_relobj_file<size, big_endian>::View_size&);
1882
1883 // Whether a section needs to be scanned for relocation stubs.
1884 bool
1885 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1886 const Relobj::Output_sections&,
1887 const Symbol_table*, const unsigned char*);
1888
1889 // List of stub tables.
1890 Stub_table_list stub_tables_;
1891
1892 // Mapping symbol information sorted by (section index, section_offset).
1893 Mapping_symbol_info mapping_symbol_info_;
1894 }; // End of AArch64_relobj
1895
1896
1897 // Override to record mapping symbol information.
1898 template<int size, bool big_endian>
1899 void
1900 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1901 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1902 {
1903 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1904
1905 // Only erratum-fixing work needs mapping symbols, so skip this time-consuming
1906 // processing if we are not fixing errata.
1907 if (!parameters->options().fix_cortex_a53_843419()
1908 && !parameters->options().fix_cortex_a53_835769())
1909 return;
1910
1911 const unsigned int loccount = this->local_symbol_count();
1912 if (loccount == 0)
1913 return;
1914
1915 // Read the symbol table section header.
1916 const unsigned int symtab_shndx = this->symtab_shndx();
1917 elfcpp::Shdr<size, big_endian>
1918 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1919 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1920
1921 // Read the local symbols.
1922 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1923 gold_assert(loccount == symtabshdr.get_sh_info());
1924 off_t locsize = loccount * sym_size;
1925 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1926 locsize, true, true);
1927
1928 // For mapping symbol processing, we need to read the symbol names.
1929 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1930 if (strtab_shndx >= this->shnum())
1931 {
1932 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1933 return;
1934 }
1935
1936 elfcpp::Shdr<size, big_endian>
1937 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1938 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1939 {
1940 this->error(_("symbol table name section has wrong type: %u"),
1941 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1942 return;
1943 }
1944
1945 const char* pnames =
1946 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1947 strtabshdr.get_sh_size(),
1948 false, false));
1949
1950 // Skip the first dummy symbol.
1951 psyms += sym_size;
1952 typename Sized_relobj_file<size, big_endian>::Local_values*
1953 plocal_values = this->local_values();
1954 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1955 {
1956 elfcpp::Sym<size, big_endian> sym(psyms);
1957 Symbol_value<size>& lv((*plocal_values)[i]);
1958 AArch64_address input_value = lv.input_value();
1959
1960 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1961 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1962 // symbols.
1963 // Mapping symbols can take one of the following four forms:
1964 // a) $x
1965 // b) $x.<any...>
1966 // c) $d
1967 // d) $d.<any...>
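// For example, "$x", "$x.0" and "$d.data" all match the test below, while
// a name such as "$xyz" does not (its third character is neither '\0' nor '.').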
1968 const char* sym_name = pnames + sym.get_st_name();
1969 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1970 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1971 {
1972 bool is_ordinary;
1973 unsigned int input_shndx =
1974 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1975 gold_assert(is_ordinary);
1976
1977 Mapping_symbol_position msp(input_shndx, input_value);
1978 // Insert mapping_symbol_info into map whose ordering is defined by
1979 // (shndx, offset_within_section).
1980 this->mapping_symbol_info_[msp] = sym_name[1];
1981 }
1982 }
1983 }
1984
1985
1986 // Fix all errata in the object and, for each erratum, relocate the
1987 // corresponding erratum stub (by calling Stub_table::relocate_erratum_stub).
1988
1989 template<int size, bool big_endian>
1990 void
1991 AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs(
1992 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1993 {
1994 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1995 unsigned int shnum = this->shnum();
1996 const Relobj::Output_sections& out_sections(this->output_sections());
1997 for (unsigned int i = 1; i < shnum; ++i)
1998 {
1999 The_stub_table* stub_table = this->stub_table(i);
2000 if (!stub_table)
2001 continue;
2002 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
2003 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
2004 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
2005 typename Sized_relobj_file<size, big_endian>::View_size&
2006 pview((*pviews)[i]);
2007 AArch64_address view_offset = 0;
2008 if (pview.is_input_output_view)
2009 {
2010 // In this case, write_sections has not added the output offset to
2011 // the view's address, so we must do so. Currently this only happens
2012 // for a relaxed section.
2013 unsigned int index = this->adjust_shndx(i);
2014 const Output_relaxed_input_section* poris =
2015 out_sections[index]->find_relaxed_input_section(this, index);
2016 gold_assert(poris != NULL);
2017 view_offset = poris->address() - pview.address;
2018 }
2019
2020 while (p != end)
2021 {
2022 The_erratum_stub* stub = *p;
2023
2024 // Double check data before fix.
2025 gold_assert(pview.address + view_offset + stub->sh_offset()
2026 == stub->erratum_address());
2027
2028 // Update previously recorded erratum insn with relocated
2029 // version.
2030 Insntype* ip =
2031 reinterpret_cast<Insntype*>(
2032 pview.view + view_offset + stub->sh_offset());
2033 Insntype insn_to_fix = ip[0];
2034 stub->update_erratum_insn(insn_to_fix);
2035
2036 // First try to see if erratum is 843419 and if it can be fixed
2037 // without using branch-to-stub.
2038 if (!try_fix_erratum_843419_optimized(stub, view_offset, pview))
2039 {
2040 // Replace the erratum insn with a branch-to-stub.
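// The branch is an unconditional B instruction, whose imm26 field encodes a
// 4-byte-aligned offset of up to 28 bits, hence the 0xfffffff mask below.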
2041 AArch64_address stub_address =
2042 stub_table->erratum_stub_address(stub);
2043 unsigned int b_offset = stub_address - stub->erratum_address();
2044 AArch64_relocate_functions<size, big_endian>::construct_b(
2045 pview.view + view_offset + stub->sh_offset(),
2046 b_offset & 0xfffffff);
2047 }
2048
2049 // The erratum fix is done (or skipped); continue to relocate the
2050 // erratum stub. Note that when the erratum fix is skipped (either
2051 // because we proactively change the code sequence or the code
2052 // sequence is changed by relaxation, etc.), we can still safely
2053 // relocate the erratum stub, even though the stub will never be
2054 // executed.
2055 stub_table->relocate_erratum_stub(
2056 stub,
2057 pview.view + (stub_table->address() - pview.address));
2058
2059 // Next erratum stub.
2060 ++p;
2061 }
2062 }
2063 }
2064
2065
2066 // This is an optimization for erratum 843419. The erratum sequence must begin
2067 // with 'adrp'; when the final value calculated by the adrp fits in an adr, we
2068 // can simply replace 'adrp' with 'adr', saving 2 jumps per occurrence. (Note,
2069 // however, that in this case we do not delete the erratum stub (it is too late
2070 // to do so); it is merely generated without ever being called.)
2071
2072 template<int size, bool big_endian>
2073 bool
2074 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
2075 The_erratum_stub* stub, AArch64_address view_offset,
2076 typename Sized_relobj_file<size, big_endian>::View_size& pview)
2077 {
2078 if (stub->type() != ST_E_843419)
2079 return false;
2080
2081 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2082 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2083 E843419_stub<size, big_endian>* e843419_stub =
2084 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2085 AArch64_address pc =
2086 pview.address + view_offset + e843419_stub->adrp_sh_offset();
2087 unsigned int adrp_offset = e843419_stub->adrp_sh_offset();
2088 Insntype* adrp_view =
2089 reinterpret_cast<Insntype*>(pview.view + view_offset + adrp_offset);
2090 Insntype adrp_insn = adrp_view[0];
2091
2092 // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come
2093 // from IE -> LE relaxation etc. As a side effect of the TLS relaxation, the
2094 // ADRP has been turned into an MRS, so there is no erratum risk anymore.
2095 // Therefore, we return true to avoid an unnecessary branch-to-stub.
2096 if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn))
2097 return true;
2098
2099 // If the instruction at adrp_sh_offset is not an ADRP and the instruction
2100 // before it is "mrs R, tpidr_el0", it may come from LD -> LE relaxation etc.
2101 // As in the case above, there is no erratum risk any more, so we can safely
2102 // return true.
2103 if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset)
2104 {
2105 Insntype* prev_view =
2106 reinterpret_cast<Insntype*>(
2107 pview.view + view_offset + adrp_offset - 4);
2108 Insntype prev_insn = prev_view[0];
2109
2110 if (Insn_utilities::is_mrs_tpidr_el0(prev_insn))
2111 return true;
2112 }
2113
2114 // If we reach here, the first instruction must be ADRP.
2115 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2116 // Get adrp 33-bit signed imm value.
2117 int64_t adrp_imm = Insn_utilities::
2118 aarch64_adrp_decode_imm(adrp_insn);
2119 // adrp - final value transferred to target register is calculated as:
2120 // PC[11:0] = Zeros(12)
2121 // adrp_dest_value = PC + adrp_imm;
2122 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2123 // adr - final value transferred to target register is calculated as:
2124 // PC + adr_imm
2125 // So we have:
2126 // PC + adr_imm = adrp_dest_value
2127 // ==>
2128 // adr_imm = adrp_dest_value - PC
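// Illustrative example: with PC = 0x400f24 and adrp_imm = 0x1000, we get
// adrp_dest_value = 0x401000 and adr_imm = 0xdc, which fits in the 21-bit
// adr immediate, so the adrp below can be rewritten as an adr.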
2129 int64_t adr_imm = adrp_dest_value - pc;
2130 // Check if imm fits in adr (21-bit signed).
2131 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2132 {
2133 // Convert 'adrp' into 'adr'.
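// Bit 31 distinguishes ADR (0) from ADRP (1); clearing it yields an ADR with
// the same destination register, whose immediate is then re-encoded below.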
2134 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2135 adr_insn = Insn_utilities::
2136 aarch64_adr_encode_imm(adr_insn, adr_imm);
2137 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2138 return true;
2139 }
2140 return false;
2141 }
2142
2143
2144 // Relocate sections.
2145
2146 template<int size, bool big_endian>
2147 void
2148 AArch64_relobj<size, big_endian>::do_relocate_sections(
2149 const Symbol_table* symtab, const Layout* layout,
2150 const unsigned char* pshdrs, Output_file* of,
2151 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2152 {
2153 // Relocate the section data.
2154 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2155 1, this->shnum() - 1);
2156
2157 // We do not generate stubs if doing a relocatable link.
2158 if (parameters->options().relocatable())
2159 return;
2160
2161 // This part only relocates erratum stubs that belong to input sections of this
2162 // object file.
2163 if (parameters->options().fix_cortex_a53_843419()
2164 || parameters->options().fix_cortex_a53_835769())
2165 this->fix_errata_and_relocate_erratum_stubs(pviews);
2166
2167 Relocate_info<size, big_endian> relinfo;
2168 relinfo.symtab = symtab;
2169 relinfo.layout = layout;
2170 relinfo.object = this;
2171
2172 // This part relocates all reloc stubs that are contained in stub_tables of
2173 // this object file.
2174 unsigned int shnum = this->shnum();
2175 The_target_aarch64* target = The_target_aarch64::current_target();
2176
2177 for (unsigned int i = 1; i < shnum; ++i)
2178 {
2179 The_aarch64_input_section* aarch64_input_section =
2180 target->find_aarch64_input_section(this, i);
2181 if (aarch64_input_section != NULL
2182 && aarch64_input_section->is_stub_table_owner()
2183 && !aarch64_input_section->stub_table()->empty())
2184 {
2185 Output_section* os = this->output_section(i);
2186 gold_assert(os != NULL);
2187
2188 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2189 relinfo.reloc_shdr = NULL;
2190 relinfo.data_shndx = i;
2191 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2192
2193 typename Sized_relobj_file<size, big_endian>::View_size&
2194 view_struct = (*pviews)[i];
2195 gold_assert(view_struct.view != NULL);
2196
2197 The_stub_table* stub_table = aarch64_input_section->stub_table();
2198 off_t offset = stub_table->address() - view_struct.address;
2199 unsigned char* view = view_struct.view + offset;
2200 AArch64_address address = stub_table->address();
2201 section_size_type view_size = stub_table->data_size();
2202 stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
2203 view_size);
2204 }
2205 }
2206 }
2207
2208
2209 // Determine if an input section is scannable for stub processing. SHDR is
2210 // the header of the section and SHNDX is the section index. OS is the output
2211 // section for the input section and SYMTAB is the global symbol table used to
2212 // look up ICF information.
2213
2214 template<int size, bool big_endian>
2215 bool
2216 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2217 const elfcpp::Shdr<size, big_endian>& text_shdr,
2218 unsigned int text_shndx,
2219 const Output_section* os,
2220 const Symbol_table* symtab)
2221 {
2222 // Skip any empty sections, unallocated sections or sections whose
2223 // type are not SHT_PROGBITS.
2224 if (text_shdr.get_sh_size() == 0
2225 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2226 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2227 return false;
2228
2229 // Skip any discarded or ICF'ed sections.
2230 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2231 return false;
2232
2233 // Skip exception frame.
2234 if (strcmp(os->name(), ".eh_frame") == 0)
2235 return false;
2236
2237 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2238 os->find_relaxed_input_section(this, text_shndx) != NULL);
2239
2240 return true;
2241 }
2242
2243
2244 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2245 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2246
2247 template<int size, bool big_endian>
2248 bool
2249 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2250 const elfcpp::Shdr<size, big_endian>& shdr,
2251 const Relobj::Output_sections& out_sections,
2252 const Symbol_table* symtab,
2253 const unsigned char* pshdrs)
2254 {
2255 unsigned int sh_type = shdr.get_sh_type();
2256 if (sh_type != elfcpp::SHT_RELA)
2257 return false;
2258
2259 // Ignore empty sections.
2260 off_t sh_size = shdr.get_sh_size();
2261 if (sh_size == 0)
2262 return false;
2263
2264 // Ignore reloc section with unexpected symbol table. The
2265 // error will be reported in the final link.
2266 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2267 return false;
2268
2269 gold_assert(sh_type == elfcpp::SHT_RELA);
2270 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2271
2272 // Ignore reloc section with unexpected entsize or uneven size.
2273 // The error will be reported in the final link.
2274 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2275 return false;
2276
2277 // Ignore reloc section with bad info. This error will be
2278 // reported in the final link.
2279 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2280 if (text_shndx >= this->shnum())
2281 return false;
2282
2283 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2284 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2285 text_shndx * shdr_size);
2286 return this->text_section_is_scannable(text_shdr, text_shndx,
2287 out_sections[text_shndx], symtab);
2288 }
2289
2290
2291 // Scan section SHNDX for errata 843419 and 835769.
2292
2293 template<int size, bool big_endian>
2294 void
2295 AArch64_relobj<size, big_endian>::scan_errata(
2296 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2297 Output_section* os, const Symbol_table* symtab,
2298 The_target_aarch64* target)
2299 {
2300 if (shdr.get_sh_size() == 0
2301 || (shdr.get_sh_flags() &
2302 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2303 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2304 return;
2305
2306 if (!os || symtab->is_section_folded(this, shndx)) return;
2307
2308 AArch64_address output_offset = this->get_output_section_offset(shndx);
2309 AArch64_address output_address;
2310 if (output_offset != invalid_address)
2311 output_address = os->address() + output_offset;
2312 else
2313 {
2314 const Output_relaxed_input_section* poris =
2315 os->find_relaxed_input_section(this, shndx);
2316 if (!poris) return;
2317 output_address = poris->address();
2318 }
2319
2320 // Update the addresses in previously generated erratum stubs. Unlike when
2321 // we scan relocations for stubs, if section addresses have changed due to
2322 // other relaxations we are unlikely to scan the same erratum instances
2323 // again.
2324 The_stub_table* stub_table = this->stub_table(shndx);
2325 if (stub_table)
2326 {
2327 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
2328 ipair(stub_table->find_erratum_stubs_for_input_section(this, shndx));
2329 for (Erratum_stub_set_iter p = ipair.first; p != ipair.second; ++p)
2330 (*p)->update_erratum_address(output_address);
2331 }
2332
2333 section_size_type input_view_size = 0;
2334 const unsigned char* input_view =
2335 this->section_contents(shndx, &input_view_size, false);
2336
2337 Mapping_symbol_position section_start(shndx, 0);
2338 // Find the first mapping symbol record within section shndx.
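// Mapping symbols partition the section into code ('x') and data ('d')
// spans; only the code spans are scanned for errata below.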
2339 typename Mapping_symbol_info::const_iterator p =
2340 this->mapping_symbol_info_.lower_bound(section_start);
2341 while (p != this->mapping_symbol_info_.end() &&
2342 p->first.shndx_ == shndx)
2343 {
2344 typename Mapping_symbol_info::const_iterator prev = p;
2345 ++p;
2346 if (prev->second == 'x')
2347 {
2348 section_size_type span_start =
2349 convert_to_section_size_type(prev->first.offset_);
2350 section_size_type span_end;
2351 if (p != this->mapping_symbol_info_.end()
2352 && p->first.shndx_ == shndx)
2353 span_end = convert_to_section_size_type(p->first.offset_);
2354 else
2355 span_end = convert_to_section_size_type(shdr.get_sh_size());
2356
2357 // We do not share the scanning code for the two errata. For 843419,
2358 // only the last few insns of each page are examined, which is fast,
2359 // whereas for 835769, every insn pair needs to be checked.
2360
2361 if (parameters->options().fix_cortex_a53_843419())
2362 target->scan_erratum_843419_span(
2363 this, shndx, span_start, span_end,
2364 const_cast<unsigned char*>(input_view), output_address);
2365
2366 if (parameters->options().fix_cortex_a53_835769())
2367 target->scan_erratum_835769_span(
2368 this, shndx, span_start, span_end,
2369 const_cast<unsigned char*>(input_view), output_address);
2370 }
2371 }
2372 }
2373
2374
2375 // Scan relocations for stub generation.
2376
2377 template<int size, bool big_endian>
2378 void
2379 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2380 The_target_aarch64* target,
2381 const Symbol_table* symtab,
2382 const Layout* layout)
2383 {
2384 unsigned int shnum = this->shnum();
2385 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2386
2387 // Read the section headers.
2388 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2389 shnum * shdr_size,
2390 true, true);
2391
2392 // To speed up processing, we set up hash tables for fast lookup of
2393 // input offsets to output addresses.
2394 this->initialize_input_to_output_maps();
2395
2396 const Relobj::Output_sections& out_sections(this->output_sections());
2397
2398 Relocate_info<size, big_endian> relinfo;
2399 relinfo.symtab = symtab;
2400 relinfo.layout = layout;
2401 relinfo.object = this;
2402
2403 // Do relocation stubs scanning.
2404 const unsigned char* p = pshdrs + shdr_size;
2405 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2406 {
2407 const elfcpp::Shdr<size, big_endian> shdr(p);
2408 if (parameters->options().fix_cortex_a53_843419()
2409 || parameters->options().fix_cortex_a53_835769())
2410 scan_errata(i, shdr, out_sections[i], symtab, target);
2411 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2412 pshdrs))
2413 {
2414 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2415 AArch64_address output_offset =
2416 this->get_output_section_offset(index);
2417 AArch64_address output_address;
2418 if (output_offset != invalid_address)
2419 {
2420 output_address = out_sections[index]->address() + output_offset;
2421 }
2422 else
2423 {
2424 // Currently this only happens for a relaxed section.
2425 const Output_relaxed_input_section* poris =
2426 out_sections[index]->find_relaxed_input_section(this, index);
2427 gold_assert(poris != NULL);
2428 output_address = poris->address();
2429 }
2430
2431 // Get the relocations.
2432 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2433 shdr.get_sh_size(),
2434 true, false);
2435
2436 // Get the section contents.
2437 section_size_type input_view_size = 0;
2438 const unsigned char* input_view =
2439 this->section_contents(index, &input_view_size, false);
2440
2441 relinfo.reloc_shndx = i;
2442 relinfo.data_shndx = index;
2443 unsigned int sh_type = shdr.get_sh_type();
2444 unsigned int reloc_size;
2445 gold_assert(sh_type == elfcpp::SHT_RELA);
2446 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2447
2448 Output_section* os = out_sections[index];
2449 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2450 shdr.get_sh_size() / reloc_size,
2451 os,
2452 output_offset == invalid_address,
2453 input_view, output_address,
2454 input_view_size);
2455 }
2456 }
2457 }
2458
2459
2460 // A class to wrap an ordinary input section containing executable code.
2461
2462 template<int size, bool big_endian>
2463 class AArch64_input_section : public Output_relaxed_input_section
2464 {
2465 public:
2466 typedef Stub_table<size, big_endian> The_stub_table;
2467
2468 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2469 : Output_relaxed_input_section(relobj, shndx, 1),
2470 stub_table_(NULL),
2471 original_contents_(NULL), original_size_(0),
2472 original_addralign_(1)
2473 { }
2474
2475 ~AArch64_input_section()
2476 { delete[] this->original_contents_; }
2477
2478 // Initialize.
2479 void
2480 init();
2481
2482 // Set the stub_table.
2483 void
2484 set_stub_table(The_stub_table* st)
2485 { this->stub_table_ = st; }
2486
2487 // Whether this is a stub table owner.
2488 bool
2489 is_stub_table_owner() const
2490 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2491
2492 // Return the original size of the section.
2493 uint32_t
2494 original_size() const
2495 { return this->original_size_; }
2496
2497 // Return the stub table.
2498 The_stub_table*
2499 stub_table()
2500 { return stub_table_; }
2501
2502 protected:
2503 // Write out this input section.
2504 void
2505 do_write(Output_file*);
2506
2507 // Return required alignment of this.
2508 uint64_t
2509 do_addralign() const
2510 {
2511 if (this->is_stub_table_owner())
2512 return std::max(this->stub_table_->addralign(),
2513 static_cast<uint64_t>(this->original_addralign_));
2514 else
2515 return this->original_addralign_;
2516 }
2517
2518 // Finalize data size.
2519 void
2520 set_final_data_size();
2521
2522 // Reset address and file offset.
2523 void
2524 do_reset_address_and_file_offset();
2525
2526 // Output offset.
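// Map OFFSET within the original input section to an offset in this relaxed
// section. Offsets inside the original contents map unchanged; offsets beyond
// them (i.e. into the appended stub table) are not mapped.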
2527 bool
2528 do_output_offset(const Relobj* object, unsigned int shndx,
2529 section_offset_type offset,
2530 section_offset_type* poutput) const
2531 {
2532 if ((object == this->relobj())
2533 && (shndx == this->shndx())
2534 && (offset >= 0)
2535 && (offset <=
2536 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2537 {
2538 *poutput = offset;
2539 return true;
2540 }
2541 else
2542 return false;
2543 }
2544
2545 private:
2546 // Copying is not allowed.
2547 AArch64_input_section(const AArch64_input_section&);
2548 AArch64_input_section& operator=(const AArch64_input_section&);
2549
2550 // The relocation stubs.
2551 The_stub_table* stub_table_;
2552 // Original section contents. We have to make a copy here since the file
2553 // containing the original section may not be locked when we need to access
2554 // the contents.
2555 unsigned char* original_contents_;
2556 // Section size of the original input section.
2557 uint32_t original_size_;
2558 // Address alignment of the original input section.
2559 uint32_t original_addralign_;
2560 }; // End of AArch64_input_section
2561
2562
2563 // Finalize data size.
2564
2565 template<int size, bool big_endian>
2566 void
2567 AArch64_input_section<size, big_endian>::set_final_data_size()
2568 {
2569 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2570
2571 if (this->is_stub_table_owner())
2572 {
2573 this->stub_table_->finalize_data_size();
2574 off = align_address(off, this->stub_table_->addralign());
2575 off += this->stub_table_->data_size();
2576 }
2577 this->set_data_size(off);
2578 }
2579
2580
2581 // Reset address and file offset.
2582
2583 template<int size, bool big_endian>
2584 void
2585 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2586 {
2587 // Size of the original input section contents.
2588 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2589
2590 // If this is a stub table owner, account for the stub table size.
2591 if (this->is_stub_table_owner())
2592 {
2593 The_stub_table* stub_table = this->stub_table_;
2594
2595 // Reset the stub table's address and file offset. The
2596 // current data size for child will be updated after that.
2597 stub_table_->reset_address_and_file_offset();
2598 off = align_address(off, stub_table_->addralign());
2599 off += stub_table->current_data_size();
2600 }
2601
2602 this->set_current_data_size(off);
2603 }
2604
2605
2606 // Initialize an AArch64_input_section.
2607
2608 template<int size, bool big_endian>
2609 void
2610 AArch64_input_section<size, big_endian>::init()
2611 {
2612 Relobj* relobj = this->relobj();
2613 unsigned int shndx = this->shndx();
2614
2615 // We have to cache original size, alignment and contents to avoid locking
2616 // the original file.
2617 this->original_addralign_ =
2618 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2619
2620 // This is not efficient but we expect only a small number of relaxed
2621 // input sections for stubs.
2622 section_size_type section_size;
2623 const unsigned char* section_contents =
2624 relobj->section_contents(shndx, &section_size, false);
2625 this->original_size_ =
2626 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2627
2628 gold_assert(this->original_contents_ == NULL);
2629 this->original_contents_ = new unsigned char[section_size];
2630 memcpy(this->original_contents_, section_contents, section_size);
2631
2632 // We want to make this look like the original input section after
2633 // output sections are finalized.
2634 Output_section* os = relobj->output_section(shndx);
2635 off_t offset = relobj->output_section_offset(shndx);
2636 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2637 this->set_address(os->address() + offset);
2638 this->set_file_offset(os->offset() + offset);
2639 this->set_current_data_size(this->original_size_);
2640 this->finalize_data_size();
2641 }
2642
2643
2644 // Write data to output file.
2645
2646 template<int size, bool big_endian>
2647 void
2648 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2649 {
2650 // We have to write out the original section content.
2651 gold_assert(this->original_contents_ != NULL);
2652 of->write(this->offset(), this->original_contents_,
2653 this->original_size_);
2654
2655 // If this owns a stub table and it is not empty, write it.
2656 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2657 this->stub_table_->write(of);
2658 }
2659
2660
2661 // AArch64 output section class. This is defined mainly to add a number of stub
2662 // generation methods.
2663
2664 template<int size, bool big_endian>
2665 class AArch64_output_section : public Output_section
2666 {
2667 public:
2668 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2669 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2670 typedef Stub_table<size, big_endian> The_stub_table;
2671 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2672
2673 public:
2674 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2675 elfcpp::Elf_Xword flags)
2676 : Output_section(name, type, flags)
2677 { }
2678
2679 ~AArch64_output_section() {}
2680
2681 // Group input sections for stub generation.
2682 void
2683 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2684 const Task*);
2685
2686 private:
2687 typedef Output_section::Input_section Input_section;
2688 typedef Output_section::Input_section_list Input_section_list;
2689
2690 // Create a stub group.
2691 void
2692 create_stub_group(Input_section_list::const_iterator,
2693 Input_section_list::const_iterator,
2694 Input_section_list::const_iterator,
2695 The_target_aarch64*,
2696 std::vector<Output_relaxed_input_section*>&,
2697 const Task*);
2698 }; // End of AArch64_output_section
2699
2700
2701 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2702 // the input section that will be the owner of the stub table.
2703
2704 template<int size, bool big_endian> void
2705 AArch64_output_section<size, big_endian>::create_stub_group(
2706 Input_section_list::const_iterator first,
2707 Input_section_list::const_iterator last,
2708 Input_section_list::const_iterator owner,
2709 The_target_aarch64* target,
2710 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2711 const Task* task)
2712 {
2713 // Currently we convert ordinary input sections into relaxed sections only
2714 // at this point.
2715 The_aarch64_input_section* input_section;
2716 if (owner->is_relaxed_input_section())
2717 gold_unreachable();
2718 else
2719 {
2720 gold_assert(owner->is_input_section());
2721 // Create a new relaxed input section. We need to lock the original
2722 // file.
2723 Task_lock_obj<Object> tl(task, owner->relobj());
2724 input_section =
2725 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2726 new_relaxed_sections.push_back(input_section);
2727 }
2728
2729 // Create a stub table.
2730 The_stub_table* stub_table =
2731 target->new_stub_table(input_section);
2732
2733 input_section->set_stub_table(stub_table);
2734
2735 Input_section_list::const_iterator p = first;
2736 // Look for input sections or relaxed input sections in [first ... last].
2737 do
2738 {
2739 if (p->is_input_section() || p->is_relaxed_input_section())
2740 {
2741 // The stub table information for input sections lives
2742 // in their objects.
2743 The_aarch64_relobj* aarch64_relobj =
2744 static_cast<The_aarch64_relobj*>(p->relobj());
2745 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2746 }
2747 }
2748 while (p++ != last);
2749 }
2750
2751
2752 // Group input sections for stub generation. GROUP_SIZE is roughly the limit of
2753 // stub groups. We grow a stub group by adding input sections until the size is
2754 // just below GROUP_SIZE. The last input section will be converted into a stub
2755 // table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input sections
2756 // after the stub table, effectively doubling the group size.
2757 //
2758 // This is similar to the group_sections() function in elf32-arm.c but is
2759 // implemented differently.
2760
2761 template<int size, bool big_endian>
2762 void AArch64_output_section<size, big_endian>::group_sections(
2763 section_size_type group_size,
2764 bool stubs_always_after_branch,
2765 Target_aarch64<size, big_endian>* target,
2766 const Task* task)
2767 {
2768 typedef enum
2769 {
2770 NO_GROUP,
2771 FINDING_STUB_SECTION,
2772 HAS_STUB_SECTION
2773 } State;
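// NO_GROUP: no group is currently being formed.
// FINDING_STUB_SECTION: a group is growing; once adding a section would reach
// GROUP_SIZE, either the group is closed with its last section as the stub
// table owner, or, when STUBS_ALWAYS_AFTER_BRANCH is false, we switch to
// HAS_STUB_SECTION so sections after the stub table can join the group.
// HAS_STUB_SECTION: sections after the stub table may still join the group
// (marked below as not supported yet).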
2774
2775 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2776
2777 State state = NO_GROUP;
2778 section_size_type off = 0;
2779 section_size_type group_begin_offset = 0;
2780 section_size_type group_end_offset = 0;
2781 section_size_type stub_table_end_offset = 0;
2782 Input_section_list::const_iterator group_begin =
2783 this->input_sections().end();
2784 Input_section_list::const_iterator stub_table =
2785 this->input_sections().end();
2786 Input_section_list::const_iterator group_end = this->input_sections().end();
2787 for (Input_section_list::const_iterator p = this->input_sections().begin();
2788 p != this->input_sections().end();
2789 ++p)
2790 {
2791 section_size_type section_begin_offset =
2792 align_address(off, p->addralign());
2793 section_size_type section_end_offset =
2794 section_begin_offset + p->data_size();
2795
2796 // Check to see if we should group the previously seen sections.
2797 switch (state)
2798 {
2799 case NO_GROUP:
2800 break;
2801
2802 case FINDING_STUB_SECTION:
2803 // Adding this section makes the group larger than GROUP_SIZE.
2804 if (section_end_offset - group_begin_offset >= group_size)
2805 {
2806 if (stubs_always_after_branch)
2807 {
2808 gold_assert(group_end != this->input_sections().end());
2809 this->create_stub_group(group_begin, group_end, group_end,
2810 target, new_relaxed_sections,
2811 task);
2812 state = NO_GROUP;
2813 }
2814 else
2815 {
2816 // Input sections up to stub_group_size bytes after the stub
2817 // table can be handled by it too.
2818 state = HAS_STUB_SECTION;
2819 stub_table = group_end;
2820 stub_table_end_offset = group_end_offset;
2821 }
2822 }
2823 break;
2824
2825 case HAS_STUB_SECTION:
2826 // Adding this section makes the post stub-section group larger
2827 // than GROUP_SIZE.
2828 gold_unreachable();
2829 // NOT SUPPORTED YET. For completeness only.
2830 if (section_end_offset - stub_table_end_offset >= group_size)
2831 {
2832 gold_assert(group_end != this->input_sections().end());
2833 this->create_stub_group(group_begin, group_end, stub_table,
2834 target, new_relaxed_sections, task);
2835 state = NO_GROUP;
2836 }
2837 break;
2838
2839 default:
2840 gold_unreachable();
2841 }
2842
2843 // If we see an input section and currently there is no group, start
2844 // a new one. Skip any empty sections. We look at the data size
2845 // instead of calling p->relobj()->section_size() to avoid locking.
2846 if ((p->is_input_section() || p->is_relaxed_input_section())
2847 && (p->data_size() != 0))
2848 {
2849 if (state == NO_GROUP)
2850 {
2851 state = FINDING_STUB_SECTION;
2852 group_begin = p;
2853 group_begin_offset = section_begin_offset;
2854 }
2855
2856 // Keep track of the last input section seen.
2857 group_end = p;
2858 group_end_offset = section_end_offset;
2859 }
2860
2861 off = section_end_offset;
2862 }
2863
2864 // Create a stub group for any ungrouped sections.
2865 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2866 {
2867 gold_assert(group_end != this->input_sections().end());
2868 this->create_stub_group(group_begin, group_end,
2869 (state == FINDING_STUB_SECTION
2870 ? group_end
2871 : stub_table),
2872 target, new_relaxed_sections, task);
2873 }
2874
2875 if (!new_relaxed_sections.empty())
2876 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2877
2878 // Update the section offsets.
2879 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2880 {
2881 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2882 new_relaxed_sections[i]->relobj());
2883 unsigned int shndx = new_relaxed_sections[i]->shndx();
2884 // Tell AArch64_relobj that this input section is converted.
2885 relobj->convert_input_section_to_relaxed_section(shndx);
2886 }
2887 } // End of AArch64_output_section::group_sections
2888
2889
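// The single AArch64 relocation property table; it is created lazily when
// this target is selected as the default target (see
// do_select_as_default_target below).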
2890 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2891
2892
2893 // The aarch64 target class.
2894 // See the ABI at
2895 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2896 template<int size, bool big_endian>
2897 class Target_aarch64 : public Sized_target<size, big_endian>
2898 {
2899 public:
2900 typedef Target_aarch64<size, big_endian> This;
2901 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2902 Reloc_section;
2903 typedef Relocate_info<size, big_endian> The_relocate_info;
2904 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2905 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2906 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2907 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2908 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2909 typedef Stub_table<size, big_endian> The_stub_table;
2910 typedef std::vector<The_stub_table*> Stub_table_list;
2911 typedef typename Stub_table_list::iterator Stub_table_iterator;
2912 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2913 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2914 typedef Unordered_map<Section_id,
2915 AArch64_input_section<size, big_endian>*,
2916 Section_id_hash> AArch64_input_section_map;
2917 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
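// Thread control block size: two pointer-sized words (16 bytes for a 64-bit
// target).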
2918 const static int TCB_SIZE = size / 8 * 2;
2919
2920 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2921 : Sized_target<size, big_endian>(info),
2922 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2923 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2924 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2925 got_mod_index_offset_(-1U),
2926 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2927 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2928 { }
2929
2930 // Scan the relocations to determine unreferenced sections for
2931 // garbage collection.
2932 void
2933 gc_process_relocs(Symbol_table* symtab,
2934 Layout* layout,
2935 Sized_relobj_file<size, big_endian>* object,
2936 unsigned int data_shndx,
2937 unsigned int sh_type,
2938 const unsigned char* prelocs,
2939 size_t reloc_count,
2940 Output_section* output_section,
2941 bool needs_special_offset_handling,
2942 size_t local_symbol_count,
2943 const unsigned char* plocal_symbols);
2944
2945 // Scan the relocations to look for symbol adjustments.
2946 void
2947 scan_relocs(Symbol_table* symtab,
2948 Layout* layout,
2949 Sized_relobj_file<size, big_endian>* object,
2950 unsigned int data_shndx,
2951 unsigned int sh_type,
2952 const unsigned char* prelocs,
2953 size_t reloc_count,
2954 Output_section* output_section,
2955 bool needs_special_offset_handling,
2956 size_t local_symbol_count,
2957 const unsigned char* plocal_symbols);
2958
2959 // Finalize the sections.
2960 void
2961 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2962
2963 // Return the value to use for a dynamic symbol which requires special
2964 // treatment.
2965 uint64_t
2966 do_dynsym_value(const Symbol*) const;
2967
2968 // Relocate a section.
2969 void
2970 relocate_section(const Relocate_info<size, big_endian>*,
2971 unsigned int sh_type,
2972 const unsigned char* prelocs,
2973 size_t reloc_count,
2974 Output_section* output_section,
2975 bool needs_special_offset_handling,
2976 unsigned char* view,
2977 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2978 section_size_type view_size,
2979 const Reloc_symbol_changes*);
2980
2981 // Scan the relocs during a relocatable link.
2982 void
2983 scan_relocatable_relocs(Symbol_table* symtab,
2984 Layout* layout,
2985 Sized_relobj_file<size, big_endian>* object,
2986 unsigned int data_shndx,
2987 unsigned int sh_type,
2988 const unsigned char* prelocs,
2989 size_t reloc_count,
2990 Output_section* output_section,
2991 bool needs_special_offset_handling,
2992 size_t local_symbol_count,
2993 const unsigned char* plocal_symbols,
2994 Relocatable_relocs*);
2995
2996 // Scan the relocs for --emit-relocs.
2997 void
2998 emit_relocs_scan(Symbol_table* symtab,
2999 Layout* layout,
3000 Sized_relobj_file<size, big_endian>* object,
3001 unsigned int data_shndx,
3002 unsigned int sh_type,
3003 const unsigned char* prelocs,
3004 size_t reloc_count,
3005 Output_section* output_section,
3006 bool needs_special_offset_handling,
3007 size_t local_symbol_count,
3008 const unsigned char* plocal_syms,
3009 Relocatable_relocs* rr);
3010
3011 // Relocate a section during a relocatable link.
3012 void
3013 relocate_relocs(
3014 const Relocate_info<size, big_endian>*,
3015 unsigned int sh_type,
3016 const unsigned char* prelocs,
3017 size_t reloc_count,
3018 Output_section* output_section,
3019 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
3020 unsigned char* view,
3021 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
3022 section_size_type view_size,
3023 unsigned char* reloc_view,
3024 section_size_type reloc_view_size);
3025
3026 // Return the symbol index to use for a target specific relocation.
3027 // The only target specific relocation is R_AARCH64_TLSDESC for a
3028 // local symbol, which is an absolute reloc.
3029 unsigned int
3030 do_reloc_symbol_index(void*, unsigned int r_type) const
3031 {
3032 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
3033 return 0;
3034 }
3035
3036 // Return the addend to use for a target specific relocation.
3037 uint64_t
3038 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
3039
3040 // Return the PLT address to use for a global or local symbol.
3041 uint64_t
3042 do_plt_address_for_global(const Symbol* gsym) const
3043 { return this->plt_section()->address_for_global(gsym); }
3044
3045 uint64_t
3046 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
3047 { return this->plt_section()->address_for_local(relobj, symndx); }
3048
3049 // This function should be defined in targets that can use relocation
3050 // types to determine whether a function's pointer is taken (implemented
3051 // in local_reloc_may_be_function_pointer and
3052 // global_reloc_may_be_function_pointer). ICF uses this in safe mode to only
3053 // fold those functions whose pointer is definitely not taken.
3054 bool
3055 do_can_check_for_function_pointers() const
3056 { return true; }
3057
3058 // Return the number of entries in the PLT.
3059 unsigned int
3060 plt_entry_count() const;
3061
3062 // Return the offset of the first non-reserved PLT entry.
3063 unsigned int
3064 first_plt_entry_offset() const;
3065
3066 // Return the size of each PLT entry.
3067 unsigned int
3068 plt_entry_size() const;
3069
3070 // Create a stub table.
3071 The_stub_table*
3072 new_stub_table(The_aarch64_input_section*);
3073
3074 // Create an aarch64 input section.
3075 The_aarch64_input_section*
3076 new_aarch64_input_section(Relobj*, unsigned int);
3077
3078 // Find an aarch64 input section instance for a given OBJ and SHNDX.
3079 The_aarch64_input_section*
3080 find_aarch64_input_section(Relobj*, unsigned int) const;
3081
3082 // Return the thread control block size.
3083 unsigned int
3084 tcb_size() const { return This::TCB_SIZE; }
3085
3086 // Scan a section for stub generation.
3087 void
3088 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
3089 const unsigned char*, size_t, Output_section*,
3090 bool, const unsigned char*,
3091 Address,
3092 section_size_type);
3093
3094 // Scan a relocation section for stubs.
3095 template<int sh_type>
3096 void
3097 scan_reloc_section_for_stubs(
3098 const The_relocate_info* relinfo,
3099 const unsigned char* prelocs,
3100 size_t reloc_count,
3101 Output_section* output_section,
3102 bool needs_special_offset_handling,
3103 const unsigned char* view,
3104 Address view_address,
3105 section_size_type);
3106
3107 // Relocate a single reloc stub.
3108 void
3109 relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
3110 Output_section*, unsigned char*, Address,
3111 section_size_type);
3112
3113 // Get the default AArch64 target.
3114 static This*
3115 current_target()
3116 {
3117 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
3118 && parameters->target().get_size() == size
3119 && parameters->target().is_big_endian() == big_endian);
3120 return static_cast<This*>(parameters->sized_target<size, big_endian>());
3121 }
3122
3123
3124 // Scan erratum 843419 for a part of a section.
3125 void
3126 scan_erratum_843419_span(
3127 AArch64_relobj<size, big_endian>*,
3128 unsigned int,
3129 const section_size_type,
3130 const section_size_type,
3131 unsigned char*,
3132 Address);
3133
3134 // Scan erratum 835769 for a part of a section.
3135 void
3136 scan_erratum_835769_span(
3137 AArch64_relobj<size, big_endian>*,
3138 unsigned int,
3139 const section_size_type,
3140 const section_size_type,
3141 unsigned char*,
3142 Address);
3143
3144 protected:
3145 void
3146 do_select_as_default_target()
3147 {
3148 gold_assert(aarch64_reloc_property_table == NULL);
3149 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3150 }
3151
3152 // Add a new reloc argument, returning the index in the vector.
3153 size_t
3154 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3155 unsigned int r_sym)
3156 {
3157 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3158 return this->tlsdesc_reloc_info_.size() - 1;
3159 }
3160
3161 virtual Output_data_plt_aarch64<size, big_endian>*
3162 do_make_data_plt(Layout* layout,
3163 Output_data_got_aarch64<size, big_endian>* got,
3164 Output_data_space* got_plt,
3165 Output_data_space* got_irelative)
3166 {
3167 return new Output_data_plt_aarch64_standard<size, big_endian>(
3168 layout, got, got_plt, got_irelative);
3169 }
3170
3171
3172 // do_make_elf_object to override the same function in the base class.
3173 Object*
3174 do_make_elf_object(const std::string&, Input_file*, off_t,
3175 const elfcpp::Ehdr<size, big_endian>&);
3176
3177 Output_data_plt_aarch64<size, big_endian>*
3178 make_data_plt(Layout* layout,
3179 Output_data_got_aarch64<size, big_endian>* got,
3180 Output_data_space* got_plt,
3181 Output_data_space* got_irelative)
3182 {
3183 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3184 }
3185
3186 // We only need to generate stubs, and hence perform relaxation if we are
3187 // not doing relocatable linking.
3188 virtual bool
3189 do_may_relax() const
3190 { return !parameters->options().relocatable(); }
3191
3192 // Relaxation hook. This is where we do stub generation.
3193 virtual bool
3194 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3195
3196 void
3197 group_sections(Layout* layout,
3198 section_size_type group_size,
3199 bool stubs_always_after_branch,
3200 const Task* task);
3201
3202 void
3203 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3204 const Sized_symbol<size>*, unsigned int,
3205 const Symbol_value<size>*,
3206 typename elfcpp::Elf_types<size>::Elf_Swxword,
3207 Address Elf_Addr);
3208
3209 // Make an output section.
3210 Output_section*
3211 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3212 elfcpp::Elf_Xword flags)
3213 { return new The_aarch64_output_section(name, type, flags); }
3214
3215 private:
3216 // The class which scans relocations.
3217 class Scan
3218 {
3219 public:
3220 Scan()
3221 : issued_non_pic_error_(false)
3222 { }
3223
3224 inline void
3225 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3226 Sized_relobj_file<size, big_endian>* object,
3227 unsigned int data_shndx,
3228 Output_section* output_section,
3229 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3230 const elfcpp::Sym<size, big_endian>& lsym,
3231 bool is_discarded);
3232
3233 inline void
3234 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3235 Sized_relobj_file<size, big_endian>* object,
3236 unsigned int data_shndx,
3237 Output_section* output_section,
3238 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3239 Symbol* gsym);
3240
3241 inline bool
3242 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3243 Target_aarch64<size, big_endian>* ,
3244 Sized_relobj_file<size, big_endian>* ,
3245 unsigned int ,
3246 Output_section* ,
3247 const elfcpp::Rela<size, big_endian>& ,
3248 unsigned int r_type,
3249 const elfcpp::Sym<size, big_endian>&);
3250
3251 inline bool
3252 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3253 Target_aarch64<size, big_endian>* ,
3254 Sized_relobj_file<size, big_endian>* ,
3255 unsigned int ,
3256 Output_section* ,
3257 const elfcpp::Rela<size, big_endian>& ,
3258 unsigned int r_type,
3259 Symbol* gsym);
3260
3261 private:
3262 static void
3263 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3264 unsigned int r_type);
3265
3266 static void
3267 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3268 unsigned int r_type, Symbol*);
3269
3270 inline bool
3271 possible_function_pointer_reloc(unsigned int r_type);
3272
3273 void
3274 check_non_pic(Relobj*, unsigned int r_type);
3275
3276 bool
3277 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3278 unsigned int r_type);
3279
3280 // Whether we have issued an error about a non-PIC compilation.
3281 bool issued_non_pic_error_;
3282 };
3283
3284 // The class which implements relocation.
3285 class Relocate
3286 {
3287 public:
3288 Relocate()
3289 : skip_call_tls_get_addr_(false)
3290 { }
3291
3292 ~Relocate()
3293 { }
3294
3295 // Do a relocation. Return false if the caller should not issue
3296 // any warnings about this relocation.
3297 inline bool
3298 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3299 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3300 const Sized_symbol<size>*, const Symbol_value<size>*,
3301 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3302 section_size_type);
3303
3304 private:
3305 inline typename AArch64_relocate_functions<size, big_endian>::Status
3306 relocate_tls(const Relocate_info<size, big_endian>*,
3307 Target_aarch64<size, big_endian>*,
3308 size_t,
3309 const elfcpp::Rela<size, big_endian>&,
3310 unsigned int r_type, const Sized_symbol<size>*,
3311 const Symbol_value<size>*,
3312 unsigned char*,
3313 typename elfcpp::Elf_types<size>::Elf_Addr);
3314
3315 inline typename AArch64_relocate_functions<size, big_endian>::Status
3316 tls_gd_to_le(
3317 const Relocate_info<size, big_endian>*,
3318 Target_aarch64<size, big_endian>*,
3319 const elfcpp::Rela<size, big_endian>&,
3320 unsigned int,
3321 unsigned char*,
3322 const Symbol_value<size>*);
3323
3324 inline typename AArch64_relocate_functions<size, big_endian>::Status
3325 tls_ld_to_le(
3326 const Relocate_info<size, big_endian>*,
3327 Target_aarch64<size, big_endian>*,
3328 const elfcpp::Rela<size, big_endian>&,
3329 unsigned int,
3330 unsigned char*,
3331 const Symbol_value<size>*);
3332
3333 inline typename AArch64_relocate_functions<size, big_endian>::Status
3334 tls_ie_to_le(
3335 const Relocate_info<size, big_endian>*,
3336 Target_aarch64<size, big_endian>*,
3337 const elfcpp::Rela<size, big_endian>&,
3338 unsigned int,
3339 unsigned char*,
3340 const Symbol_value<size>*);
3341
3342 inline typename AArch64_relocate_functions<size, big_endian>::Status
3343 tls_desc_gd_to_le(
3344 const Relocate_info<size, big_endian>*,
3345 Target_aarch64<size, big_endian>*,
3346 const elfcpp::Rela<size, big_endian>&,
3347 unsigned int,
3348 unsigned char*,
3349 const Symbol_value<size>*);
3350
3351 inline typename AArch64_relocate_functions<size, big_endian>::Status
3352 tls_desc_gd_to_ie(
3353 const Relocate_info<size, big_endian>*,
3354 Target_aarch64<size, big_endian>*,
3355 const elfcpp::Rela<size, big_endian>&,
3356 unsigned int,
3357 unsigned char*,
3358 const Symbol_value<size>*,
3359 typename elfcpp::Elf_types<size>::Elf_Addr,
3360 typename elfcpp::Elf_types<size>::Elf_Addr);
3361
3362 bool skip_call_tls_get_addr_;
3363
3364 }; // End of class Relocate
3365
3366 // Adjust TLS relocation type based on the options and whether this
3367 // is a local symbol.
3368 static tls::Tls_optimization
3369 optimize_tls_reloc(bool is_final, int r_type);
3370
3371 // Get the GOT section, creating it if necessary.
3372 Output_data_got_aarch64<size, big_endian>*
3373 got_section(Symbol_table*, Layout*);
3374
3375 // Get the GOT PLT section.
3376 Output_data_space*
3377 got_plt_section() const
3378 {
3379 gold_assert(this->got_plt_ != NULL);
3380 return this->got_plt_;
3381 }
3382
3383 // Get the GOT section for TLSDESC entries.
3384 Output_data_got<size, big_endian>*
3385 got_tlsdesc_section() const
3386 {
3387 gold_assert(this->got_tlsdesc_ != NULL);
3388 return this->got_tlsdesc_;
3389 }
3390
3391 // Create the PLT section.
3392 void
3393 make_plt_section(Symbol_table* symtab, Layout* layout);
3394
3395 // Create a PLT entry for a global symbol.
3396 void
3397 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3398
3399 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3400 void
3401 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3402 Sized_relobj_file<size, big_endian>* relobj,
3403 unsigned int local_sym_index);
3404
3405 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3406 void
3407 define_tls_base_symbol(Symbol_table*, Layout*);
3408
3409 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3410 void
3411 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3412
3413 // Create a GOT entry for the TLS module index.
3414 unsigned int
3415 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3416 Sized_relobj_file<size, big_endian>* object);
3417
3418 // Get the PLT section.
3419 Output_data_plt_aarch64<size, big_endian>*
3420 plt_section() const
3421 {
3422 gold_assert(this->plt_ != NULL);
3423 return this->plt_;
3424 }
3425
3426 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3427 // ST_E_843419, we need an additional field for adrp offset.
3428 void create_erratum_stub(
3429 AArch64_relobj<size, big_endian>* relobj,
3430 unsigned int shndx,
3431 section_size_type erratum_insn_offset,
3432 Address erratum_address,
3433 typename Insn_utilities::Insntype erratum_insn,
3434 int erratum_type,
3435 unsigned int e843419_adrp_offset = 0);
3436
3437 // Return whether this is a 3-insn erratum sequence.
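// (Roughly: an ADRP, a load/store, and a load/store that uses the register
// written by the ADRP; the page-boundary condition is checked by the span
// scanner. See the Cortex-A53 erratum notice for the exact conditions.)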
3438 bool is_erratum_843419_sequence(
3439 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3440 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3441 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3442
3443 // Return whether this is a 835769 sequence.
3444 // (Implemented similarly to the check in elfnn-aarch64.c.)
3445 bool is_erratum_835769_sequence(
3446 typename elfcpp::Swap<32,big_endian>::Valtype,
3447 typename elfcpp::Swap<32,big_endian>::Valtype);
3448
3449 // Get the dynamic reloc section, creating it if necessary.
3450 Reloc_section*
3451 rela_dyn_section(Layout*);
3452
3453 // Get the section to use for TLSDESC relocations.
3454 Reloc_section*
3455 rela_tlsdesc_section(Layout*) const;
3456
3457 // Get the section to use for IRELATIVE relocations.
3458 Reloc_section*
3459 rela_irelative_section(Layout*);
3460
3461 // Add a potential copy relocation.
3462 void
3463 copy_reloc(Symbol_table* symtab, Layout* layout,
3464 Sized_relobj_file<size, big_endian>* object,
3465 unsigned int shndx, Output_section* output_section,
3466 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3467 {
3468 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3469 this->copy_relocs_.copy_reloc(symtab, layout,
3470 symtab->get_sized_symbol<size>(sym),
3471 object, shndx, output_section,
3472 r_type, reloc.get_r_offset(),
3473 reloc.get_r_addend(),
3474 this->rela_dyn_section(layout));
3475 }
3476
3477 // Information about this specific target which we pass to the
3478 // general Target structure.
3479 static const Target::Target_info aarch64_info;
3480
3481 // The types of GOT entries needed for this platform.
3482 // These values are exposed to the ABI in an incremental link.
3483 // Do not renumber existing values without changing the version
3484 // number of the .gnu_incremental_inputs section.
3485 enum Got_type
3486 {
3487 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3488 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3489 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3490 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3491 };
3492
3493 // This type is used as the argument to the target specific
3494 // relocation routines. The only target specific reloc is
3495 // R_AARCH64_TLSDESC against a local symbol.
3496 struct Tlsdesc_info
3497 {
3498 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3499 unsigned int a_r_sym)
3500 : object(a_object), r_sym(a_r_sym)
3501 { }
3502
3503 // The object in which the local symbol is defined.
3504 Sized_relobj_file<size, big_endian>* object;
3505 // The local symbol index in the object.
3506 unsigned int r_sym;
3507 };
3508
3509 // The GOT section.
3510 Output_data_got_aarch64<size, big_endian>* got_;
3511 // The PLT section.
3512 Output_data_plt_aarch64<size, big_endian>* plt_;
3513 // The GOT PLT section.
3514 Output_data_space* got_plt_;
3515 // The GOT section for IRELATIVE relocations.
3516 Output_data_space* got_irelative_;
3517 // The GOT section for TLSDESC relocations.
3518 Output_data_got<size, big_endian>* got_tlsdesc_;
3519 // The _GLOBAL_OFFSET_TABLE_ symbol.
3520 Symbol* global_offset_table_;
3521 // The dynamic reloc section.
3522 Reloc_section* rela_dyn_;
3523 // The section to use for IRELATIVE relocs.
3524 Reloc_section* rela_irelative_;
3525 // Relocs saved to avoid a COPY reloc.
3526 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3527 // Offset of the GOT entry for the TLS module index.
3528 unsigned int got_mod_index_offset_;
3529 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3530 // specific relocation. Here we store the object and local symbol
3531 // index for the relocation.
3532 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3533 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3534 bool tls_base_symbol_defined_;
3535 // List of stub_tables
3536 Stub_table_list stub_tables_;
3537 // Actual stub group size
3538 section_size_type stub_group_size_;
3539 AArch64_input_section_map aarch64_input_section_map_;
3540 }; // End of Target_aarch64
3541
3542
3543 template<>
3544 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3545 {
3546 64, // size
3547 false, // is_big_endian
3548 elfcpp::EM_AARCH64, // machine_code
3549 false, // has_make_symbol
3550 false, // has_resolve
3551 false, // has_code_fill
3552 false, // is_default_stack_executable
3553 true, // can_icf_inline_merge_sections
3554 '\0', // wrap_char
3555 "/lib/ld.so.1", // program interpreter
3556 0x400000, // default_text_segment_address
3557 0x10000, // abi_pagesize (overridable by -z max-page-size)
3558 0x1000, // common_pagesize (overridable by -z common-page-size)
3559 false, // isolate_execinstr
3560 0, // rosegment_gap
3561 elfcpp::SHN_UNDEF, // small_common_shndx
3562 elfcpp::SHN_UNDEF, // large_common_shndx
3563 0, // small_common_section_flags
3564 0, // large_common_section_flags
3565 NULL, // attributes_section
3566 NULL, // attributes_vendor
3567 "_start", // entry_symbol_name
3568 32, // hash_entry_size
3569 elfcpp::SHT_PROGBITS, // unwind_section_type
3570 };
3571
3572 template<>
3573 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3574 {
3575 32, // size
3576 false, // is_big_endian
3577 elfcpp::EM_AARCH64, // machine_code
3578 false, // has_make_symbol
3579 false, // has_resolve
3580 false, // has_code_fill
3581 false, // is_default_stack_executable
3582 false, // can_icf_inline_merge_sections
3583 '\0', // wrap_char
3584 "/lib/ld.so.1", // program interpreter
3585 0x400000, // default_text_segment_address
3586 0x10000, // abi_pagesize (overridable by -z max-page-size)
3587 0x1000, // common_pagesize (overridable by -z common-page-size)
3588 false, // isolate_execinstr
3589 0, // rosegment_gap
3590 elfcpp::SHN_UNDEF, // small_common_shndx
3591 elfcpp::SHN_UNDEF, // large_common_shndx
3592 0, // small_common_section_flags
3593 0, // large_common_section_flags
3594 NULL, // attributes_section
3595 NULL, // attributes_vendor
3596 "_start", // entry_symbol_name
3597 32, // hash_entry_size
3598 elfcpp::SHT_PROGBITS, // unwind_section_type
3599 };
3600
3601 template<>
3602 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3603 {
3604 64, // size
3605 true, // is_big_endian
3606 elfcpp::EM_AARCH64, // machine_code
3607 false, // has_make_symbol
3608 false, // has_resolve
3609 false, // has_code_fill
3610 false, // is_default_stack_executable
3611 true, // can_icf_inline_merge_sections
3612 '\0', // wrap_char
3613 "/lib/ld.so.1", // program interpreter
3614 0x400000, // default_text_segment_address
3615 0x10000, // abi_pagesize (overridable by -z max-page-size)
3616 0x1000, // common_pagesize (overridable by -z common-page-size)
3617 false, // isolate_execinstr
3618 0, // rosegment_gap
3619 elfcpp::SHN_UNDEF, // small_common_shndx
3620 elfcpp::SHN_UNDEF, // large_common_shndx
3621 0, // small_common_section_flags
3622 0, // large_common_section_flags
3623 NULL, // attributes_section
3624 NULL, // attributes_vendor
3625 "_start", // entry_symbol_name
3626 32, // hash_entry_size
3627 elfcpp::SHT_PROGBITS, // unwind_section_type
3628 };
3629
3630 template<>
3631 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3632 {
3633 32, // size
3634 true, // is_big_endian
3635 elfcpp::EM_AARCH64, // machine_code
3636 false, // has_make_symbol
3637 false, // has_resolve
3638 false, // has_code_fill
3639 false, // is_default_stack_executable
3640 false, // can_icf_inline_merge_sections
3641 '\0', // wrap_char
3642 "/lib/ld.so.1", // program interpreter
3643 0x400000, // default_text_segment_address
3644 0x10000, // abi_pagesize (overridable by -z max-page-size)
3645 0x1000, // common_pagesize (overridable by -z common-page-size)
3646 false, // isolate_execinstr
3647 0, // rosegment_gap
3648 elfcpp::SHN_UNDEF, // small_common_shndx
3649 elfcpp::SHN_UNDEF, // large_common_shndx
3650 0, // small_common_section_flags
3651 0, // large_common_section_flags
3652 NULL, // attributes_section
3653 NULL, // attributes_vendor
3654 "_start", // entry_symbol_name
3655 32, // hash_entry_size
3656 elfcpp::SHT_PROGBITS, // unwind_section_type
3657 };
3658
3659 // Get the GOT section, creating it if necessary.
3660
3661 template<int size, bool big_endian>
3662 Output_data_got_aarch64<size, big_endian>*
3663 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3664 Layout* layout)
3665 {
3666 if (this->got_ == NULL)
3667 {
3668 gold_assert(symtab != NULL && layout != NULL);
3669
3670 // When using -z now, we can treat .got.plt as a relro section.
3671 // Without -z now, it is modified after program startup by lazy
3672 // PLT relocations.
3673 bool is_got_plt_relro = parameters->options().now();
3674 Output_section_order got_order = (is_got_plt_relro
3675 ? ORDER_RELRO
3676 : ORDER_RELRO_LAST);
3677 Output_section_order got_plt_order = (is_got_plt_relro
3678 ? ORDER_RELRO
3679 : ORDER_NON_RELRO_FIRST);
3680
3681 // Layout of .got and .got.plt sections.
3682 // .got[0] &_DYNAMIC <- _GLOBAL_OFFSET_TABLE_
3683 // ...
3684 // .gotplt[0] reserved for ld.so (&linkmap) <-- DT_PLTGOT
3685 // .gotplt[1] reserved for ld.so (resolver)
3686 // .gotplt[2] reserved
3687
3688 // Generate .got section.
3689 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3690 layout);
3691 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3692 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3693 this->got_, got_order, true);
3694 // The first word of GOT is reserved for the address of .dynamic.
3695 // We put 0 here now. The value will be replaced later in
3696 // Output_data_got_aarch64::do_write.
3697 this->got_->add_constant(0);
3698
3699 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3700 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section,
3701 // even if there is a .got.plt section.
3702 this->global_offset_table_ =
3703 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3704 Symbol_table::PREDEFINED,
3705 this->got_,
3706 0, 0, elfcpp::STT_OBJECT,
3707 elfcpp::STB_LOCAL,
3708 elfcpp::STV_HIDDEN, 0,
3709 false, false);
3710
3711 // Generate .got.plt section.
3712 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3713 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3714 (elfcpp::SHF_ALLOC
3715 | elfcpp::SHF_WRITE),
3716 this->got_plt_, got_plt_order,
3717 is_got_plt_relro);
3718
3719 // The first three entries are reserved.
3720 this->got_plt_->set_current_data_size(
3721 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3722
3723 // If there are any IRELATIVE relocations, they get GOT entries
3724 // in .got.plt after the jump slot entries.
3725 this->got_irelative_ = new Output_data_space(size / 8,
3726 "** GOT IRELATIVE PLT");
3727 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3728 (elfcpp::SHF_ALLOC
3729 | elfcpp::SHF_WRITE),
3730 this->got_irelative_,
3731 got_plt_order,
3732 is_got_plt_relro);
3733
3734 // If there are any TLSDESC relocations, they get GOT entries in
3735 // .got.plt after the jump slot and IRELATIVE entries.
3736 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3737 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3738 (elfcpp::SHF_ALLOC
3739 | elfcpp::SHF_WRITE),
3740 this->got_tlsdesc_,
3741 got_plt_order,
3742 is_got_plt_relro);
3743
3744 if (!is_got_plt_relro)
3745 {
3746 // Those bytes can go into the relro segment.
3747 layout->increase_relro(
3748 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3749 }
3750
3751 }
3752 return this->got_;
3753 }
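// For illustration: in a 64-bit link the three reserved .got.plt slots
// above take AARCH64_GOTPLT_RESERVE_COUNT * (size / 8) = 3 * 8 = 24 bytes,
// which is exactly the amount handed to increase_relro() in the
// non -z now case so that the reserved words can still live in relro.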
3754
3755 // Get the dynamic reloc section, creating it if necessary.
3756
3757 template<int size, bool big_endian>
3758 typename Target_aarch64<size, big_endian>::Reloc_section*
3759 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3760 {
3761 if (this->rela_dyn_ == NULL)
3762 {
3763 gold_assert(layout != NULL);
3764 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3765 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3766 elfcpp::SHF_ALLOC, this->rela_dyn_,
3767 ORDER_DYNAMIC_RELOCS, false);
3768 }
3769 return this->rela_dyn_;
3770 }
3771
3772 // Get the section to use for IRELATIVE relocs, creating it if
3773 // necessary. These go in .rela.dyn, but only after all other dynamic
3774 // relocations. They need to follow the other dynamic relocations so
3775 // that they can refer to global variables initialized by those
3776 // relocs.
3777
3778 template<int size, bool big_endian>
3779 typename Target_aarch64<size, big_endian>::Reloc_section*
3780 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3781 {
3782 if (this->rela_irelative_ == NULL)
3783 {
3784 // Make sure we have already created the dynamic reloc section.
3785 this->rela_dyn_section(layout);
3786 this->rela_irelative_ = new Reloc_section(false);
3787 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3788 elfcpp::SHF_ALLOC, this->rela_irelative_,
3789 ORDER_DYNAMIC_RELOCS, false);
3790 gold_assert(this->rela_dyn_->output_section()
3791 == this->rela_irelative_->output_section());
3792 }
3793 return this->rela_irelative_;
3794 }
3795
3796
3797 // do_make_elf_object to override the same function in the base class. We need
3798 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3799 // store backend specific information. Hence we need to have our own ELF object
3800 // creation.
3801
3802 template<int size, bool big_endian>
3803 Object*
3804 Target_aarch64<size, big_endian>::do_make_elf_object(
3805 const std::string& name,
3806 Input_file* input_file,
3807 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3808 {
3809 int et = ehdr.get_e_type();
3810 // ET_EXEC files are valid input for --just-symbols/-R,
3811 // and we treat them as relocatable objects.
3812 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3813 return Sized_target<size, big_endian>::do_make_elf_object(
3814 name, input_file, offset, ehdr);
3815 else if (et == elfcpp::ET_REL)
3816 {
3817 AArch64_relobj<size, big_endian>* obj =
3818 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3819 obj->setup();
3820 return obj;
3821 }
3822 else if (et == elfcpp::ET_DYN)
3823 {
3824 // Keep base implementation.
3825 Sized_dynobj<size, big_endian>* obj =
3826 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3827 obj->setup();
3828 return obj;
3829 }
3830 else
3831 {
3832 gold_error(_("%s: unsupported ELF file type %d"),
3833 name.c_str(), et);
3834 return NULL;
3835 }
3836 }
3837
3838
3839 // Scan a relocation for stub generation.
3840
3841 template<int size, bool big_endian>
3842 void
3843 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3844 const Relocate_info<size, big_endian>* relinfo,
3845 unsigned int r_type,
3846 const Sized_symbol<size>* gsym,
3847 unsigned int r_sym,
3848 const Symbol_value<size>* psymval,
3849 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3850 Address address)
3851 {
3852 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3853 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3854
3855 Symbol_value<size> symval;
3856 if (gsym != NULL)
3857 {
3858 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3859 get_reloc_property(r_type);
3860 if (gsym->use_plt_offset(arp->reference_flags()))
3861 {
3862 // This uses a PLT, change the symbol value.
3863 symval.set_output_value(this->plt_address_for_global(gsym));
3864 psymval = &symval;
3865 }
3866 else if (gsym->is_undefined())
3867 {
3868 // There is no need to generate a stub if the original symbol
3869 // is undefined.
3870 gold_debug(DEBUG_TARGET,
3871 "stub: not creating a stub for undefined symbol %s in file %s",
3872 gsym->name(), aarch64_relobj->name().c_str());
3873 return;
3874 }
3875 }
3876
3877 // Get the symbol value.
3878 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3879
3880 // AArch64 branch offsets are relative to the address of the branch
3881 // instruction itself, so no pipeline adjustment is needed here.
3882 Address destination = static_cast<Address>(-1);
3883 switch (r_type)
3884 {
3885 case elfcpp::R_AARCH64_CALL26:
3886 case elfcpp::R_AARCH64_JUMP26:
3887 destination = value + addend;
3888 break;
3889 default:
3890 gold_unreachable();
3891 }
3892
3893 int stub_type = The_reloc_stub::
3894 stub_type_for_reloc(r_type, address, destination);
3895 if (stub_type == ST_NONE)
3896 return;
3897
3898 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3899 gold_assert(stub_table != NULL);
3900
3901 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3902 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3903 if (stub == NULL)
3904 {
3905 stub = new The_reloc_stub(stub_type);
3906 stub_table->add_reloc_stub(stub, key);
3907 }
3908 stub->set_destination_address(destination);
3909 } // End of Target_aarch64::scan_reloc_for_stub
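// For reference: R_AARCH64_CALL26/JUMP26 encode a signed 26-bit word
// offset, giving a direct reach of +/-128 MiB.  stub_type_for_reloc()
// (not shown here) is therefore expected to return ST_NONE whenever the
// destination above is already within that range, so a stub is created
// only for out-of-range branches.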
3910
3911
3912 // This function scans a relocation section for stub generation.
3913 // Only R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations are of
3914 // interest; the machine specific handling of each such relocation
3915 // is done by scan_reloc_for_stub above.
3916
3917 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3918 // SHT_REL or SHT_RELA.
3919
3920 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3921 // of relocs. OUTPUT_SECTION is the output section.
3922 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3923 // mapped to output offsets.
3924
3925 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3926 // VIEW_SIZE is the size. These refer to the input section, unless
3927 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3928 // the output section.
3929
3930 template<int size, bool big_endian>
3931 template<int sh_type>
3932 void inline
3933 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3934 const Relocate_info<size, big_endian>* relinfo,
3935 const unsigned char* prelocs,
3936 size_t reloc_count,
3937 Output_section* /*output_section*/,
3938 bool /*needs_special_offset_handling*/,
3939 const unsigned char* /*view*/,
3940 Address view_address,
3941 section_size_type)
3942 {
3943 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3944
3945 const int reloc_size =
3946 Reloc_types<sh_type,size,big_endian>::reloc_size;
3947 AArch64_relobj<size, big_endian>* object =
3948 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3949 unsigned int local_count = object->local_symbol_count();
3950
3951 gold::Default_comdat_behavior default_comdat_behavior;
3952 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3953
3954 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3955 {
3956 Reltype reloc(prelocs);
3957 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3958 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3959 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3960 if (r_type != elfcpp::R_AARCH64_CALL26
3961 && r_type != elfcpp::R_AARCH64_JUMP26)
3962 continue;
3963
3964 section_offset_type offset =
3965 convert_to_section_size_type(reloc.get_r_offset());
3966
3967 // Get the addend.
3968 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3969 reloc.get_r_addend();
3970
3971 const Sized_symbol<size>* sym;
3972 Symbol_value<size> symval;
3973 const Symbol_value<size> *psymval;
3974 bool is_defined_in_discarded_section;
3975 unsigned int shndx;
3976 const Symbol* gsym = NULL;
3977 if (r_sym < local_count)
3978 {
3979 sym = NULL;
3980 psymval = object->local_symbol(r_sym);
3981
3982 // If the local symbol belongs to a section we are discarding,
3983 // and that section is a debug section, try to find the
3984 // corresponding kept section and map this symbol to its
3985 // counterpart in the kept section. The symbol must not
3986 // correspond to a section we are folding.
3987 bool is_ordinary;
3988 shndx = psymval->input_shndx(&is_ordinary);
3989 is_defined_in_discarded_section =
3990 (is_ordinary
3991 && shndx != elfcpp::SHN_UNDEF
3992 && !object->is_section_included(shndx)
3993 && !relinfo->symtab->is_section_folded(object, shndx));
3994
3995 // We need to compute the would-be final value of this local
3996 // symbol.
3997 if (!is_defined_in_discarded_section)
3998 {
3999 typedef Sized_relobj_file<size, big_endian> ObjType;
4000 if (psymval->is_section_symbol())
4001 symval.set_is_section_symbol();
4002 typename ObjType::Compute_final_local_value_status status =
4003 object->compute_final_local_value(r_sym, psymval, &symval,
4004 relinfo->symtab);
4005 if (status == ObjType::CFLV_OK)
4006 {
4007 // Currently we cannot handle a branch to a target in
4008 // a merged section. If this is the case, issue an error
4009 // and also free the merge symbol value.
4010 if (!symval.has_output_value())
4011 {
4012 const std::string& section_name =
4013 object->section_name(shndx);
4014 object->error(_("cannot handle branch to local %u "
4015 "in a merged section %s"),
4016 r_sym, section_name.c_str());
4017 }
4018 psymval = &symval;
4019 }
4020 else
4021 {
4022 // We cannot determine the final value.
4023 continue;
4024 }
4025 }
4026 }
4027 else
4028 {
4029 gsym = object->global_symbol(r_sym);
4030 gold_assert(gsym != NULL);
4031 if (gsym->is_forwarder())
4032 gsym = relinfo->symtab->resolve_forwards(gsym);
4033
4034 sym = static_cast<const Sized_symbol<size>*>(gsym);
4035 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
4036 symval.set_output_symtab_index(sym->symtab_index());
4037 else
4038 symval.set_no_output_symtab_entry();
4039
4040 // We need to compute the would-be final value of this global
4041 // symbol.
4042 const Symbol_table* symtab = relinfo->symtab;
4043 const Sized_symbol<size>* sized_symbol =
4044 symtab->get_sized_symbol<size>(gsym);
4045 Symbol_table::Compute_final_value_status status;
4046 typename elfcpp::Elf_types<size>::Elf_Addr value =
4047 symtab->compute_final_value<size>(sized_symbol, &status);
4048
4049 // Skip this if the symbol has no output section.
4050 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
4051 continue;
4052 symval.set_output_value(value);
4053
4054 if (gsym->type() == elfcpp::STT_TLS)
4055 symval.set_is_tls_symbol();
4056 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
4057 symval.set_is_ifunc_symbol();
4058 psymval = &symval;
4059
4060 is_defined_in_discarded_section =
4061 (gsym->is_defined_in_discarded_section()
4062 && gsym->is_undefined());
4063 shndx = 0;
4064 }
4065
4066 Symbol_value<size> symval2;
4067 if (is_defined_in_discarded_section)
4068 {
4069 std::string name = object->section_name(relinfo->data_shndx);
4070
4071 if (comdat_behavior == CB_UNDETERMINED)
4072 comdat_behavior = default_comdat_behavior.get(name.c_str());
4073
4074 if (comdat_behavior == CB_PRETEND)
4075 {
4076 bool found;
4077 typename elfcpp::Elf_types<size>::Elf_Addr value =
4078 object->map_to_kept_section(shndx, name, &found);
4079 if (found)
4080 symval2.set_output_value(value + psymval->input_value());
4081 else
4082 symval2.set_output_value(0);
4083 }
4084 else
4085 {
4086 if (comdat_behavior == CB_ERROR)
4087 issue_discarded_error(relinfo, i, offset, r_sym, gsym);
4088 symval2.set_output_value(0);
4089 }
4090 symval2.set_no_output_symtab_entry();
4091 psymval = &symval2;
4092 }
4093
4094 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
4095 addend, view_address + offset);
4096 } // End of iterating relocs in a section
4097 } // End of Target_aarch64::scan_reloc_section_for_stubs
4098
4099
4100 // Scan an input section for stub generation.
4101
4102 template<int size, bool big_endian>
4103 void
4104 Target_aarch64<size, big_endian>::scan_section_for_stubs(
4105 const Relocate_info<size, big_endian>* relinfo,
4106 unsigned int sh_type,
4107 const unsigned char* prelocs,
4108 size_t reloc_count,
4109 Output_section* output_section,
4110 bool needs_special_offset_handling,
4111 const unsigned char* view,
4112 Address view_address,
4113 section_size_type view_size)
4114 {
4115 gold_assert(sh_type == elfcpp::SHT_RELA);
4116 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
4117 relinfo,
4118 prelocs,
4119 reloc_count,
4120 output_section,
4121 needs_special_offset_handling,
4122 view,
4123 view_address,
4124 view_size);
4125 }
4126
4127
4128 // Relocate a single reloc stub.
4129
4130 template<int size, bool big_endian>
4131 void Target_aarch64<size, big_endian>::
4132 relocate_reloc_stub(The_reloc_stub* stub,
4133 const The_relocate_info*,
4134 Output_section*,
4135 unsigned char* view,
4136 Address address,
4137 section_size_type)
4138 {
4139 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4140 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4141 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4142
4143 Insntype* ip = reinterpret_cast<Insntype*>(view);
4144 int insn_number = stub->insn_num();
4145 const uint32_t* insns = stub->insns();
4146 // Sanity check: the insns in the view must match the stub template.
4147 for (int i = 0; i < insn_number; ++i)
4148 {
4149 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4150 gold_assert(((uint32_t)insn == insns[i]));
4151 }
4152
4153 Address dest = stub->destination_address();
4154
4155 switch(stub->type())
4156 {
4157 case ST_ADRP_BRANCH:
4158 {
4159 // 1st reloc is ADR_PREL_PG_HI21
4160 The_reloc_functions_status status =
4161 The_reloc_functions::adrp(view, dest, address);
4162 // An error should never arise in the above step. If so, please
4163 // check 'aarch64_valid_for_adrp_p'.
4164 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4165
4166 // 2nd reloc is ADD_ABS_LO12_NC
4167 const AArch64_reloc_property* arp =
4168 aarch64_reloc_property_table->get_reloc_property(
4169 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4170 gold_assert(arp != NULL);
4171 status = The_reloc_functions::template
4172 rela_general<32>(view + 4, dest, 0, arp);
4173 // An error should never arise, it is an "_NC" relocation.
4174 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4175 }
4176 break;
4177
4178 case ST_LONG_BRANCH_ABS:
4179 // 1st reloc is R_AARCH64_PREL64, at offset 8
4180 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4181 break;
4182
4183 case ST_LONG_BRANCH_PCREL:
4184 {
4185 // "PC" calculation is the 2nd insn in the stub.
4186 uint64_t offset = dest - (address + 4);
4187 // The 64-bit offset occupies instruction words 4 and 5 (byte offset 16).
4188 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4189 }
4190 break;
4191
4192 default:
4193 gold_unreachable();
4194 }
4195 }
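// A note on the stub shapes assumed above: ST_ADRP_BRANCH is an
// adrp/add/br sequence (materialize the destination's page, add its low
// 12 bits, branch), which is why only an ADR_PREL_PG_HI21 at offset 0
// and an ADD_ABS_LO12_NC at offset 4 need patching.  The two long-branch
// stubs keep their 64-bit datum in the stub body: the absolute
// destination at offset 8 for ST_LONG_BRANCH_ABS, and a PC-relative
// offset at offset 16 for ST_LONG_BRANCH_PCREL.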
4196
4197
4198 // A class to handle the PLT data.
4199 // This is an abstract base class that handles most of the linker details
4200 // but does not know the actual contents of PLT entries. The derived
4201 // classes below fill in those details.
4202
4203 template<int size, bool big_endian>
4204 class Output_data_plt_aarch64 : public Output_section_data
4205 {
4206 public:
4207 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4208 Reloc_section;
4209 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4210
4211 Output_data_plt_aarch64(Layout* layout,
4212 uint64_t addralign,
4213 Output_data_got_aarch64<size, big_endian>* got,
4214 Output_data_space* got_plt,
4215 Output_data_space* got_irelative)
4216 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4217 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4218 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4219 { this->init(layout); }
4220
4221 // Initialize the PLT section.
4222 void
4223 init(Layout* layout);
4224
4225 // Add an entry to the PLT.
4226 void
4227 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4228
4229 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4230 unsigned int
4231 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4232 Sized_relobj_file<size, big_endian>* relobj,
4233 unsigned int local_sym_index);
4234
4235 // Add the relocation for a PLT entry.
4236 void
4237 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4238 unsigned int got_offset);
4239
4240 // Add the reserved TLSDESC_PLT entry to the PLT.
4241 void
4242 reserve_tlsdesc_entry(unsigned int got_offset)
4243 { this->tlsdesc_got_offset_ = got_offset; }
4244
4245 // Return true if a TLSDESC_PLT entry has been reserved.
4246 bool
4247 has_tlsdesc_entry() const
4248 { return this->tlsdesc_got_offset_ != -1U; }
4249
4250 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4251 unsigned int
4252 get_tlsdesc_got_offset() const
4253 { return this->tlsdesc_got_offset_; }
4254
4255 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4256 unsigned int
4257 get_tlsdesc_plt_offset() const
4258 {
4259 return (this->first_plt_entry_offset() +
4260 (this->count_ + this->irelative_count_)
4261 * this->get_plt_entry_size());
4262 }
4263
4264 // Return the .rela.plt section data.
4265 Reloc_section*
4266 rela_plt()
4267 { return this->rel_; }
4268
4269 // Return where the TLSDESC relocations should go.
4270 Reloc_section*
4271 rela_tlsdesc(Layout*);
4272
4273 // Return where the IRELATIVE relocations should go in the PLT
4274 // relocations.
4275 Reloc_section*
4276 rela_irelative(Symbol_table*, Layout*);
4277
4278 // Return whether we created a section for IRELATIVE relocations.
4279 bool
4280 has_irelative_section() const
4281 { return this->irelative_rel_ != NULL; }
4282
4283 // Return the number of PLT entries.
4284 unsigned int
4285 entry_count() const
4286 { return this->count_ + this->irelative_count_; }
4287
4288 // Return the offset of the first non-reserved PLT entry.
4289 unsigned int
4290 first_plt_entry_offset() const
4291 { return this->do_first_plt_entry_offset(); }
4292
4293 // Return the size of a PLT entry.
4294 unsigned int
4295 get_plt_entry_size() const
4296 { return this->do_get_plt_entry_size(); }
4297
4298 // Return the reserved tlsdesc entry size.
4299 unsigned int
4300 get_plt_tlsdesc_entry_size() const
4301 { return this->do_get_plt_tlsdesc_entry_size(); }
4302
4303 // Return the PLT address to use for a global symbol.
4304 uint64_t
4305 address_for_global(const Symbol*);
4306
4307 // Return the PLT address to use for a local symbol.
4308 uint64_t
4309 address_for_local(const Relobj*, unsigned int symndx);
4310
4311 protected:
4312 // Fill in the first PLT entry.
4313 void
4314 fill_first_plt_entry(unsigned char* pov,
4315 Address got_address,
4316 Address plt_address)
4317 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4318
4319 // Fill in a normal PLT entry.
4320 void
4321 fill_plt_entry(unsigned char* pov,
4322 Address got_address,
4323 Address plt_address,
4324 unsigned int got_offset,
4325 unsigned int plt_offset)
4326 {
4327 this->do_fill_plt_entry(pov, got_address, plt_address,
4328 got_offset, plt_offset);
4329 }
4330
4331 // Fill in the reserved TLSDESC PLT entry.
4332 void
4333 fill_tlsdesc_entry(unsigned char* pov,
4334 Address gotplt_address,
4335 Address plt_address,
4336 Address got_base,
4337 unsigned int tlsdesc_got_offset,
4338 unsigned int plt_offset)
4339 {
4340 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4341 tlsdesc_got_offset, plt_offset);
4342 }
4343
4344 virtual unsigned int
4345 do_first_plt_entry_offset() const = 0;
4346
4347 virtual unsigned int
4348 do_get_plt_entry_size() const = 0;
4349
4350 virtual unsigned int
4351 do_get_plt_tlsdesc_entry_size() const = 0;
4352
4353 virtual void
4354 do_fill_first_plt_entry(unsigned char* pov,
4355 Address got_addr,
4356 Address plt_addr) = 0;
4357
4358 virtual void
4359 do_fill_plt_entry(unsigned char* pov,
4360 Address got_address,
4361 Address plt_address,
4362 unsigned int got_offset,
4363 unsigned int plt_offset) = 0;
4364
4365 virtual void
4366 do_fill_tlsdesc_entry(unsigned char* pov,
4367 Address gotplt_address,
4368 Address plt_address,
4369 Address got_base,
4370 unsigned int tlsdesc_got_offset,
4371 unsigned int plt_offset) = 0;
4372
4373 void
4374 do_adjust_output_section(Output_section* os);
4375
4376 // Write to a map file.
4377 void
4378 do_print_to_mapfile(Mapfile* mapfile) const
4379 { mapfile->print_output_data(this, _("** PLT")); }
4380
4381 private:
4382 // Set the final size.
4383 void
4384 set_final_data_size();
4385
4386 // Write out the PLT data.
4387 void
4388 do_write(Output_file*);
4389
4390 // The reloc section.
4391 Reloc_section* rel_;
4392
4393 // The TLSDESC relocs, if necessary. These must follow the regular
4394 // PLT relocs.
4395 Reloc_section* tlsdesc_rel_;
4396
4397 // The IRELATIVE relocs, if necessary. These must follow the
4398 // regular PLT relocations.
4399 Reloc_section* irelative_rel_;
4400
4401 // The .got section.
4402 Output_data_got_aarch64<size, big_endian>* got_;
4403
4404 // The .got.plt section.
4405 Output_data_space* got_plt_;
4406
4407 // The part of the .got.plt section used for IRELATIVE relocs.
4408 Output_data_space* got_irelative_;
4409
4410 // The number of PLT entries.
4411 unsigned int count_;
4412
4413 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4414 // follow the regular PLT entries.
4415 unsigned int irelative_count_;
4416
4417 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4418 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4419 // indicates an offset is not allocated.
4420 unsigned int tlsdesc_got_offset_;
4421 };
4422
4423 // Initialize the PLT section.
4424
4425 template<int size, bool big_endian>
4426 void
4427 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4428 {
4429 this->rel_ = new Reloc_section(false);
4430 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4431 elfcpp::SHF_ALLOC, this->rel_,
4432 ORDER_DYNAMIC_PLT_RELOCS, false);
4433 }
4434
4435 template<int size, bool big_endian>
4436 void
4437 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4438 Output_section* os)
4439 {
4440 os->set_entsize(this->get_plt_entry_size());
4441 }
4442
4443 // Add an entry to the PLT.
4444
4445 template<int size, bool big_endian>
4446 void
4447 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4448 Layout* layout, Symbol* gsym)
4449 {
4450 gold_assert(!gsym->has_plt_offset());
4451
4452 unsigned int* pcount;
4453 unsigned int plt_reserved;
4454 Output_section_data_build* got;
4455
4456 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4457 && gsym->can_use_relative_reloc(false))
4458 {
4459 pcount = &this->irelative_count_;
4460 plt_reserved = 0;
4461 got = this->got_irelative_;
4462 }
4463 else
4464 {
4465 pcount = &this->count_;
4466 plt_reserved = this->first_plt_entry_offset();
4467 got = this->got_plt_;
4468 }
4469
4470 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4471 + plt_reserved);
4472
4473 ++*pcount;
4474
4475 section_offset_type got_offset = got->current_data_size();
4476
4477 // Every PLT entry needs a GOT entry which points back to the PLT
4478 // entry (this will be changed by the dynamic linker, normally
4479 // lazily when the function is called).
4480 got->set_current_data_size(got_offset + size / 8);
4481
4482 // Every PLT entry needs a reloc.
4483 this->add_relocation(symtab, layout, gsym, got_offset);
4484
4485 // Note that we don't need to save the symbol. The contents of the
4486 // PLT are independent of which symbols are used. The symbols only
4487 // appear in the relocations.
4488 }
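// A worked example with the standard entry sizes defined further below
// (32-byte PLT0, 16-byte entries): the third non-IFUNC symbol handed to
// add_entry() receives plt_offset = 2 * 16 + 32 = 64, and a fresh
// size/8-byte .got.plt slot is reserved immediately after the previous
// one.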
4489
4490 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4491 // the PLT offset.
4492
4493 template<int size, bool big_endian>
4494 unsigned int
4495 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4496 Symbol_table* symtab,
4497 Layout* layout,
4498 Sized_relobj_file<size, big_endian>* relobj,
4499 unsigned int local_sym_index)
4500 {
4501 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4502 ++this->irelative_count_;
4503
4504 section_offset_type got_offset = this->got_irelative_->current_data_size();
4505
4506 // Every PLT entry needs a GOT entry which points back to the PLT
4507 // entry.
4508 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4509
4510 // Every PLT entry needs a reloc.
4511 Reloc_section* rela = this->rela_irelative(symtab, layout);
4512 rela->add_symbolless_local_addend(relobj, local_sym_index,
4513 elfcpp::R_AARCH64_IRELATIVE,
4514 this->got_irelative_, got_offset, 0);
4515
4516 return plt_offset;
4517 }
4518
4519 // Add the relocation for a PLT entry.
4520
4521 template<int size, bool big_endian>
4522 void
4523 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4524 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4525 {
4526 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4527 && gsym->can_use_relative_reloc(false))
4528 {
4529 Reloc_section* rela = this->rela_irelative(symtab, layout);
4530 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4531 this->got_irelative_, got_offset, 0);
4532 }
4533 else
4534 {
4535 gsym->set_needs_dynsym_entry();
4536 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4537 got_offset, 0);
4538 }
4539 }
4540
4541 // Return where the TLSDESC relocations should go, creating it if
4542 // necessary. These follow the JUMP_SLOT relocations.
4543
4544 template<int size, bool big_endian>
4545 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4546 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4547 {
4548 if (this->tlsdesc_rel_ == NULL)
4549 {
4550 this->tlsdesc_rel_ = new Reloc_section(false);
4551 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4552 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4553 ORDER_DYNAMIC_PLT_RELOCS, false);
4554 gold_assert(this->tlsdesc_rel_->output_section()
4555 == this->rel_->output_section());
4556 }
4557 return this->tlsdesc_rel_;
4558 }
4559
4560 // Return where the IRELATIVE relocations should go in the PLT. These
4561 // follow the JUMP_SLOT and the TLSDESC relocations.
4562
4563 template<int size, bool big_endian>
4564 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4565 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4566 Layout* layout)
4567 {
4568 if (this->irelative_rel_ == NULL)
4569 {
4570 // Make sure we have a place for the TLSDESC relocations, in
4571 // case we see any later on.
4572 this->rela_tlsdesc(layout);
4573 this->irelative_rel_ = new Reloc_section(false);
4574 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4575 elfcpp::SHF_ALLOC, this->irelative_rel_,
4576 ORDER_DYNAMIC_PLT_RELOCS, false);
4577 gold_assert(this->irelative_rel_->output_section()
4578 == this->rel_->output_section());
4579
4580 if (parameters->doing_static_link())
4581 {
4582 // A statically linked executable will only have a .rela.plt
4583 // section to hold R_AARCH64_IRELATIVE relocs for
4584 // STT_GNU_IFUNC symbols. The C library will use these
4585 // symbols to locate the IRELATIVE relocs at program startup
4586 // time.
4587 symtab->define_in_output_data("__rela_iplt_start", NULL,
4588 Symbol_table::PREDEFINED,
4589 this->irelative_rel_, 0, 0,
4590 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4591 elfcpp::STV_HIDDEN, 0, false, true);
4592 symtab->define_in_output_data("__rela_iplt_end", NULL,
4593 Symbol_table::PREDEFINED,
4594 this->irelative_rel_, 0, 0,
4595 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4596 elfcpp::STV_HIDDEN, 0, true, true);
4597 }
4598 }
4599 return this->irelative_rel_;
4600 }
4601
4602 // Return the PLT address to use for a global symbol.
4603
4604 template<int size, bool big_endian>
4605 uint64_t
4606 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4607 const Symbol* gsym)
4608 {
4609 uint64_t offset = 0;
4610 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4611 && gsym->can_use_relative_reloc(false))
4612 offset = (this->first_plt_entry_offset() +
4613 this->count_ * this->get_plt_entry_size());
4614 return this->address() + offset + gsym->plt_offset();
4615 }
4616
4617 // Return the PLT address to use for a local symbol. These are always
4618 // IRELATIVE relocs.
4619
4620 template<int size, bool big_endian>
4621 uint64_t
4622 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4623 const Relobj* object,
4624 unsigned int r_sym)
4625 {
4626 return (this->address()
4627 + this->first_plt_entry_offset()
4628 + this->count_ * this->get_plt_entry_size()
4629 + object->local_plt_offset(r_sym));
4630 }
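// Taken together with get_tlsdesc_plt_offset() above, the PLT is laid
// out as: [PLT0 header][count_ regular entries][irelative_count_ IFUNC
// entries][optional reserved TLSDESC entry].  That is why the two
// address_for_* routines skip count_ * entry_size when the entry in
// question is one of the trailing IRELATIVE ones.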
4631
4632 // Set the final size.
4633
4634 template<int size, bool big_endian>
4635 void
4636 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4637 {
4638 unsigned int count = this->count_ + this->irelative_count_;
4639 unsigned int extra_size = 0;
4640 if (this->has_tlsdesc_entry())
4641 extra_size += this->get_plt_tlsdesc_entry_size();
4642 this->set_data_size(this->first_plt_entry_offset()
4643 + count * this->get_plt_entry_size()
4644 + extra_size);
4645 }
4646
4647 template<int size, bool big_endian>
4648 class Output_data_plt_aarch64_standard :
4649 public Output_data_plt_aarch64<size, big_endian>
4650 {
4651 public:
4652 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4653 Output_data_plt_aarch64_standard(
4654 Layout* layout,
4655 Output_data_got_aarch64<size, big_endian>* got,
4656 Output_data_space* got_plt,
4657 Output_data_space* got_irelative)
4658 : Output_data_plt_aarch64<size, big_endian>(layout,
4659 size == 32 ? 4 : 8,
4660 got, got_plt,
4661 got_irelative)
4662 { }
4663
4664 protected:
4665 // Return the offset of the first non-reserved PLT entry.
4666 virtual unsigned int
4667 do_first_plt_entry_offset() const
4668 { return this->first_plt_entry_size; }
4669
4670 // Return the size of a PLT entry
4671 virtual unsigned int
4672 do_get_plt_entry_size() const
4673 { return this->plt_entry_size; }
4674
4675 // Return the size of a tlsdesc entry
4676 virtual unsigned int
4677 do_get_plt_tlsdesc_entry_size() const
4678 { return this->plt_tlsdesc_entry_size; }
4679
4680 virtual void
4681 do_fill_first_plt_entry(unsigned char* pov,
4682 Address got_address,
4683 Address plt_address);
4684
4685 virtual void
4686 do_fill_plt_entry(unsigned char* pov,
4687 Address got_address,
4688 Address plt_address,
4689 unsigned int got_offset,
4690 unsigned int plt_offset);
4691
4692 virtual void
4693 do_fill_tlsdesc_entry(unsigned char* pov,
4694 Address gotplt_address,
4695 Address plt_address,
4696 Address got_base,
4697 unsigned int tlsdesc_got_offset,
4698 unsigned int plt_offset);
4699
4700 private:
4701 // The size of the first PLT entry.
4702 static const int first_plt_entry_size = 32;
4703 // The size of a subsequent PLT entry.
4704 static const int plt_entry_size = 16;
4705 // The size of the reserved TLSDESC PLT entry.
4706 static const int plt_tlsdesc_entry_size = 32;
4707 // Template for the first PLT entry.
4708 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4709 // Template for subsequent PLT entries.
4710 static const uint32_t plt_entry[plt_entry_size / 4];
4711 // The reserved TLSDESC entry in the PLT for an executable.
4712 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4713 };
4714
4715 // The first entry in the PLT for an executable.
4716
4717 template<>
4718 const uint32_t
4719 Output_data_plt_aarch64_standard<32, false>::
4720 first_plt_entry[first_plt_entry_size / 4] =
4721 {
4722 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4723 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4724 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4725 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4726 0xd61f0220, /* br x17 */
4727 0xd503201f, /* nop */
4728 0xd503201f, /* nop */
4729 0xd503201f, /* nop */
4730 };
4731
4732
4733 template<>
4734 const uint32_t
4735 Output_data_plt_aarch64_standard<32, true>::
4736 first_plt_entry[first_plt_entry_size / 4] =
4737 {
4738 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4739 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4740 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4741 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4742 0xd61f0220, /* br x17 */
4743 0xd503201f, /* nop */
4744 0xd503201f, /* nop */
4745 0xd503201f, /* nop */
4746 };
4747
4748
4749 template<>
4750 const uint32_t
4751 Output_data_plt_aarch64_standard<64, false>::
4752 first_plt_entry[first_plt_entry_size / 4] =
4753 {
4754 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4755 0x90000010, /* adrp x16, PLT_GOT+16 */
4756 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4757 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4758 0xd61f0220, /* br x17 */
4759 0xd503201f, /* nop */
4760 0xd503201f, /* nop */
4761 0xd503201f, /* nop */
4762 };
4763
4764
4765 template<>
4766 const uint32_t
4767 Output_data_plt_aarch64_standard<64, true>::
4768 first_plt_entry[first_plt_entry_size / 4] =
4769 {
4770 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4771 0x90000010, /* adrp x16, PLT_GOT+16 */
4772 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4773 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4774 0xd61f0220, /* br x17 */
4775 0xd503201f, /* nop */
4776 0xd503201f, /* nop */
4777 0xd503201f, /* nop */
4778 };
4779
4780
4781 template<>
4782 const uint32_t
4783 Output_data_plt_aarch64_standard<32, false>::
4784 plt_entry[plt_entry_size / 4] =
4785 {
4786 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4787 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4788 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4789 0xd61f0220, /* br x17. */
4790 };
4791
4792
4793 template<>
4794 const uint32_t
4795 Output_data_plt_aarch64_standard<32, true>::
4796 plt_entry[plt_entry_size / 4] =
4797 {
4798 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4799 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4800 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4801 0xd61f0220, /* br x17. */
4802 };
4803
4804
4805 template<>
4806 const uint32_t
4807 Output_data_plt_aarch64_standard<64, false>::
4808 plt_entry[plt_entry_size / 4] =
4809 {
4810 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4811 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4812 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4813 0xd61f0220, /* br x17. */
4814 };
4815
4816
4817 template<>
4818 const uint32_t
4819 Output_data_plt_aarch64_standard<64, true>::
4820 plt_entry[plt_entry_size / 4] =
4821 {
4822 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4823 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4824 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4825 0xd61f0220, /* br x17. */
4826 };
4827
4828
4829 template<int size, bool big_endian>
4830 void
4831 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4832 unsigned char* pov,
4833 Address got_address,
4834 Address plt_address)
4835 {
4836 // PLT0 of the small PLT looks like this in ELF64 -
4837 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4838 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4839 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4840 // symbol resolver
4841 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4842 // GOTPLT entry for this.
4843 // br x17
4844 // PLT0 will be slightly different in ELF32 due to different got entry
4845 // size.
4846 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4847 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4848
4849 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4850 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4851 // FIXME: This only works for 64bit
4852 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4853 gotplt_2nd_ent, plt_address + 4);
4854
4855 // Fill in R_AARCH64_LDST8_LO12
4856 elfcpp::Swap<32, big_endian>::writeval(
4857 pov + 8,
4858 ((this->first_plt_entry[2] & 0xffc003ff)
4859 | ((gotplt_2nd_ent & 0xff8) << 7)));
4860
4861 // Fill in R_AARCH64_ADD_ABS_LO12
4862 elfcpp::Swap<32, big_endian>::writeval(
4863 pov + 12,
4864 ((this->first_plt_entry[3] & 0xffc003ff)
4865 | ((gotplt_2nd_ent & 0xfff) << 10)));
4866 }
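// A note on the masking idiom above (with an illustrative address):
// for a 64-bit LDR the scaled imm12 field occupies insn bits [21:10] and
// holds address bits [11:3], so
//   (insn & 0xffc003ff) | ((addr & 0xff8) << 7)
// drops addr[11:3] into place, while the ADD immediate is unscaled and
// takes the full low 12 bits via ((addr & 0xfff) << 10).  For example,
// an address ending in 0xa38 yields imm12 = 0x147 for the LDR and 0xa38
// for the ADD.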
4867
4868
4869 // Subsequent entries in the PLT for an executable.
4870 // FIXME: This only works for 64bit
4871
4872 template<int size, bool big_endian>
4873 void
4874 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4875 unsigned char* pov,
4876 Address got_address,
4877 Address plt_address,
4878 unsigned int got_offset,
4879 unsigned int plt_offset)
4880 {
4881 memcpy(pov, this->plt_entry, this->plt_entry_size);
4882
4883 Address gotplt_entry_address = got_address + got_offset;
4884 Address plt_entry_address = plt_address + plt_offset;
4885
4886 // Fill in R_AARCH64_PCREL_ADR_HI21
4887 AArch64_relocate_functions<size, big_endian>::adrp(
4888 pov,
4889 gotplt_entry_address,
4890 plt_entry_address);
4891
4892 // Fill in R_AARCH64_LDST64_ABS_LO12
4893 elfcpp::Swap<32, big_endian>::writeval(
4894 pov + 4,
4895 ((this->plt_entry[1] & 0xffc003ff)
4896 | ((gotplt_entry_address & 0xff8) << 7)));
4897
4898 // Fill in R_AARCH64_ADD_ABS_LO12
4899 elfcpp::Swap<32, big_endian>::writeval(
4900 pov + 8,
4901 ((this->plt_entry[2] & 0xffc003ff)
4902 | ((gotplt_entry_address & 0xfff) <<10)));
4903
4904 }
4905
4906
4907 template<>
4908 const uint32_t
4909 Output_data_plt_aarch64_standard<32, false>::
4910 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4911 {
4912 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4913 0x90000002, /* adrp x2, 0 */
4914 0x90000003, /* adrp x3, 0 */
4915 0xb9400042, /* ldr w2, [w2, #0] */
4916 0x11000063, /* add w3, w3, 0 */
4917 0xd61f0040, /* br x2 */
4918 0xd503201f, /* nop */
4919 0xd503201f, /* nop */
4920 };
4921
4922 template<>
4923 const uint32_t
4924 Output_data_plt_aarch64_standard<32, true>::
4925 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4926 {
4927 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4928 0x90000002, /* adrp x2, 0 */
4929 0x90000003, /* adrp x3, 0 */
4930 0xb9400042, /* ldr w2, [w2, #0] */
4931 0x11000063, /* add w3, w3, 0 */
4932 0xd61f0040, /* br x2 */
4933 0xd503201f, /* nop */
4934 0xd503201f, /* nop */
4935 };
4936
4937 template<>
4938 const uint32_t
4939 Output_data_plt_aarch64_standard<64, false>::
4940 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4941 {
4942 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4943 0x90000002, /* adrp x2, 0 */
4944 0x90000003, /* adrp x3, 0 */
4945 0xf9400042, /* ldr x2, [x2, #0] */
4946 0x91000063, /* add x3, x3, 0 */
4947 0xd61f0040, /* br x2 */
4948 0xd503201f, /* nop */
4949 0xd503201f, /* nop */
4950 };
4951
4952 template<>
4953 const uint32_t
4954 Output_data_plt_aarch64_standard<64, true>::
4955 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4956 {
4957 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4958 0x90000002, /* adrp x2, 0 */
4959 0x90000003, /* adrp x3, 0 */
4960 0xf9400042, /* ldr x2, [x2, #0] */
4961 0x91000063, /* add x3, x3, 0 */
4962 0xd61f0040, /* br x2 */
4963 0xd503201f, /* nop */
4964 0xd503201f, /* nop */
4965 };
4966
4967 template<int size, bool big_endian>
4968 void
4969 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4970 unsigned char* pov,
4971 Address gotplt_address,
4972 Address plt_address,
4973 Address got_base,
4974 unsigned int tlsdesc_got_offset,
4975 unsigned int plt_offset)
4976 {
4977 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4978
4979 // move DT_TLSDESC_GOT address into x2
4980 // move .got.plt address into x3
4981 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4982 Address plt_entry_address = plt_address + plt_offset;
4983
4984 // R_AARCH64_ADR_PREL_PG_HI21
4985 AArch64_relocate_functions<size, big_endian>::adrp(
4986 pov + 4,
4987 tlsdesc_got_entry,
4988 plt_entry_address + 4);
4989
4990 // R_AARCH64_ADR_PREL_PG_HI21
4991 AArch64_relocate_functions<size, big_endian>::adrp(
4992 pov + 8,
4993 gotplt_address,
4994 plt_entry_address + 8);
4995
4996 // R_AARCH64_LDST64_ABS_LO12
4997 elfcpp::Swap<32, big_endian>::writeval(
4998 pov + 12,
4999 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
5000 | ((tlsdesc_got_entry & 0xff8) << 7)));
5001
5002 // R_AARCH64_ADD_ABS_LO12
5003 elfcpp::Swap<32, big_endian>::writeval(
5004 pov + 16,
5005 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
5006 | ((gotplt_address & 0xfff) << 10)));
5007 }
5008
5009 // Write out the PLT. This uses the hand-coded instructions above,
5010 // and adjusts them as needed, following the standard AArch64 ELF PLT layout.
5011
5012 template<int size, bool big_endian>
5013 void
5014 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
5015 {
5016 const off_t offset = this->offset();
5017 const section_size_type oview_size =
5018 convert_to_section_size_type(this->data_size());
5019 unsigned char* const oview = of->get_output_view(offset, oview_size);
5020
5021 const off_t got_file_offset = this->got_plt_->offset();
5022 gold_assert(got_file_offset + this->got_plt_->data_size()
5023 == this->got_irelative_->offset());
5024
5025 const section_size_type got_size =
5026 convert_to_section_size_type(this->got_plt_->data_size()
5027 + this->got_irelative_->data_size());
5028 unsigned char* const got_view = of->get_output_view(got_file_offset,
5029 got_size);
5030
5031 unsigned char* pov = oview;
5032
5033 // The base address of the .plt section.
5034 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
5035 // The base address of the PLT portion of the .got section.
5036 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
5037 = this->got_plt_->address();
5038
5039 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
5040 pov += this->first_plt_entry_offset();
5041
5042 // The first three entries in .got.plt are reserved.
5043 unsigned char* got_pov = got_view;
5044 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
5045 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5046
5047 unsigned int plt_offset = this->first_plt_entry_offset();
5048 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5049 const unsigned int count = this->count_ + this->irelative_count_;
5050 for (unsigned int plt_index = 0;
5051 plt_index < count;
5052 ++plt_index,
5053 pov += this->get_plt_entry_size(),
5054 got_pov += size / 8,
5055 plt_offset += this->get_plt_entry_size(),
5056 got_offset += size / 8)
5057 {
5058 // Set and adjust the PLT entry itself.
5059 this->fill_plt_entry(pov, gotplt_address, plt_address,
5060 got_offset, plt_offset);
5061
5062 // Set the entry in the GOT, which points to plt0.
5063 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
5064 }
5065
5066 if (this->has_tlsdesc_entry())
5067 {
5068 // Set and adjust the reserved TLSDESC PLT entry.
5069 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
5070 // The base address of the .got section.
5071 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
5072 this->got_->address();
5073 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
5074 tlsdesc_got_offset, plt_offset);
5075 pov += this->get_plt_tlsdesc_entry_size();
5076 }
5077
5078 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
5079 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
5080
5081 of->write_output_view(offset, oview_size, oview);
5082 of->write_output_view(got_file_offset, got_size, got_view);
5083 }
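// Note on the values written above: every .got.plt slot for a PLT entry
// is seeded with the address of PLT0, so under lazy binding the first
// call through an entry falls back into the header and reaches the
// resolver; with -z now the dynamic linker instead overwrites the slots
// with the final addresses before the program starts.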
5084
5085 // Telling how to update the immediate field of an instruction.
5086 struct AArch64_howto
5087 {
5088 // The immediate field mask.
5089 elfcpp::Elf_Xword dst_mask;
5090
5091 // The bit offset at which the relocation immediate is inserted.
5092 int doffset;
5093
5094 // The second part offset, if the immediate field has two parts.
5095 // -1 if the immediate field has only one part.
5096 int doffset2;
5097 };
5098
5099 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
5100 {
5101 {0, -1, -1}, // DATA
5102 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
5103 {0xffffe0, 5, -1}, // LD [23:5]-imm19
5104 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
5105 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
5106 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
5107 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
5108 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
5109 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
5110 {0x3ffffff, 0, -1}, // B [25:0]-imm26
5111 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
5112 };
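// For illustration, applying the B/CALL entry of the table: dst_mask is
// 0x3ffffff at doffset 0, so patching a branch with an already-selected
// (word) immediate of 0x1234 amounts to
//   insn = (insn & ~0x3ffffff) | (0x1234 & 0x3ffffff);
// the two-part ADR/ADRP rows are the only ones that also use doffset2.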
5113
5114 // AArch64 relocate function class
5115
5116 template<int size, bool big_endian>
5117 class AArch64_relocate_functions
5118 {
5119 public:
5120 typedef enum
5121 {
5122 STATUS_OKAY, // No error during relocation.
5123 STATUS_OVERFLOW, // Relocation overflow.
5124 STATUS_BAD_RELOC, // Relocation cannot be applied.
5125 } Status;
5126
5127 typedef AArch64_relocate_functions<size, big_endian> This;
5128 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5129 typedef Relocate_info<size, big_endian> The_relocate_info;
5130 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5131 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5132 typedef Stub_table<size, big_endian> The_stub_table;
5133 typedef elfcpp::Rela<size, big_endian> The_rela;
5134 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5135
5136 // Return the page address of the address.
5137 // Page(address) = address & ~0xFFF
5138
5139 static inline AArch64_valtype
5140 Page(Address address)
5141 {
5142 return (address & (~static_cast<Address>(0xFFF)));
5143 }
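
// For example (illustrative values): Page(0x412f38) == 0x412000 and
// Page(0x400abc) == 0x400000, so the difference of two page addresses is
// always a multiple of the 4 KiB page size, which is what ADRP encodes.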
5144
5145 private:
5146 // Update the instruction (pointed to by view) with the selected bits (immed).
5147 // val = (val & ~dst_mask) | (immed << doffset)
5148
5149 template<int valsize>
5150 static inline void
5151 update_view(unsigned char* view,
5152 AArch64_valtype immed,
5153 elfcpp::Elf_Xword doffset,
5154 elfcpp::Elf_Xword dst_mask)
5155 {
5156 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5157 Valtype* wv = reinterpret_cast<Valtype*>(view);
5158 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5159
5160 // Clear immediate fields.
5161 val &= ~dst_mask;
5162 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5163 static_cast<Valtype>(val | (immed << doffset)));
5164 }
5165
5166 // Update two parts of an instruction (pointed to by view) with the selected
5167 // bits (immed1 and immed2).
5168 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5169
5170 template<int valsize>
5171 static inline void
5172 update_view_two_parts(
5173 unsigned char* view,
5174 AArch64_valtype immed1,
5175 AArch64_valtype immed2,
5176 elfcpp::Elf_Xword doffset1,
5177 elfcpp::Elf_Xword doffset2,
5178 elfcpp::Elf_Xword dst_mask)
5179 {
5180 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5181 Valtype* wv = reinterpret_cast<Valtype*>(view);
5182 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5183 val &= ~dst_mask;
5184 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5185 static_cast<Valtype>(val | (immed1 << doffset1) |
5186 (immed2 << doffset2)));
5187 }
5188
5189 // Update adr or adrp instruction with immed.
5190 // In adr and adrp: [30:29] immlo [23:5] immhi
5191
5192 static inline void
5193 update_adr(unsigned char* view, AArch64_valtype immed)
5194 {
5195 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5196 This::template update_view_two_parts<32>(
5197 view,
5198 immed & 0x3,
5199 (immed & 0x1ffffc) >> 2,
5200 29,
5201 5,
5202 dst_mask);
5203 }
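
// Example of the split (illustrative values): for immed = 0x1abcd,
// immlo = immed & 0x3 = 0x1 goes into bits [30:29], and
// immhi = (immed & 0x1ffffc) >> 2 = 0x6af3 goes into bits [23:5].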
5204
5205 // Update movz/movn instruction with bits immed.
5206 // Set instruction to movz if is_movz is true, otherwise set instruction
5207 // to movn.
5208
5209 static inline void
5210 update_movnz(unsigned char* view,
5211 AArch64_valtype immed,
5212 bool is_movz)
5213 {
5214 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5215 Valtype* wv = reinterpret_cast<Valtype*>(view);
5216 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5217
5218 const elfcpp::Elf_Xword doffset =
5219 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5220 const elfcpp::Elf_Xword dst_mask =
5221 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5222
5223 // Clear the immediate field and the opc bits.
5224 val &= ~(dst_mask | (0x3 << 29));
5225
5226 // Set instruction to movz or movn.
5227 // movz: [30:29] is 10 movn: [30:29] is 00
5228 if (is_movz)
5229 val |= (0x2 << 29);
5230
5231 elfcpp::Swap<32, big_endian>::writeval(wv,
5232 static_cast<Valtype>(val | (immed << doffset)));
5233 }
5234
5235 public:
5236
5237 // Update selected bits in text.
5238
5239 template<int valsize>
5240 static inline typename This::Status
5241 reloc_common(unsigned char* view, Address x,
5242 const AArch64_reloc_property* reloc_property)
5243 {
5244 // Select bits from X.
5245 Address immed = reloc_property->select_x_value(x);
5246
5247 // Update view.
5248 const AArch64_reloc_property::Reloc_inst inst =
5249 reloc_property->reloc_inst();
5250 // If this is a data relocation, or the instruction's immediate is split into
5251 // two fields, this function must not be called.
5252 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5253 aarch64_howto[inst].doffset != -1);
5254 This::template update_view<valsize>(view, immed,
5255 aarch64_howto[inst].doffset,
5256 aarch64_howto[inst].dst_mask);
5257
5258 // Check for overflow or misalignment if needed.
5259 return (reloc_property->checkup_x_value(x)
5260 ? This::STATUS_OKAY
5261 : This::STATUS_OVERFLOW);
5262 }
5263
5264 // Construct a B insn. Note that although we group it here with the other
5265 // relocation operations, there is actually no 'relocation' involved.
5266 static inline void
5267 construct_b(unsigned char* view, unsigned int branch_offset)
5268 {
5269 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5270 26, 0, 0xffffffff);
5271 }
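
// Editorial note: 0x05 placed at bit 26 is the opcode 000101 of an
// unconditional B, and (branch_offset >> 2) fills the imm26 field, so the
// constructed branch can reach targets within +/-128 MiB.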
5272
5273 // Do a simple rela relocation at unaligned addresses.
5274
5275 template<int valsize>
5276 static inline typename This::Status
5277 rela_ua(unsigned char* view,
5278 const Sized_relobj_file<size, big_endian>* object,
5279 const Symbol_value<size>* psymval,
5280 AArch64_valtype addend,
5281 const AArch64_reloc_property* reloc_property)
5282 {
5283 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5284 Valtype;
5285 typename elfcpp::Elf_types<size>::Elf_Addr x =
5286 psymval->value(object, addend);
5287 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5288 static_cast<Valtype>(x));
5289 return (reloc_property->checkup_x_value(x)
5290 ? This::STATUS_OKAY
5291 : This::STATUS_OVERFLOW);
5292 }
5293
5294 // Do a simple pc-relative relocation at unaligned addresses.
5295
5296 template<int valsize>
5297 static inline typename This::Status
5298 pcrela_ua(unsigned char* view,
5299 const Sized_relobj_file<size, big_endian>* object,
5300 const Symbol_value<size>* psymval,
5301 AArch64_valtype addend,
5302 Address address,
5303 const AArch64_reloc_property* reloc_property)
5304 {
5305 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5306 Valtype;
5307 Address x = psymval->value(object, addend) - address;
5308 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5309 static_cast<Valtype>(x));
5310 return (reloc_property->checkup_x_value(x)
5311 ? This::STATUS_OKAY
5312 : This::STATUS_OVERFLOW);
5313 }
5314
5315 // Do a simple rela relocation at aligned addresses.
5316
5317 template<int valsize>
5318 static inline typename This::Status
5319 rela(
5320 unsigned char* view,
5321 const Sized_relobj_file<size, big_endian>* object,
5322 const Symbol_value<size>* psymval,
5323 AArch64_valtype addend,
5324 const AArch64_reloc_property* reloc_property)
5325 {
5326 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5327 Valtype* wv = reinterpret_cast<Valtype*>(view);
5328 Address x = psymval->value(object, addend);
5329 elfcpp::Swap<valsize, big_endian>::writeval(wv,static_cast<Valtype>(x));
5330 return (reloc_property->checkup_x_value(x)
5331 ? This::STATUS_OKAY
5332 : This::STATUS_OVERFLOW);
5333 }
5334
5335 // Do a relocation. Update selected bits in text.
5336 // new_val = (val & ~dst_mask) | (immed << doffset)
5337
5338 template<int valsize>
5339 static inline typename This::Status
5340 rela_general(unsigned char* view,
5341 const Sized_relobj_file<size, big_endian>* object,
5342 const Symbol_value<size>* psymval,
5343 AArch64_valtype addend,
5344 const AArch64_reloc_property* reloc_property)
5345 {
5346 // Calculate relocation.
5347 Address x = psymval->value(object, addend);
5348 return This::template reloc_common<valsize>(view, x, reloc_property);
5349 }
5350
5351 // Do a relocation. Update selected bits in text.
5352 // new val = (val & ~dst_mask) | (immed << doffset)
5353
5354 template<int valsize>
5355 static inline typename This::Status
5356 rela_general(
5357 unsigned char* view,
5358 AArch64_valtype s,
5359 AArch64_valtype addend,
5360 const AArch64_reloc_property* reloc_property)
5361 {
5362 // Calculate relocation.
5363 Address x = s + addend;
5364 return This::template reloc_common<valsize>(view, x, reloc_property);
5365 }
5366
5367 // Do a PC-relative relocation. Update selected bits in text.
5368 // new val = (val & ~dst_mask) | (immed << doffset)
5369
5370 template<int valsize>
5371 static inline typename This::Status
5372 pcrela_general(
5373 unsigned char* view,
5374 const Sized_relobj_file<size, big_endian>* object,
5375 const Symbol_value<size>* psymval,
5376 AArch64_valtype addend,
5377 Address address,
5378 const AArch64_reloc_property* reloc_property)
5379 {
5380 // Calculate relocation.
5381 Address x = psymval->value(object, addend) - address;
5382 return This::template reloc_common<valsize>(view, x, reloc_property);
5383 }
5384
5385
5386 // Calculate (S + A) - address, update adr instruction.
5387
5388 static inline typename This::Status
5389 adr(unsigned char* view,
5390 const Sized_relobj_file<size, big_endian>* object,
5391 const Symbol_value<size>* psymval,
5392 Address addend,
5393 Address address,
5394 const AArch64_reloc_property* /* reloc_property */)
5395 {
5396 AArch64_valtype x = psymval->value(object, addend) - address;
5397 // Pick bits [20:0] of X.
5398 AArch64_valtype immed = x & 0x1fffff;
5399 update_adr(view, immed);
5400 // Check -2^20 <= X < 2^20
5401 return (size == 64 && Bits<21>::has_overflow((x))
5402 ? This::STATUS_OVERFLOW
5403 : This::STATUS_OKAY);
5404 }
5405
5406 // Calculate PG(S+A) - PG(address), update adrp instruction.
5407 // R_AARCH64_ADR_PREL_PG_HI21
5408
5409 static inline typename This::Status
5410 adrp(
5411 unsigned char* view,
5412 Address sa,
5413 Address address)
5414 {
5415 AArch64_valtype x = This::Page(sa) - This::Page(address);
5416 // Pick [32:12] of X.
5417 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5418 update_adr(view, immed);
5419 // Check -2^32 <= X < 2^32
5420 return (size == 64 && Bits<33>::has_overflow((x))
5421 ? This::STATUS_OVERFLOW
5422 : This::STATUS_OKAY);
5423 }
5424
5425 // Calculate PG(S+A) - PG(address), update adrp instruction.
5426 // R_AARCH64_ADR_PREL_PG_HI21
5427
5428 static inline typename This::Status
5429 adrp(unsigned char* view,
5430 const Sized_relobj_file<size, big_endian>* object,
5431 const Symbol_value<size>* psymval,
5432 Address addend,
5433 Address address,
5434 const AArch64_reloc_property* reloc_property)
5435 {
5436 Address sa = psymval->value(object, addend);
5437 AArch64_valtype x = This::Page(sa) - This::Page(address);
5438 // Pick [32:12] of X.
5439 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5440 update_adr(view, immed);
5441 return (reloc_property->checkup_x_value(x)
5442 ? This::STATUS_OKAY
5443 : This::STATUS_OVERFLOW);
5444 }
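
// Worked example (illustrative numbers): with S + A = 0x402345 and the place
// P = 0x400010, Page(S + A) - Page(P) = 0x402000 - 0x400000 = 0x2000, so the
// ADRP immediate becomes 0x2000 >> 12 = 2 pages.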
5445
5446 // Update a mov[n/z] instruction. Check overflow if needed.
5447 // If X >= 0, set the instruction to movz and its immediate value to the
5448 // selected bits of X.
5449 // If X < 0, set the instruction to movn and its immediate value to
5450 // NOT (the selected bits of X).
5451
5452 static inline typename This::Status
5453 movnz(unsigned char* view,
5454 AArch64_valtype x,
5455 const AArch64_reloc_property* reloc_property)
5456 {
5457 // Select bits from X.
5458 Address immed;
5459 bool is_movz;
5460 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5461 if (static_cast<SignedW>(x) >= 0)
5462 {
5463 immed = reloc_property->select_x_value(x);
5464 is_movz = true;
5465 }
5466 else
5467 {
5468 immed = reloc_property->select_x_value(~x);
5469 is_movz = false;
5470 }
5471
5472 // Update movnz instruction.
5473 update_movnz(view, immed, is_movz);
5474
5475 // Check for overflow or misalignment if needed.
5476 return (reloc_property->checkup_x_value(x)
5477 ? This::STATUS_OKAY
5478 : This::STATUS_OVERFLOW);
5479 }
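
// Worked example (illustrative only): for x = -0x1234 with a G0-style MOVW
// relocation, x is negative, so the instruction becomes MOVN and its imm16
// field gets bits [15:0] of ~x, i.e. 0x1233; at run time MOVN then yields
// ~0x1233 = 0xffffffffffffedcc, which is -0x1234 again.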
5480
5481 static inline bool
5482 maybe_apply_stub(unsigned int,
5483 const The_relocate_info*,
5484 const The_rela&,
5485 unsigned char*,
5486 Address,
5487 const Sized_symbol<size>*,
5488 const Symbol_value<size>*,
5489 const Sized_relobj_file<size, big_endian>*,
5490 section_size_type);
5491
5492 }; // End of AArch64_relocate_functions
5493
5494
5495 // For a certain relocation type (usually a jump/branch), test whether the
5496 // destination needs a stub to be reached. If so, re-route the destination of
5497 // the original instruction to the stub. Note that at this point the stub has
5498 // already been generated.
5499
5500 template<int size, bool big_endian>
5501 bool
5502 AArch64_relocate_functions<size, big_endian>::
5503 maybe_apply_stub(unsigned int r_type,
5504 const The_relocate_info* relinfo,
5505 const The_rela& rela,
5506 unsigned char* view,
5507 Address address,
5508 const Sized_symbol<size>* gsym,
5509 const Symbol_value<size>* psymval,
5510 const Sized_relobj_file<size, big_endian>* object,
5511 section_size_type current_group_size)
5512 {
5513 if (parameters->options().relocatable())
5514 return false;
5515
5516 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5517 Address branch_target = psymval->value(object, 0) + addend;
5518 int stub_type =
5519 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5520 if (stub_type == ST_NONE)
5521 return false;
5522
5523 const The_aarch64_relobj* aarch64_relobj =
5524 static_cast<const The_aarch64_relobj*>(object);
5525 const AArch64_reloc_property* arp =
5526 aarch64_reloc_property_table->get_reloc_property(r_type);
5527 gold_assert(arp != NULL);
5528
5529 // We don't create stubs for undefined symbols, but do for weak.
5530 if (gsym
5531 && !gsym->use_plt_offset(arp->reference_flags())
5532 && gsym->is_undefined())
5533 {
5534 gold_debug(DEBUG_TARGET,
5535 "stub: looking for a stub for undefined symbol %s in file %s",
5536 gsym->name(), aarch64_relobj->name().c_str());
5537 return false;
5538 }
5539
5540 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5541 gold_assert(stub_table != NULL);
5542
5543 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5544 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5545 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5546 gold_assert(stub != NULL);
5547
5548 Address new_branch_target = stub_table->address() + stub->offset();
5549 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5550 new_branch_target - address;
5551 typename This::Status status = This::template
5552 rela_general<32>(view, branch_offset, 0, arp);
5553 if (status != This::STATUS_OKAY)
5554 gold_error(_("Stub is too far away, try a smaller value "
5555 "for '--stub-group-size'. The current value is 0x%lx."),
5556 static_cast<unsigned long>(current_group_size));
5557 return true;
5558 }
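
// Illustrative scenario (editorial, not from the original source): a BL whose
// target lies, say, 200 MiB away exceeds the +/-128 MiB reach of its imm26
// field.  stub_type_for_reloc() then reports a stub type, the stub already
// built for (roughly) that type, symbol and addend is looked up in the
// section's stub table, and the BL is re-pointed at
// stub_table->address() + stub->offset() using rela_general<32> above.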
5559
5560
5561 // Group input sections for stub generation.
5562 //
5563 // We group input sections in an output section so that the total size,
5564 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5565 // unless the only input section in a group is already bigger than
5566 // GROUP_SIZE.  For each group, a stub table is then created and placed
5567 // after the last input section of the group.  If STUBS_ALWAYS_AFTER_BRANCH
5568 // is false, we further extend the group past the stub table.
5570
5571 template<int size, bool big_endian>
5572 void
5573 Target_aarch64<size, big_endian>::group_sections(
5574 Layout* layout,
5575 section_size_type group_size,
5576 bool stubs_always_after_branch,
5577 const Task* task)
5578 {
5579 // Group input sections and insert stub table
5580 Layout::Section_list section_list;
5581 layout->get_executable_sections(&section_list);
5582 for (Layout::Section_list::const_iterator p = section_list.begin();
5583 p != section_list.end();
5584 ++p)
5585 {
5586 AArch64_output_section<size, big_endian>* output_section =
5587 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5588 output_section->group_sections(group_size, stubs_always_after_branch,
5589 this, task);
5590 }
5591 }
5592
5593
5594 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5595 // section of RELOBJ.
5596
5597 template<int size, bool big_endian>
5598 AArch64_input_section<size, big_endian>*
5599 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5600 Relobj* relobj, unsigned int shndx) const
5601 {
5602 Section_id sid(relobj, shndx);
5603 typename AArch64_input_section_map::const_iterator p =
5604 this->aarch64_input_section_map_.find(sid);
5605 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5606 }
5607
5608
5609 // Make a new AArch64_input_section object.
5610
5611 template<int size, bool big_endian>
5612 AArch64_input_section<size, big_endian>*
5613 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5614 Relobj* relobj, unsigned int shndx)
5615 {
5616 Section_id sid(relobj, shndx);
5617
5618 AArch64_input_section<size, big_endian>* input_section =
5619 new AArch64_input_section<size, big_endian>(relobj, shndx);
5620 input_section->init();
5621
5622 // Register new AArch64_input_section in map for look-up.
5623 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5624 this->aarch64_input_section_map_.insert(
5625 std::make_pair(sid, input_section));
5626
5627 // Make sure that we have not already created another AArch64_input_section
5628 // for this input section.
5629 gold_assert(ins.second);
5630
5631 return input_section;
5632 }
5633
5634
5635 // Relaxation hook. This is where we do stub generation.
5636
5637 template<int size, bool big_endian>
5638 bool
5639 Target_aarch64<size, big_endian>::do_relax(
5640 int pass,
5641 const Input_objects* input_objects,
5642 Symbol_table* symtab,
5643 Layout* layout ,
5644 const Task* task)
5645 {
5646 gold_assert(!parameters->options().relocatable());
5647 if (pass == 1)
5648 {
5649 // We don't handle negative stub_group_size right now.
5650 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5651 if (this->stub_group_size_ == 1)
5652 {
5653 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5654 // will fail to link. The user will have to relink with an explicit
5655 // group size option.
5656 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5657 4096 * 4;
5658 }
5659 group_sections(layout, this->stub_group_size_, true, task);
5660 }
5661 else
5662 {
5663 // If this is not the first pass, addresses and file offsets have
5664 // been reset at this point; set them here.
5665 for (Stub_table_iterator sp = this->stub_tables_.begin();
5666 sp != this->stub_tables_.end(); ++sp)
5667 {
5668 The_stub_table* stt = *sp;
5669 The_aarch64_input_section* owner = stt->owner();
5670 off_t off = align_address(owner->original_size(),
5671 stt->addralign());
5672 stt->set_address_and_file_offset(owner->address() + off,
5673 owner->offset() + off);
5674 }
5675 }
5676
5677 // Scan relocs for relocation stubs
5678 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5679 op != input_objects->relobj_end();
5680 ++op)
5681 {
5682 The_aarch64_relobj* aarch64_relobj =
5683 static_cast<The_aarch64_relobj*>(*op);
5684 // Lock the object so we can read from it. This is only called
5685 // single-threaded from Layout::finalize, so it is OK to lock.
5686 Task_lock_obj<Object> tl(task, aarch64_relobj);
5687 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5688 }
5689
5690 bool any_stub_table_changed = false;
5691 for (Stub_table_iterator siter = this->stub_tables_.begin();
5692 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5693 {
5694 The_stub_table* stub_table = *siter;
5695 if (stub_table->update_data_size_changed_p())
5696 {
5697 The_aarch64_input_section* owner = stub_table->owner();
5698 uint64_t address = owner->address();
5699 off_t offset = owner->offset();
5700 owner->reset_address_and_file_offset();
5701 owner->set_address_and_file_offset(address, offset);
5702
5703 any_stub_table_changed = true;
5704 }
5705 }
5706
5707 // Decide whether another relaxation pass is needed; if not, finalize stubs.
5708 bool continue_relaxation = any_stub_table_changed;
5709 if (!continue_relaxation)
5710 for (Stub_table_iterator sp = this->stub_tables_.begin();
5711 (sp != this->stub_tables_.end());
5712 ++sp)
5713 (*sp)->finalize_stubs();
5714
5715 return continue_relaxation;
5716 }
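
// Editorial note: do_relax is effectively a fixed-point iteration.  It
// returns true as long as some stub table changed size during this pass, in
// which case the caller is expected to redo the layout and call do_relax
// again; only when nothing changed are the stubs finalized above.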
5717
5718
5719 // Make a new Stub_table.
5720
5721 template<int size, bool big_endian>
5722 Stub_table<size, big_endian>*
5723 Target_aarch64<size, big_endian>::new_stub_table(
5724 AArch64_input_section<size, big_endian>* owner)
5725 {
5726 Stub_table<size, big_endian>* stub_table =
5727 new Stub_table<size, big_endian>(owner);
5728 stub_table->set_address(align_address(
5729 owner->address() + owner->data_size(), 8));
5730 stub_table->set_file_offset(owner->offset() + owner->data_size());
5731 stub_table->finalize_data_size();
5732
5733 this->stub_tables_.push_back(stub_table);
5734
5735 return stub_table;
5736 }
5737
5738
5739 template<int size, bool big_endian>
5740 uint64_t
5741 Target_aarch64<size, big_endian>::do_reloc_addend(
5742 void* arg, unsigned int r_type, uint64_t) const
5743 {
5744 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5745 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5746 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5747 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5748 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5749 gold_assert(psymval->is_tls_symbol());
5750 // The value of a TLS symbol is the offset in the TLS segment.
5751 return psymval->value(ti.object, 0);
5752 }
5753
5754 // Return the number of entries in the PLT.
5755
5756 template<int size, bool big_endian>
5757 unsigned int
5758 Target_aarch64<size, big_endian>::plt_entry_count() const
5759 {
5760 if (this->plt_ == NULL)
5761 return 0;
5762 return this->plt_->entry_count();
5763 }
5764
5765 // Return the offset of the first non-reserved PLT entry.
5766
5767 template<int size, bool big_endian>
5768 unsigned int
5769 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5770 {
5771 return this->plt_->first_plt_entry_offset();
5772 }
5773
5774 // Return the size of each PLT entry.
5775
5776 template<int size, bool big_endian>
5777 unsigned int
5778 Target_aarch64<size, big_endian>::plt_entry_size() const
5779 {
5780 return this->plt_->get_plt_entry_size();
5781 }
5782
5783 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5784
5785 template<int size, bool big_endian>
5786 void
5787 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5788 Symbol_table* symtab, Layout* layout)
5789 {
5790 if (this->tls_base_symbol_defined_)
5791 return;
5792
5793 Output_segment* tls_segment = layout->tls_segment();
5794 if (tls_segment != NULL)
5795 {
5796 // _TLS_MODULE_BASE_ always points to the beginning of the TLS segment.
5797 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5798 Symbol_table::PREDEFINED,
5799 tls_segment, 0, 0,
5800 elfcpp::STT_TLS,
5801 elfcpp::STB_LOCAL,
5802 elfcpp::STV_HIDDEN, 0,
5803 Symbol::SEGMENT_START,
5804 true);
5805 }
5806 this->tls_base_symbol_defined_ = true;
5807 }
5808
5809 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5810
5811 template<int size, bool big_endian>
5812 void
5813 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5814 Symbol_table* symtab, Layout* layout)
5815 {
5816 if (this->plt_ == NULL)
5817 this->make_plt_section(symtab, layout);
5818
5819 if (!this->plt_->has_tlsdesc_entry())
5820 {
5821 // Allocate the TLSDESC_GOT entry.
5822 Output_data_got_aarch64<size, big_endian>* got =
5823 this->got_section(symtab, layout);
5824 unsigned int got_offset = got->add_constant(0);
5825
5826 // Allocate the TLSDESC_PLT entry.
5827 this->plt_->reserve_tlsdesc_entry(got_offset);
5828 }
5829 }
5830
5831 // Create a GOT entry for the TLS module index.
5832
5833 template<int size, bool big_endian>
5834 unsigned int
5835 Target_aarch64<size, big_endian>::got_mod_index_entry(
5836 Symbol_table* symtab, Layout* layout,
5837 Sized_relobj_file<size, big_endian>* object)
5838 {
5839 if (this->got_mod_index_offset_ == -1U)
5840 {
5841 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5842 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5843 Output_data_got_aarch64<size, big_endian>* got =
5844 this->got_section(symtab, layout);
5845 unsigned int got_offset = got->add_constant(0);
5846 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5847 got_offset, 0);
5848 got->add_constant(0);
5849 this->got_mod_index_offset_ = got_offset;
5850 }
5851 return this->got_mod_index_offset_;
5852 }
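
// Editorial note: the two consecutive GOT words reserved above implement the
// local-dynamic module id.  The first word gets an R_AARCH64_TLS_DTPMOD64
// dynamic relocation (filled in by the dynamic linker); the second is left
// zero, since local-dynamic code sequences add their own DTP-relative
// offsets.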
5853
5854 // Optimize the TLS relocation type based on what we know about the
5855 // symbol. IS_FINAL is true if the final address of this symbol is
5856 // known at link time.
5857
5858 template<int size, bool big_endian>
5859 tls::Tls_optimization
5860 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5861 int r_type)
5862 {
5863 // If we are generating a shared library, then we can't do anything
5864 // in the linker.
5865 if (parameters->options().shared())
5866 return tls::TLSOPT_NONE;
5867
5868 switch (r_type)
5869 {
5870 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5871 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5872 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5873 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5874 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5875 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5876 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5877 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5878 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5879 case elfcpp::R_AARCH64_TLSDESC_LDR:
5880 case elfcpp::R_AARCH64_TLSDESC_ADD:
5881 case elfcpp::R_AARCH64_TLSDESC_CALL:
5882 // These are General-Dynamic which permits fully general TLS
5883 // access. Since we know that we are generating an executable,
5884 // we can convert this to Initial-Exec. If we also know that
5885 // this is a local symbol, we can further switch to Local-Exec.
5886 if (is_final)
5887 return tls::TLSOPT_TO_LE;
5888 return tls::TLSOPT_TO_IE;
5889
5890 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5891 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5892 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5893 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5894 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5895 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5896 // These are Local-Dynamic, which refer to local symbols in the
5897 // dynamic TLS block. Since we know that we are generating an
5898 // executable, we can switch to Local-Exec.
5899 return tls::TLSOPT_TO_LE;
5900
5901 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5902 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5903 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5904 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5905 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5906 // These are Initial-Exec relocs which get the thread offset
5907 // from the GOT. If we know that we are linking against the
5908 // local symbol, we can switch to Local-Exec, which links the
5909 // thread offset into the instruction.
5910 if (is_final)
5911 return tls::TLSOPT_TO_LE;
5912 return tls::TLSOPT_NONE;
5913
5914 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5915 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5916 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5917 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5918 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5919 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5920 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5921 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5922 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
5923 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
5924 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
5925 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
5926 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
5927 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
5928 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
5929 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
5930 // When we already have Local-Exec, there is nothing further we
5931 // can do.
5932 return tls::TLSOPT_NONE;
5933
5934 default:
5935 gold_unreachable();
5936 }
5937 }
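
// Example (editorial): when linking an executable against a TLS symbol whose
// final address is known (IS_FINAL), a General-Dynamic reference is relaxed
// all the way to Local-Exec (TLSOPT_TO_LE); if the symbol may still be
// resolved elsewhere, only the General-Dynamic -> Initial-Exec conversion
// (TLSOPT_TO_IE) is performed, and for shared libraries nothing is changed.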
5938
5939 // Returns true if this relocation type could be that of a function pointer.
5940
5941 template<int size, bool big_endian>
5942 inline bool
5943 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5944 unsigned int r_type)
5945 {
5946 switch (r_type)
5947 {
5948 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5949 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5950 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5951 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5952 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5953 {
5954 return true;
5955 }
5956 }
5957 return false;
5958 }
5959
5960 // For safe ICF, scan a relocation for a local symbol to check if it
5961 // corresponds to a function pointer being taken. In that case mark
5962 // the function whose pointer was taken as not foldable.
5963
5964 template<int size, bool big_endian>
5965 inline bool
5966 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5967 Symbol_table* ,
5968 Layout* ,
5969 Target_aarch64<size, big_endian>* ,
5970 Sized_relobj_file<size, big_endian>* ,
5971 unsigned int ,
5972 Output_section* ,
5973 const elfcpp::Rela<size, big_endian>& ,
5974 unsigned int r_type,
5975 const elfcpp::Sym<size, big_endian>&)
5976 {
5977 // When building a shared library, do not fold any local symbols.
5978 return (parameters->options().shared()
5979 || possible_function_pointer_reloc(r_type));
5980 }
5981
5982 // For safe ICF, scan a relocation for a global symbol to check if it
5983 // corresponds to a function pointer being taken. In that case mark
5984 // the function whose pointer was taken as not foldable.
5985
5986 template<int size, bool big_endian>
5987 inline bool
5988 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5989 Symbol_table* ,
5990 Layout* ,
5991 Target_aarch64<size, big_endian>* ,
5992 Sized_relobj_file<size, big_endian>* ,
5993 unsigned int ,
5994 Output_section* ,
5995 const elfcpp::Rela<size, big_endian>& ,
5996 unsigned int r_type,
5997 Symbol* gsym)
5998 {
5999 // When building a shared library, do not fold symbols whose visibility
6000 // is hidden, internal or protected.
6001 return ((parameters->options().shared()
6002 && (gsym->visibility() == elfcpp::STV_INTERNAL
6003 || gsym->visibility() == elfcpp::STV_PROTECTED
6004 || gsym->visibility() == elfcpp::STV_HIDDEN))
6005 || possible_function_pointer_reloc(r_type));
6006 }
6007
6008 // Report an unsupported relocation against a local symbol.
6009
6010 template<int size, bool big_endian>
6011 void
6012 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
6013 Sized_relobj_file<size, big_endian>* object,
6014 unsigned int r_type)
6015 {
6016 gold_error(_("%s: unsupported reloc %u against local symbol"),
6017 object->name().c_str(), r_type);
6018 }
6019
6020 // We are about to emit a dynamic relocation of type R_TYPE. If the
6021 // dynamic linker does not support it, issue an error.
6022
6023 template<int size, bool big_endian>
6024 void
6025 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
6026 unsigned int r_type)
6027 {
6028 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
6029
6030 switch (r_type)
6031 {
6032 // These are the relocation types supported by glibc for AARCH64.
6033 case elfcpp::R_AARCH64_NONE:
6034 case elfcpp::R_AARCH64_COPY:
6035 case elfcpp::R_AARCH64_GLOB_DAT:
6036 case elfcpp::R_AARCH64_JUMP_SLOT:
6037 case elfcpp::R_AARCH64_RELATIVE:
6038 case elfcpp::R_AARCH64_TLS_DTPREL64:
6039 case elfcpp::R_AARCH64_TLS_DTPMOD64:
6040 case elfcpp::R_AARCH64_TLS_TPREL64:
6041 case elfcpp::R_AARCH64_TLSDESC:
6042 case elfcpp::R_AARCH64_IRELATIVE:
6043 case elfcpp::R_AARCH64_ABS32:
6044 case elfcpp::R_AARCH64_ABS64:
6045 return;
6046
6047 default:
6048 break;
6049 }
6050
6051 // This prevents us from issuing more than one error per reloc
6052 // section. But we can still wind up issuing more than one
6053 // error per object file.
6054 if (this->issued_non_pic_error_)
6055 return;
6056 gold_assert(parameters->options().output_is_position_independent());
6057 object->error(_("requires unsupported dynamic reloc; "
6058 "recompile with -fPIC"));
6059 this->issued_non_pic_error_ = true;
6060 return;
6061 }
6062
6063 // Return whether we need to make a PLT entry for a relocation of the
6064 // given type against a STT_GNU_IFUNC symbol.
6065
6066 template<int size, bool big_endian>
6067 bool
6068 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
6069 Sized_relobj_file<size, big_endian>* object,
6070 unsigned int r_type)
6071 {
6072 const AArch64_reloc_property* arp =
6073 aarch64_reloc_property_table->get_reloc_property(r_type);
6074 gold_assert(arp != NULL);
6075
6076 int flags = arp->reference_flags();
6077 if (flags & Symbol::TLS_REF)
6078 {
6079 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
6080 object->name().c_str(), arp->name().c_str());
6081 return false;
6082 }
6083 return flags != 0;
6084 }
6085
6086 // Scan a relocation for a local symbol.
6087
6088 template<int size, bool big_endian>
6089 inline void
6090 Target_aarch64<size, big_endian>::Scan::local(
6091 Symbol_table* symtab,
6092 Layout* layout,
6093 Target_aarch64<size, big_endian>* target,
6094 Sized_relobj_file<size, big_endian>* object,
6095 unsigned int data_shndx,
6096 Output_section* output_section,
6097 const elfcpp::Rela<size, big_endian>& rela,
6098 unsigned int r_type,
6099 const elfcpp::Sym<size, big_endian>& lsym,
6100 bool is_discarded)
6101 {
6102 if (is_discarded)
6103 return;
6104
6105 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6106 Reloc_section;
6107 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6108
6109 // A local STT_GNU_IFUNC symbol may require a PLT entry.
6110 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
6111 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
6112 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
6113
6114 switch (r_type)
6115 {
6116 case elfcpp::R_AARCH64_NONE:
6117 break;
6118
6119 case elfcpp::R_AARCH64_ABS32:
6120 case elfcpp::R_AARCH64_ABS16:
6121 if (parameters->options().output_is_position_independent())
6122 {
6123 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6124 object->name().c_str(), r_type);
6125 }
6126 break;
6127
6128 case elfcpp::R_AARCH64_ABS64:
6129 // If building a shared library or a PIE, we need to mark this as a dynamic
6130 // relocation, so that the dynamic loader can relocate it.
6131 if (parameters->options().output_is_position_independent())
6132 {
6133 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6134 rela_dyn->add_local_relative(object, r_sym,
6135 elfcpp::R_AARCH64_RELATIVE,
6136 output_section,
6137 data_shndx,
6138 rela.get_r_offset(),
6139 rela.get_r_addend(),
6140 is_ifunc);
6141 }
6142 break;
6143
6144 case elfcpp::R_AARCH64_PREL64:
6145 case elfcpp::R_AARCH64_PREL32:
6146 case elfcpp::R_AARCH64_PREL16:
6147 break;
6148
6149 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6150 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6151 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6152 // The above relocations are used to access GOT entries.
6153 {
6154 Output_data_got_aarch64<size, big_endian>* got =
6155 target->got_section(symtab, layout);
6156 bool is_new = false;
6157 // This symbol requires a GOT entry.
6158 if (is_ifunc)
6159 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6160 else
6161 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6162 if (is_new && parameters->options().output_is_position_independent())
6163 target->rela_dyn_section(layout)->
6164 add_local_relative(object,
6165 r_sym,
6166 elfcpp::R_AARCH64_RELATIVE,
6167 got,
6168 object->local_got_offset(r_sym,
6169 GOT_TYPE_STANDARD),
6170 0,
6171 false);
6172 }
6173 break;
6174
6175 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6176 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6177 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6178 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6179 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6180 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6181 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6182 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6183 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6184 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6185 if (parameters->options().output_is_position_independent())
6186 {
6187 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6188 object->name().c_str(), r_type);
6189 }
6190 break;
6191
6192 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6193 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6194 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6195 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6196 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6197 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6198 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6199 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6200 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6201 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6202 break;
6203
6204 // Control flow, pc-relative. We don't need to do anything for a relative
6205 // addressing relocation against a local symbol if it does not reference
6206 // the GOT.
6207 case elfcpp::R_AARCH64_TSTBR14:
6208 case elfcpp::R_AARCH64_CONDBR19:
6209 case elfcpp::R_AARCH64_JUMP26:
6210 case elfcpp::R_AARCH64_CALL26:
6211 break;
6212
6213 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6214 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6215 {
6216 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6217 optimize_tls_reloc(!parameters->options().shared(), r_type);
6218 if (tlsopt == tls::TLSOPT_TO_LE)
6219 break;
6220
6221 layout->set_has_static_tls();
6222 // Create a GOT entry for the tp-relative offset.
6223 if (!parameters->doing_static_link())
6224 {
6225 Output_data_got_aarch64<size, big_endian>* got =
6226 target->got_section(symtab, layout);
6227 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6228 target->rela_dyn_section(layout),
6229 elfcpp::R_AARCH64_TLS_TPREL64);
6230 }
6231 else if (!object->local_has_got_offset(r_sym,
6232 GOT_TYPE_TLS_OFFSET))
6233 {
6234 Output_data_got_aarch64<size, big_endian>* got =
6235 target->got_section(symtab, layout);
6236 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6237 unsigned int got_offset =
6238 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6239 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6240 gold_assert(addend == 0);
6241 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6242 object, r_sym);
6243 }
6244 }
6245 break;
6246
6247 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6248 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6249 {
6250 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6251 optimize_tls_reloc(!parameters->options().shared(), r_type);
6252 if (tlsopt == tls::TLSOPT_TO_LE)
6253 {
6254 layout->set_has_static_tls();
6255 break;
6256 }
6257 gold_assert(tlsopt == tls::TLSOPT_NONE);
6258
6259 Output_data_got_aarch64<size, big_endian>* got =
6260 target->got_section(symtab, layout);
6261 got->add_local_pair_with_rel(object,r_sym, data_shndx,
6262 GOT_TYPE_TLS_PAIR,
6263 target->rela_dyn_section(layout),
6264 elfcpp::R_AARCH64_TLS_DTPMOD64);
6265 }
6266 break;
6267
6268 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6269 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6270 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6271 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6272 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6273 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6274 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6275 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6276 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
6277 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6278 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
6279 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6280 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
6281 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6282 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
6283 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
6284 {
6285 layout->set_has_static_tls();
6286 bool output_is_shared = parameters->options().shared();
6287 if (output_is_shared)
6288 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6289 object->name().c_str(), r_type);
6290 }
6291 break;
6292
6293 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6294 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6295 {
6296 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6297 optimize_tls_reloc(!parameters->options().shared(), r_type);
6298 if (tlsopt == tls::TLSOPT_NONE)
6299 {
6300 // Create a GOT entry for the module index.
6301 target->got_mod_index_entry(symtab, layout, object);
6302 }
6303 else if (tlsopt != tls::TLSOPT_TO_LE)
6304 unsupported_reloc_local(object, r_type);
6305 }
6306 break;
6307
6308 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6309 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6310 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6311 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6312 break;
6313
6314 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6315 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6316 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6317 {
6318 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6319 optimize_tls_reloc(!parameters->options().shared(), r_type);
6320 target->define_tls_base_symbol(symtab, layout);
6321 if (tlsopt == tls::TLSOPT_NONE)
6322 {
6323 // Create reserved PLT and GOT entries for the resolver.
6324 target->reserve_tlsdesc_entries(symtab, layout);
6325
6326 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6327 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6328 // entry needs to be in an area in .got.plt, not .got. Call
6329 // got_section to make sure the section has been created.
6330 target->got_section(symtab, layout);
6331 Output_data_got<size, big_endian>* got =
6332 target->got_tlsdesc_section();
6333 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6334 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6335 {
6336 unsigned int got_offset = got->add_constant(0);
6337 got->add_constant(0);
6338 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6339 got_offset);
6340 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6341 // We store the arguments we need in a vector, and use
6342 // the index into the vector as the parameter to pass
6343 // to the target specific routines.
6344 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6345 void* arg = reinterpret_cast<void*>(intarg);
6346 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6347 got, got_offset, 0);
6348 }
6349 }
6350 else if (tlsopt != tls::TLSOPT_TO_LE)
6351 unsupported_reloc_local(object, r_type);
6352 }
6353 break;
6354
6355 case elfcpp::R_AARCH64_TLSDESC_CALL:
6356 break;
6357
6358 default:
6359 unsupported_reloc_local(object, r_type);
6360 }
6361 }
6362
6363
6364 // Report an unsupported relocation against a global symbol.
6365
6366 template<int size, bool big_endian>
6367 void
6368 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6369 Sized_relobj_file<size, big_endian>* object,
6370 unsigned int r_type,
6371 Symbol* gsym)
6372 {
6373 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6374 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6375 }
6376
6377 template<int size, bool big_endian>
6378 inline void
6379 Target_aarch64<size, big_endian>::Scan::global(
6380 Symbol_table* symtab,
6381 Layout* layout,
6382 Target_aarch64<size, big_endian>* target,
6383 Sized_relobj_file<size, big_endian> * object,
6384 unsigned int data_shndx,
6385 Output_section* output_section,
6386 const elfcpp::Rela<size, big_endian>& rela,
6387 unsigned int r_type,
6388 Symbol* gsym)
6389 {
6390 // A STT_GNU_IFUNC symbol may require a PLT entry.
6391 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6392 && this->reloc_needs_plt_for_ifunc(object, r_type))
6393 target->make_plt_entry(symtab, layout, gsym);
6394
6395 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6396 Reloc_section;
6397 const AArch64_reloc_property* arp =
6398 aarch64_reloc_property_table->get_reloc_property(r_type);
6399 gold_assert(arp != NULL);
6400
6401 switch (r_type)
6402 {
6403 case elfcpp::R_AARCH64_NONE:
6404 break;
6405
6406 case elfcpp::R_AARCH64_ABS16:
6407 case elfcpp::R_AARCH64_ABS32:
6408 case elfcpp::R_AARCH64_ABS64:
6409 {
6410 // Make a PLT entry if necessary.
6411 if (gsym->needs_plt_entry())
6412 {
6413 target->make_plt_entry(symtab, layout, gsym);
6414 // Since this is not a PC-relative relocation, we may be
6415 // taking the address of a function. In that case we need to
6416 // set the entry in the dynamic symbol table to the address of
6417 // the PLT entry.
6418 if (gsym->is_from_dynobj() && !parameters->options().shared())
6419 gsym->set_needs_dynsym_value();
6420 }
6421 // Make a dynamic relocation if necessary.
6422 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6423 {
6424 if (!parameters->options().output_is_position_independent()
6425 && gsym->may_need_copy_reloc())
6426 {
6427 target->copy_reloc(symtab, layout, object,
6428 data_shndx, output_section, gsym, rela);
6429 }
6430 else if (r_type == elfcpp::R_AARCH64_ABS64
6431 && gsym->type() == elfcpp::STT_GNU_IFUNC
6432 && gsym->can_use_relative_reloc(false)
6433 && !gsym->is_from_dynobj()
6434 && !gsym->is_undefined()
6435 && !gsym->is_preemptible())
6436 {
6437 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6438 // symbol. This makes a function address in a PIE executable
6439 // match the address in a shared library that it links against.
6440 Reloc_section* rela_dyn =
6441 target->rela_irelative_section(layout);
6442 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6443 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6444 output_section, object,
6445 data_shndx,
6446 rela.get_r_offset(),
6447 rela.get_r_addend());
6448 }
6449 else if (r_type == elfcpp::R_AARCH64_ABS64
6450 && gsym->can_use_relative_reloc(false))
6451 {
6452 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6453 rela_dyn->add_global_relative(gsym,
6454 elfcpp::R_AARCH64_RELATIVE,
6455 output_section,
6456 object,
6457 data_shndx,
6458 rela.get_r_offset(),
6459 rela.get_r_addend(),
6460 false);
6461 }
6462 else
6463 {
6464 check_non_pic(object, r_type);
6465 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6466 rela_dyn = target->rela_dyn_section(layout);
6467 rela_dyn->add_global(
6468 gsym, r_type, output_section, object,
6469 data_shndx, rela.get_r_offset(),rela.get_r_addend());
6470 }
6471 }
6472 }
6473 break;
6474
6475 case elfcpp::R_AARCH64_PREL16:
6476 case elfcpp::R_AARCH64_PREL32:
6477 case elfcpp::R_AARCH64_PREL64:
6478 // This is used to fill the GOT absolute address.
6479 if (gsym->needs_plt_entry())
6480 {
6481 target->make_plt_entry(symtab, layout, gsym);
6482 }
6483 break;
6484
6485 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6486 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6487 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6488 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6489 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6490 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6491 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6492 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6493 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6494 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6495 if (parameters->options().output_is_position_independent())
6496 {
6497 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6498 object->name().c_str(), r_type);
6499 }
6500 // Make a PLT entry if necessary.
6501 if (gsym->needs_plt_entry())
6502 {
6503 target->make_plt_entry(symtab, layout, gsym);
6504 // Since this is not a PC-relative relocation, we may be
6505 // taking the address of a function. In that case we need to
6506 // set the entry in the dynamic symbol table to the address of
6507 // the PLT entry.
6508 if (gsym->is_from_dynobj() && !parameters->options().shared())
6509 gsym->set_needs_dynsym_value();
6510 }
6511 break;
6512
6513 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6514 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6515 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6516 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6517 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6518 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6519 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6520 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6521 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6522 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6523 {
6524 if (gsym->needs_plt_entry())
6525 target->make_plt_entry(symtab, layout, gsym);
6526 // Make a dynamic relocation if necessary.
6527 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6528 {
6529 if (parameters->options().output_is_executable()
6530 && gsym->may_need_copy_reloc())
6531 {
6532 target->copy_reloc(symtab, layout, object,
6533 data_shndx, output_section, gsym, rela);
6534 }
6535 }
6536 break;
6537 }
6538
6539 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6540 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6541 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6542 {
6543 // The above relocations are used to access GOT entries.
6544 // Note that a GOT entry holds the *address* of a symbol.
6545 // The symbol requires a GOT entry.
6546 Output_data_got_aarch64<size, big_endian>* got =
6547 target->got_section(symtab, layout);
6548 if (gsym->final_value_is_known())
6549 {
6550 // For a STT_GNU_IFUNC symbol we want the PLT address.
6551 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6552 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6553 else
6554 got->add_global(gsym, GOT_TYPE_STANDARD);
6555 }
6556 else
6557 {
6558 // If this symbol is not fully resolved, we need to add a dynamic
6559 // relocation for it.
6560 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6561
6562 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6563 //
6564 // 1) The symbol may be defined in some other module.
6565 // 2) We are building a shared library and this is a protected
6566 // symbol; using GLOB_DAT means that the dynamic linker can use
6567 // the address of the PLT in the main executable when appropriate
6568 // so that function address comparisons work.
6569 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6570 // again so that function address comparisons work.
6571 if (gsym->is_from_dynobj()
6572 || gsym->is_undefined()
6573 || gsym->is_preemptible()
6574 || (gsym->visibility() == elfcpp::STV_PROTECTED
6575 && parameters->options().shared())
6576 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6577 && parameters->options().output_is_position_independent()))
6578 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6579 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6580 else
6581 {
6582 // For a STT_GNU_IFUNC symbol we want to write the PLT
6583 // offset into the GOT, so that function pointer
6584 // comparisons work correctly.
6585 bool is_new;
6586 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6587 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6588 else
6589 {
6590 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6591 // Tell the dynamic linker to use the PLT address
6592 // when resolving relocations.
6593 if (gsym->is_from_dynobj()
6594 && !parameters->options().shared())
6595 gsym->set_needs_dynsym_value();
6596 }
6597 if (is_new)
6598 {
6599 rela_dyn->add_global_relative(
6600 gsym, elfcpp::R_AARCH64_RELATIVE,
6601 got,
6602 gsym->got_offset(GOT_TYPE_STANDARD),
6603 0,
6604 false);
6605 }
6606 }
6607 }
6608 break;
6609 }
6610
6611 case elfcpp::R_AARCH64_TSTBR14:
6612 case elfcpp::R_AARCH64_CONDBR19:
6613 case elfcpp::R_AARCH64_JUMP26:
6614 case elfcpp::R_AARCH64_CALL26:
6615 {
6616 if (gsym->final_value_is_known())
6617 break;
6618
6619 if (gsym->is_defined() &&
6620 !gsym->is_from_dynobj() &&
6621 !gsym->is_preemptible())
6622 break;
6623
6624 // Make plt entry for function call.
6625 target->make_plt_entry(symtab, layout, gsym);
6626 break;
6627 }
6628
6629 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6630 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6631 {
6632 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6633 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6634 if (tlsopt == tls::TLSOPT_TO_LE)
6635 {
6636 layout->set_has_static_tls();
6637 break;
6638 }
6639 gold_assert(tlsopt == tls::TLSOPT_NONE);
6640
6641 // General dynamic.
6642 Output_data_got_aarch64<size, big_endian>* got =
6643 target->got_section(symtab, layout);
6644 // Create 2 consecutive entries for module index and offset.
6645 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6646 target->rela_dyn_section(layout),
6647 elfcpp::R_AARCH64_TLS_DTPMOD64,
6648 elfcpp::R_AARCH64_TLS_DTPREL64);
6649 }
6650 break;
6651
6652 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6653 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6654 {
6655 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6656 optimize_tls_reloc(!parameters->options().shared(), r_type);
6657 if (tlsopt == tls::TLSOPT_NONE)
6658 {
6659 // Create a GOT entry for the module index.
6660 target->got_mod_index_entry(symtab, layout, object);
6661 }
6662 else if (tlsopt != tls::TLSOPT_TO_LE)
6663 unsupported_reloc_local(object, r_type);
6664 }
6665 break;
6666
6667 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6668 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6669 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6670 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6671 break;
6672
6673 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6674 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6675 {
6676 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6677 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6678 if (tlsopt == tls::TLSOPT_TO_LE)
6679 break;
6680
6681 layout->set_has_static_tls();
6682 // Create a GOT entry for the tp-relative offset.
6683 Output_data_got_aarch64<size, big_endian>* got
6684 = target->got_section(symtab, layout);
6685 if (!parameters->doing_static_link())
6686 {
6687 got->add_global_with_rel(
6688 gsym, GOT_TYPE_TLS_OFFSET,
6689 target->rela_dyn_section(layout),
6690 elfcpp::R_AARCH64_TLS_TPREL64);
6691 }
6692 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6693 {
6694 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6695 unsigned int got_offset =
6696 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6697 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6698 gold_assert(addend == 0);
6699 got->add_static_reloc(got_offset,
6700 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6701 }
6702 }
6703 break;
6704
6705 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6706 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6707 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6708 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6709 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6710 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6711 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6712 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6713 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
6714 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
6715 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
6716 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
6717 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
6718 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
6719 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
6720 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC: // Local executable
6721 layout->set_has_static_tls();
6722 if (parameters->options().shared())
6723 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6724 object->name().c_str(), r_type);
6725 break;
6726
6727 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6728 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6729 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6730 {
6731 target->define_tls_base_symbol(symtab, layout);
6732 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6733 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6734 if (tlsopt == tls::TLSOPT_NONE)
6735 {
6736 // Create reserved PLT and GOT entries for the resolver.
6737 target->reserve_tlsdesc_entries(symtab, layout);
6738
6739 // Create a double GOT entry with an R_AARCH64_TLSDESC
6740 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6741 // entry needs to be in an area in .got.plt, not .got. Call
6742 // got_section to make sure the section has been created.
6743 target->got_section(symtab, layout);
6744 Output_data_got<size, big_endian>* got =
6745 target->got_tlsdesc_section();
6746 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6747 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6748 elfcpp::R_AARCH64_TLSDESC, 0);
6749 }
6750 else if (tlsopt == tls::TLSOPT_TO_IE)
6751 {
6752 // Create a GOT entry for the tp-relative offset.
6753 Output_data_got<size, big_endian>* got
6754 = target->got_section(symtab, layout);
6755 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6756 target->rela_dyn_section(layout),
6757 elfcpp::R_AARCH64_TLS_TPREL64);
6758 }
6759 else if (tlsopt != tls::TLSOPT_TO_LE)
6760 unsupported_reloc_global(object, r_type, gsym);
6761 }
6762 break;
6763
6764 case elfcpp::R_AARCH64_TLSDESC_CALL:
6765 break;
6766
6767 default:
6768 gold_error(_("%s: unsupported reloc type in global scan"),
6769 aarch64_reloc_property_table->
6770 reloc_name_in_error_message(r_type).c_str());
6771 }
6772 return;
6773 } // End of Scan::global
6774
6775
6776 // Create the PLT section.
6777 template<int size, bool big_endian>
6778 void
6779 Target_aarch64<size, big_endian>::make_plt_section(
6780 Symbol_table* symtab, Layout* layout)
6781 {
6782 if (this->plt_ == NULL)
6783 {
6784 // Create the GOT section first.
6785 this->got_section(symtab, layout);
6786
6787 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6788 this->got_irelative_);
6789
6790 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6791 (elfcpp::SHF_ALLOC
6792 | elfcpp::SHF_EXECINSTR),
6793 this->plt_, ORDER_PLT, false);
6794
6795 // Make the sh_info field of .rela.plt point to .plt.
6796 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6797 rela_plt_os->set_info_section(this->plt_->output_section());
6798 }
6799 }
6800
6801 // Return the section for TLSDESC relocations.
6802
6803 template<int size, bool big_endian>
6804 typename Target_aarch64<size, big_endian>::Reloc_section*
6805 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6806 {
6807 return this->plt_section()->rela_tlsdesc(layout);
6808 }
6809
6810 // Create a PLT entry for a global symbol.
6811
6812 template<int size, bool big_endian>
6813 void
6814 Target_aarch64<size, big_endian>::make_plt_entry(
6815 Symbol_table* symtab,
6816 Layout* layout,
6817 Symbol* gsym)
6818 {
6819 if (gsym->has_plt_offset())
6820 return;
6821
6822 if (this->plt_ == NULL)
6823 this->make_plt_section(symtab, layout);
6824
6825 this->plt_->add_entry(symtab, layout, gsym);
6826 }
6827
6828 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6829
6830 template<int size, bool big_endian>
6831 void
6832 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6833 Symbol_table* symtab, Layout* layout,
6834 Sized_relobj_file<size, big_endian>* relobj,
6835 unsigned int local_sym_index)
6836 {
6837 if (relobj->local_has_plt_offset(local_sym_index))
6838 return;
6839 if (this->plt_ == NULL)
6840 this->make_plt_section(symtab, layout);
6841 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6842 relobj,
6843 local_sym_index);
6844 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6845 }
6846
6847 template<int size, bool big_endian>
6848 void
6849 Target_aarch64<size, big_endian>::gc_process_relocs(
6850 Symbol_table* symtab,
6851 Layout* layout,
6852 Sized_relobj_file<size, big_endian>* object,
6853 unsigned int data_shndx,
6854 unsigned int sh_type,
6855 const unsigned char* prelocs,
6856 size_t reloc_count,
6857 Output_section* output_section,
6858 bool needs_special_offset_handling,
6859 size_t local_symbol_count,
6860 const unsigned char* plocal_symbols)
6861 {
6862 typedef Target_aarch64<size, big_endian> Aarch64;
6863 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6864 Classify_reloc;
6865
6866 if (sh_type == elfcpp::SHT_REL)
6867 {
6868 return;
6869 }
6870
6871 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6872 symtab,
6873 layout,
6874 this,
6875 object,
6876 data_shndx,
6877 prelocs,
6878 reloc_count,
6879 output_section,
6880 needs_special_offset_handling,
6881 local_symbol_count,
6882 plocal_symbols);
6883 }
6884
6885 // Scan relocations for a section.
6886
6887 template<int size, bool big_endian>
6888 void
6889 Target_aarch64<size, big_endian>::scan_relocs(
6890 Symbol_table* symtab,
6891 Layout* layout,
6892 Sized_relobj_file<size, big_endian>* object,
6893 unsigned int data_shndx,
6894 unsigned int sh_type,
6895 const unsigned char* prelocs,
6896 size_t reloc_count,
6897 Output_section* output_section,
6898 bool needs_special_offset_handling,
6899 size_t local_symbol_count,
6900 const unsigned char* plocal_symbols)
6901 {
6902 typedef Target_aarch64<size, big_endian> Aarch64;
6903 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6904 Classify_reloc;
6905
6906 if (sh_type == elfcpp::SHT_REL)
6907 {
6908 gold_error(_("%s: unsupported REL reloc section"),
6909 object->name().c_str());
6910 return;
6911 }
6912
6913 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6914 symtab,
6915 layout,
6916 this,
6917 object,
6918 data_shndx,
6919 prelocs,
6920 reloc_count,
6921 output_section,
6922 needs_special_offset_handling,
6923 local_symbol_count,
6924 plocal_symbols);
6925 }
6926
6927 // Return the value to use for a dynamic symbol which requires special
6928 // treatment. This is how we support equality comparisons of function
6929 // pointers across shared library boundaries, as described in the
6930 // processor specific ABI supplement.
6931
6932 template<int size, bool big_endian>
6933 uint64_t
6934 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6935 {
6936 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6937 return this->plt_address_for_global(gsym);
6938 }
6939
6940
6941 // Finalize the sections.
6942
6943 template<int size, bool big_endian>
6944 void
6945 Target_aarch64<size, big_endian>::do_finalize_sections(
6946 Layout* layout,
6947 const Input_objects*,
6948 Symbol_table* symtab)
6949 {
6950 const Reloc_section* rel_plt = (this->plt_ == NULL
6951 ? NULL
6952 : this->plt_->rela_plt());
6953 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6954 this->rela_dyn_, true, false);
6955
6956 // Emit any relocs we saved in an attempt to avoid generating COPY
6957 // relocs.
6958 if (this->copy_relocs_.any_saved_relocs())
6959 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6960
6961 // Fill in some more dynamic tags.
6962 Output_data_dynamic* const odyn = layout->dynamic_data();
6963 if (odyn != NULL)
6964 {
6965 if (this->plt_ != NULL
6966 && this->plt_->output_section() != NULL
6967 && this->plt_ ->has_tlsdesc_entry())
6968 {
6969 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6970 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6971 this->got_->finalize_data_size();
6972 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6973 this->plt_, plt_offset);
6974 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6975 this->got_, got_offset);
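// (Rough gloss, not from the original comments: DT_TLSDESC_PLT appears to
// record the address of the special tlsdesc PLT entry reserved by
// reserve_tlsdesc_entries(), and DT_TLSDESC_GOT the reserved GOT slot that
// the lazy trampoline uses to reach the runtime resolver.)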
6976 }
6977 }
6978
6979 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6980 // the .got section.
6981 Symbol* sym = this->global_offset_table_;
6982 if (sym != NULL)
6983 {
6984 uint64_t data_size = this->got_->current_data_size();
6985 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6986
6987 // If the .got section is more than 0x8000 bytes, we add
6988 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16-bit
6989 // relocations have a greater chance of working.
6990 if (data_size >= 0x8000)
6991 symtab->get_sized_symbol<size>(sym)->set_value(
6992 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6993 }
6994
6995 if (parameters->doing_static_link()
6996 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6997 {
6998 // If linking statically, make sure that the __rela_iplt symbols
6999 // were defined if necessary, even if we didn't create a PLT.
7000 static const Define_symbol_in_segment syms[] =
7001 {
7002 {
7003 "__rela_iplt_start", // name
7004 elfcpp::PT_LOAD, // segment_type
7005 elfcpp::PF_W, // segment_flags_set
7006 elfcpp::PF(0), // segment_flags_clear
7007 0, // value
7008 0, // size
7009 elfcpp::STT_NOTYPE, // type
7010 elfcpp::STB_GLOBAL, // binding
7011 elfcpp::STV_HIDDEN, // visibility
7012 0, // nonvis
7013 Symbol::SEGMENT_START, // offset_from_base
7014 true // only_if_ref
7015 },
7016 {
7017 "__rela_iplt_end", // name
7018 elfcpp::PT_LOAD, // segment_type
7019 elfcpp::PF_W, // segment_flags_set
7020 elfcpp::PF(0), // segment_flags_clear
7021 0, // value
7022 0, // size
7023 elfcpp::STT_NOTYPE, // type
7024 elfcpp::STB_GLOBAL, // binding
7025 elfcpp::STV_HIDDEN, // visibility
7026 0, // nonvis
7027 Symbol::SEGMENT_START, // offset_from_base
7028 true // only_if_ref
7029 }
7030 };
7031
7032 symtab->define_symbols(layout, 2, syms,
7033 layout->script_options()->saw_sections_clause());
7034 }
7035
7036 return;
7037 }
7038
7039 // Perform a relocation.
7040
7041 template<int size, bool big_endian>
7042 inline bool
7043 Target_aarch64<size, big_endian>::Relocate::relocate(
7044 const Relocate_info<size, big_endian>* relinfo,
7045 unsigned int,
7046 Target_aarch64<size, big_endian>* target,
7047 Output_section* ,
7048 size_t relnum,
7049 const unsigned char* preloc,
7050 const Sized_symbol<size>* gsym,
7051 const Symbol_value<size>* psymval,
7052 unsigned char* view,
7053 typename elfcpp::Elf_types<size>::Elf_Addr address,
7054 section_size_type /* view_size */)
7055 {
7056 if (view == NULL)
7057 return true;
7058
7059 typedef AArch64_relocate_functions<size, big_endian> Reloc;
7060
7061 const elfcpp::Rela<size, big_endian> rela(preloc);
7062 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
7063 const AArch64_reloc_property* reloc_property =
7064 aarch64_reloc_property_table->get_reloc_property(r_type);
7065
7066 if (reloc_property == NULL)
7067 {
7068 std::string reloc_name =
7069 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
7070 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7071 _("cannot relocate %s in object file"),
7072 reloc_name.c_str());
7073 return true;
7074 }
7075
7076 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
7077
7078 // Pick the value to use for symbols defined in the PLT.
7079 Symbol_value<size> symval;
7080 if (gsym != NULL
7081 && gsym->use_plt_offset(reloc_property->reference_flags()))
7082 {
7083 symval.set_output_value(target->plt_address_for_global(gsym));
7084 psymval = &symval;
7085 }
7086 else if (gsym == NULL && psymval->is_ifunc_symbol())
7087 {
7088 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7089 if (object->local_has_plt_offset(r_sym))
7090 {
7091 symval.set_output_value(target->plt_address_for_local(object, r_sym));
7092 psymval = &symval;
7093 }
7094 }
7095
7096 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7097
7098 // Get the GOT offset if needed.
7099 // For aarch64, the GOT pointer points to the start of the GOT section.
7100 bool have_got_offset = false;
7101 int got_offset = 0;
7102 int got_base = (target->got_ != NULL
7103 ? (target->got_->current_data_size() >= 0x8000
7104 ? 0x8000 : 0)
7105 : 0);
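// Illustrative example (hypothetical numbers): with a 0x9000-byte GOT and a
// symbol whose GOT slot is at offset 0x20, got_base is 0x8000 and
// got_offset = 0x20 - 0x8000 = -0x7fe0; the address-forming cases below
// compute got_->address() + got_base + got_offset = got_->address() + 0x20,
// so the bias applied to _GLOBAL_OFFSET_TABLE_ above cancels out here.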
7106 switch (r_type)
7107 {
7108 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
7109 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
7110 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
7111 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
7112 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
7113 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
7114 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
7115 case elfcpp::R_AARCH64_GOTREL64:
7116 case elfcpp::R_AARCH64_GOTREL32:
7117 case elfcpp::R_AARCH64_GOT_LD_PREL19:
7118 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
7119 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7120 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7121 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7122 if (gsym != NULL)
7123 {
7124 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
7125 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
7126 }
7127 else
7128 {
7129 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7130 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
7131 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
7132 - got_base);
7133 }
7134 have_got_offset = true;
7135 break;
7136
7137 default:
7138 break;
7139 }
7140
7141 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
7142 typename elfcpp::Elf_types<size>::Elf_Addr value;
7143 switch (r_type)
7144 {
7145 case elfcpp::R_AARCH64_NONE:
7146 break;
7147
7148 case elfcpp::R_AARCH64_ABS64:
7149 if (!parameters->options().apply_dynamic_relocs()
7150 && parameters->options().output_is_position_independent()
7151 && gsym != NULL
7152 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
7153 && !gsym->can_use_relative_reloc(false))
7154 // We have generated an absolute dynamic relocation, so do not
7155 // apply the relocation statically. (Works around bugs in older
7156 // Android dynamic linkers.)
7157 break;
7158 reloc_status = Reloc::template rela_ua<64>(
7159 view, object, psymval, addend, reloc_property);
7160 break;
7161
7162 case elfcpp::R_AARCH64_ABS32:
7163 if (!parameters->options().apply_dynamic_relocs()
7164 && parameters->options().output_is_position_independent()
7165 && gsym != NULL
7166 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7167 // We have generated an absolute dynamic relocation, so do not
7168 // apply the relocation statically. (Works around bugs in older
7169 // Android dynamic linkers.)
7170 break;
7171 reloc_status = Reloc::template rela_ua<32>(
7172 view, object, psymval, addend, reloc_property);
7173 break;
7174
7175 case elfcpp::R_AARCH64_ABS16:
7176 if (!parameters->options().apply_dynamic_relocs()
7177 && parameters->options().output_is_position_independent()
7178 && gsym != NULL
7179 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7180 // We have generated an absolute dynamic relocation, so do not
7181 // apply the relocation statically. (Works around bugs in older
7182 // Android dynamic linkers.)
7183 break;
7184 reloc_status = Reloc::template rela_ua<16>(
7185 view, object, psymval, addend, reloc_property);
7186 break;
7187
7188 case elfcpp::R_AARCH64_PREL64:
7189 reloc_status = Reloc::template pcrela_ua<64>(
7190 view, object, psymval, addend, address, reloc_property);
7191 break;
7192
7193 case elfcpp::R_AARCH64_PREL32:
7194 reloc_status = Reloc::template pcrela_ua<32>(
7195 view, object, psymval, addend, address, reloc_property);
7196 break;
7197
7198 case elfcpp::R_AARCH64_PREL16:
7199 reloc_status = Reloc::template pcrela_ua<16>(
7200 view, object, psymval, addend, address, reloc_property);
7201 break;
7202
7203 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7204 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7205 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7206 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7207 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7208 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7209 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7210 reloc_status = Reloc::template rela_general<32>(
7211 view, object, psymval, addend, reloc_property);
7212 break;
7213 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7214 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7215 case elfcpp::R_AARCH64_MOVW_SABS_G2:
7216 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7217 reloc_property);
7218 break;
7219
7220 case elfcpp::R_AARCH64_LD_PREL_LO19:
7221 reloc_status = Reloc::template pcrela_general<32>(
7222 view, object, psymval, addend, address, reloc_property);
7223 break;
7224
7225 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7226 reloc_status = Reloc::adr(view, object, psymval, addend,
7227 address, reloc_property);
7228 break;
7229
7230 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7231 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7232 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7233 reloc_property);
7234 break;
7235
7236 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7237 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7238 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7239 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7240 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7241 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7242 reloc_status = Reloc::template rela_general<32>(
7243 view, object, psymval, addend, reloc_property);
7244 break;
7245
7246 case elfcpp::R_AARCH64_CALL26:
7247 if (this->skip_call_tls_get_addr_)
7248 {
7249 // Double check that the TLSGD insn has been optimized away.
7250 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7251 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7252 reinterpret_cast<Insntype*>(view));
7253 gold_assert((insn & 0xff000000) == 0x91000000);
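// (Once tls_gd_to_le/tls_ld_to_le has rewritten the three-insn sequence,
// the insn at this CALL26 site is the trailing
// "add x0, x0, #:tprel_lo12:..." whose top byte is 0x91, which is what
// the assert above checks.)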
7254
7255 reloc_status = Reloc::STATUS_OKAY;
7256 this->skip_call_tls_get_addr_ = false;
7257 // Return false to stop further processing of this reloc.
7258 return false;
7259 }
7260 // Fall through.
7261 case elfcpp::R_AARCH64_JUMP26:
7262 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7263 gsym, psymval, object,
7264 target->stub_group_size_))
7265 break;
7266 // Fall through.
7267 case elfcpp::R_AARCH64_TSTBR14:
7268 case elfcpp::R_AARCH64_CONDBR19:
7269 reloc_status = Reloc::template pcrela_general<32>(
7270 view, object, psymval, addend, address, reloc_property);
7271 break;
7272
7273 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7274 gold_assert(have_got_offset);
7275 value = target->got_->address() + got_base + got_offset;
7276 reloc_status = Reloc::adrp(view, value + addend, address);
7277 break;
7278
7279 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7280 gold_assert(have_got_offset);
7281 value = target->got_->address() + got_base + got_offset;
7282 reloc_status = Reloc::template rela_general<32>(
7283 view, value, addend, reloc_property);
7284 break;
7285
7286 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7287 {
7288 gold_assert(have_got_offset);
7289 value = target->got_->address() + got_base + got_offset + addend -
7290 Reloc::Page(target->got_->address() + got_base);
7291 if ((value & 7) != 0)
7292 reloc_status = Reloc::STATUS_OVERFLOW;
7293 else
7294 reloc_status = Reloc::template reloc_common<32>(
7295 view, value, reloc_property);
7296 break;
7297 }
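// (Illustrative restatement of the computation above: the value is the GOT
// entry's offset from Page(GOT base), roughly G(GDAT(S)) + A -
// Page(GOT_origin); the field is scaled for an 8-byte load, hence the
// alignment check.)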
7298
7299 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7300 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7301 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7302 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7303 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7304 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7305 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7306 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7307 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7308 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7309 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7310 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7311 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7312 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7313 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7314 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7315 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7316 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7317 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
7318 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
7319 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
7320 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
7321 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
7322 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
7323 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
7324 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
7325 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7326 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7327 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7328 case elfcpp::R_AARCH64_TLSDESC_CALL:
7329 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7330 gsym, psymval, view, address);
7331 break;
7332
7333 // These are dynamic relocations, which are unexpected when linking.
7334 case elfcpp::R_AARCH64_COPY:
7335 case elfcpp::R_AARCH64_GLOB_DAT:
7336 case elfcpp::R_AARCH64_JUMP_SLOT:
7337 case elfcpp::R_AARCH64_RELATIVE:
7338 case elfcpp::R_AARCH64_IRELATIVE:
7339 case elfcpp::R_AARCH64_TLS_DTPREL64:
7340 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7341 case elfcpp::R_AARCH64_TLS_TPREL64:
7342 case elfcpp::R_AARCH64_TLSDESC:
7343 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7344 _("unexpected reloc %u in object file"),
7345 r_type);
7346 break;
7347
7348 default:
7349 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7350 _("unsupported reloc %s"),
7351 reloc_property->name().c_str());
7352 break;
7353 }
7354
7355 // Report any errors.
7356 switch (reloc_status)
7357 {
7358 case Reloc::STATUS_OKAY:
7359 break;
7360 case Reloc::STATUS_OVERFLOW:
7361 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7362 _("relocation overflow in %s"),
7363 reloc_property->name().c_str());
7364 break;
7365 case Reloc::STATUS_BAD_RELOC:
7366 gold_error_at_location(
7367 relinfo,
7368 relnum,
7369 rela.get_r_offset(),
7370 _("unexpected opcode while processing relocation %s"),
7371 reloc_property->name().c_str());
7372 break;
7373 default:
7374 gold_unreachable();
7375 }
7376
7377 return true;
7378 }
7379
7380
7381 template<int size, bool big_endian>
7382 inline
7383 typename AArch64_relocate_functions<size, big_endian>::Status
7384 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7385 const Relocate_info<size, big_endian>* relinfo,
7386 Target_aarch64<size, big_endian>* target,
7387 size_t relnum,
7388 const elfcpp::Rela<size, big_endian>& rela,
7389 unsigned int r_type, const Sized_symbol<size>* gsym,
7390 const Symbol_value<size>* psymval,
7391 unsigned char* view,
7392 typename elfcpp::Elf_types<size>::Elf_Addr address)
7393 {
7394 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7395 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7396
7397 Output_segment* tls_segment = relinfo->layout->tls_segment();
7398 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7399 const AArch64_reloc_property* reloc_property =
7400 aarch64_reloc_property_table->get_reloc_property(r_type);
7401 gold_assert(reloc_property != NULL);
7402
7403 const bool is_final = (gsym == NULL
7404 ? !parameters->options().shared()
7405 : gsym->final_value_is_known());
7406 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7407 optimize_tls_reloc(is_final, r_type);
7408
7409 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7410 int tls_got_offset_type;
7411 switch (r_type)
7412 {
7413 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7414 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7415 {
7416 if (tlsopt == tls::TLSOPT_TO_LE)
7417 {
7418 if (tls_segment == NULL)
7419 {
7420 gold_assert(parameters->errors()->error_count() > 0
7421 || issue_undefined_symbol_error(gsym));
7422 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7423 }
7424 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7425 psymval);
7426 }
7427 else if (tlsopt == tls::TLSOPT_NONE)
7428 {
7429 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7430 // First, get the address of the GOT entry.
7431 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7432 if (gsym != NULL)
7433 {
7434 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7435 got_entry_address = target->got_->address() +
7436 gsym->got_offset(tls_got_offset_type);
7437 }
7438 else
7439 {
7440 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7441 gold_assert(
7442 object->local_has_got_offset(r_sym, tls_got_offset_type));
7443 got_entry_address = target->got_->address() +
7444 object->local_got_offset(r_sym, tls_got_offset_type);
7445 }
7446
7447 // Relocate the address into the adrp/ld or adrp/add pair.
7448 switch (r_type)
7449 {
7450 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7451 return aarch64_reloc_funcs::adrp(
7452 view, got_entry_address + addend, address);
7453
7454 break;
7455
7456 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7457 return aarch64_reloc_funcs::template rela_general<32>(
7458 view, got_entry_address, addend, reloc_property);
7459 break;
7460
7461 default:
7462 gold_unreachable();
7463 }
7464 }
7465 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7466 _("unsupported gd_to_ie relaxation on %u"),
7467 r_type);
7468 }
7469 break;
7470
7471 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7472 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7473 {
7474 if (tlsopt == tls::TLSOPT_TO_LE)
7475 {
7476 if (tls_segment == NULL)
7477 {
7478 gold_assert(parameters->errors()->error_count() > 0
7479 || issue_undefined_symbol_error(gsym));
7480 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7481 }
7482 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7483 psymval);
7484 }
7485
7486 gold_assert(tlsopt == tls::TLSOPT_NONE);
7487 // Relocate the field with the offset of the GOT entry for
7488 // the module index.
7489 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7490 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7491 target->got_->address());
7492
7493 switch (r_type)
7494 {
7495 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7496 return aarch64_reloc_funcs::adrp(
7497 view, got_entry_address + addend, address);
7498 break;
7499
7500 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7501 return aarch64_reloc_funcs::template rela_general<32>(
7502 view, got_entry_address, addend, reloc_property);
7503 break;
7504
7505 default:
7506 gold_unreachable();
7507 }
7508 }
7509 break;
7510
7511 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7512 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7513 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7514 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7515 {
7516 AArch64_address value = psymval->value(object, 0);
7517 if (tlsopt == tls::TLSOPT_TO_LE)
7518 {
7519 if (tls_segment == NULL)
7520 {
7521 gold_assert(parameters->errors()->error_count() > 0
7522 || issue_undefined_symbol_error(gsym));
7523 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7524 }
7525 }
7526 switch (r_type)
7527 {
7528 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7529 return aarch64_reloc_funcs::movnz(view, value + addend,
7530 reloc_property);
7531 break;
7532
7533 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7534 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7535 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7536 return aarch64_reloc_funcs::template rela_general<32>(
7537 view, value, addend, reloc_property);
7538 break;
7539
7540 default:
7541 gold_unreachable();
7542 }
7543 // We should never reach here.
7544 }
7545 break;
7546
7547 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7548 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7549 {
7550 if (tlsopt == tls::TLSOPT_TO_LE)
7551 {
7552 if (tls_segment == NULL)
7553 {
7554 gold_assert(parameters->errors()->error_count() > 0
7555 || issue_undefined_symbol_error(gsym));
7556 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7557 }
7558 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7559 psymval);
7560 }
7561 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7562
7563 // First, get the address of the GOT entry.
7564 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7565 if (gsym != NULL)
7566 {
7567 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7568 got_entry_address = target->got_->address() +
7569 gsym->got_offset(tls_got_offset_type);
7570 }
7571 else
7572 {
7573 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7574 gold_assert(
7575 object->local_has_got_offset(r_sym, tls_got_offset_type));
7576 got_entry_address = target->got_->address() +
7577 object->local_got_offset(r_sym, tls_got_offset_type);
7578 }
7579 // Relocate the address into the adrp/ld or adrp/add pair.
7580 switch (r_type)
7581 {
7582 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7583 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7584 address);
7585 break;
7586 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7587 return aarch64_reloc_funcs::template rela_general<32>(
7588 view, got_entry_address, addend, reloc_property);
7589 default:
7590 gold_unreachable();
7591 }
7592 }
7593 // We should never reach here.
7594 break;
7595
7596 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7597 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7598 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7599 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7600 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7601 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7602 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7603 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7604 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
7605 case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
7606 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
7607 case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
7608 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
7609 case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
7610 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
7611 case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
7612 {
7613 gold_assert(tls_segment != NULL);
7614 AArch64_address value = psymval->value(object, 0);
7615
7616 if (!parameters->options().shared())
7617 {
7618 AArch64_address aligned_tcb_size =
7619 align_address(target->tcb_size(),
7620 tls_segment->maximum_alignment());
7621 value += aligned_tcb_size;
7622 switch (r_type)
7623 {
7624 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7625 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7626 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7627 return aarch64_reloc_funcs::movnz(view, value + addend,
7628 reloc_property);
7629 default:
7630 return aarch64_reloc_funcs::template
7631 rela_general<32>(view,
7632 value,
7633 addend,
7634 reloc_property);
7635 }
7636 }
7637 else
7638 gold_error(_("%s: unsupported reloc %u "
7639 "in non-static TLSLE mode."),
7640 object->name().c_str(), r_type);
7641 }
7642 break;
7643
7644 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7645 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7646 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7647 case elfcpp::R_AARCH64_TLSDESC_CALL:
7648 {
7649 if (tlsopt == tls::TLSOPT_TO_LE)
7650 {
7651 if (tls_segment == NULL)
7652 {
7653 gold_assert(parameters->errors()->error_count() > 0
7654 || issue_undefined_symbol_error(gsym));
7655 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7656 }
7657 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7658 view, psymval);
7659 }
7660 else
7661 {
7662 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7663 ? GOT_TYPE_TLS_OFFSET
7664 : GOT_TYPE_TLS_DESC);
7665 int got_tlsdesc_offset = 0;
7666 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7667 && tlsopt == tls::TLSOPT_NONE)
7668 {
7669 // We created GOT entries in the .got.tlsdesc portion of the
7670 // .got.plt section, but the offset stored in the symbol is the
7671 // offset within .got.tlsdesc.
7672 got_tlsdesc_offset = (target->got_tlsdesc_->address()
7673 - target->got_->address());
7674 }
7675 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7676 if (gsym != NULL)
7677 {
7678 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7679 got_entry_address = target->got_->address()
7680 + got_tlsdesc_offset
7681 + gsym->got_offset(tls_got_offset_type);
7682 }
7683 else
7684 {
7685 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7686 gold_assert(
7687 object->local_has_got_offset(r_sym, tls_got_offset_type));
7688 got_entry_address = target->got_->address() +
7689 got_tlsdesc_offset +
7690 object->local_got_offset(r_sym, tls_got_offset_type);
7691 }
7692 if (tlsopt == tls::TLSOPT_TO_IE)
7693 {
7694 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7695 view, psymval, got_entry_address,
7696 address);
7697 }
7698
7699 // Now do tlsdesc relocation.
7700 switch (r_type)
7701 {
7702 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7703 return aarch64_reloc_funcs::adrp(view,
7704 got_entry_address + addend,
7705 address);
7706 break;
7707 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7708 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7709 return aarch64_reloc_funcs::template rela_general<32>(
7710 view, got_entry_address, addend, reloc_property);
7711 break;
7712 case elfcpp::R_AARCH64_TLSDESC_CALL:
7713 return aarch64_reloc_funcs::STATUS_OKAY;
7714 break;
7715 default:
7716 gold_unreachable();
7717 }
7718 }
7719 }
7720 break;
7721
7722 default:
7723 gold_error(_("%s: unsupported TLS reloc %u."),
7724 object->name().c_str(), r_type);
7725 }
7726 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7727 } // End of relocate_tls.
7728
7729
7730 template<int size, bool big_endian>
7731 inline
7732 typename AArch64_relocate_functions<size, big_endian>::Status
7733 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7734 const Relocate_info<size, big_endian>* relinfo,
7735 Target_aarch64<size, big_endian>* target,
7736 const elfcpp::Rela<size, big_endian>& rela,
7737 unsigned int r_type,
7738 unsigned char* view,
7739 const Symbol_value<size>* psymval)
7740 {
7741 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7742 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7743 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7744
7745 Insntype* ip = reinterpret_cast<Insntype*>(view);
7746 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7747 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7748 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7749
7750 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7751 {
7752 // This is the 2nd reloc; the optimization should already have been
7753 // done.
7754 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7755 return aarch64_reloc_funcs::STATUS_OKAY;
7756 }
7757
7758 // The original sequence is -
7759 // 90000000 adrp x0, 0 <main>
7760 // 91000000 add x0, x0, #0x0
7761 // 94000000 bl 0 <__tls_get_addr>
7762 // optimized to sequence -
7763 // d53bd040 mrs x0, tpidr_el0
7764 // 91400000 add x0, x0, #0x0, lsl #12
7765 // 91000000 add x0, x0, #0x0
7766
7767 // Unlike tls_ie_to_le, we change all 3 insns in one call when we
7768 // encounter the first relocation, "R_AARCH64_TLSGD_ADR_PAGE21", because we
7769 // also have to change "bl __tls_get_addr", which has no corresponding TLS
7770 // relocation type. So before proceeding, we need to make sure the compiler
7771 // has not changed the expected sequence.
7772 if (!(insn1 == 0x90000000 // adrp x0,0
7773 && insn2 == 0x91000000 // add x0, x0, #0x0
7774 && insn3 == 0x94000000)) // bl 0
7775 {
7776 // Ideally we should give up gd_to_le relaxation and fall back to gd
7777 // access. However, the gd_to_le relaxation decision was made earlier,
7778 // in the scan stage, where we did not allocate any GOT entry for
7779 // this symbol. Therefore we have to exit and report an error now.
7780 gold_error(_("unexpected reloc insn sequence while relaxing "
7781 "tls gd to le for reloc %u."), r_type);
7782 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7783 }
7784
7785 // Write new insns.
7786 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7787 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7788 insn3 = 0x91000000; // add x0, x0, #0x0
7789 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7790 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7791 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7792
7793 // Calculate tprel value.
7794 Output_segment* tls_segment = relinfo->layout->tls_segment();
7795 gold_assert(tls_segment != NULL);
7796 AArch64_address value = psymval->value(relinfo->object, 0);
7797 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7798 AArch64_address aligned_tcb_size =
7799 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7800 AArch64_address x = value + aligned_tcb_size;
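// Illustrative example (hypothetical numbers): with a 16-byte TCB, 16-byte
// TLS alignment and a variable at offset 0x10 in the TLS block,
// x = 0x10 + 0x10 = 0x20; the HI12 reloc below fills the upper 12 bits of
// the add-lsl-12 insn and the LO12 reloc fills the low 12 bits of the
// final add.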
7801
7802 // After new insns are written, apply TLSLE relocs.
7803 const AArch64_reloc_property* rp1 =
7804 aarch64_reloc_property_table->get_reloc_property(
7805 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7806 const AArch64_reloc_property* rp2 =
7807 aarch64_reloc_property_table->get_reloc_property(
7808 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7809 gold_assert(rp1 != NULL && rp2 != NULL);
7810
7811 typename aarch64_reloc_funcs::Status s1 =
7812 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7813 x,
7814 addend,
7815 rp1);
7816 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7817 return s1;
7818
7819 typename aarch64_reloc_funcs::Status s2 =
7820 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7821 x,
7822 addend,
7823 rp2);
7824
7825 this->skip_call_tls_get_addr_ = true;
7826 return s2;
7827 } // End of tls_gd_to_le
7828
7829
7830 template<int size, bool big_endian>
7831 inline
7832 typename AArch64_relocate_functions<size, big_endian>::Status
7833 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7834 const Relocate_info<size, big_endian>* relinfo,
7835 Target_aarch64<size, big_endian>* target,
7836 const elfcpp::Rela<size, big_endian>& rela,
7837 unsigned int r_type,
7838 unsigned char* view,
7839 const Symbol_value<size>* psymval)
7840 {
7841 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7842 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7843 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7844
7845 Insntype* ip = reinterpret_cast<Insntype*>(view);
7846 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7847 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7848 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7849
7850 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7851 {
7852 // This is the 2nd reloc; the optimization should already have been
7853 // done.
7854 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7855 return aarch64_reloc_funcs::STATUS_OKAY;
7856 }
7857
7858 // The original sequence is -
7859 // 90000000 adrp x0, 0 <main>
7860 // 91000000 add x0, x0, #0x0
7861 // 94000000 bl 0 <__tls_get_addr>
7862 // optimized to sequence -
7863 // d53bd040 mrs x0, tpidr_el0
7864 // 91400000 add x0, x0, #0x0, lsl #12
7865 // 91000000 add x0, x0, #0x0
7866
7867 // Unlike tls_ie_to_le, we change all 3 insns in one call when we
7868 // encounter the first relocation, "R_AARCH64_TLSLD_ADR_PAGE21", because we
7869 // also have to change "bl __tls_get_addr", which has no corresponding TLS
7870 // relocation type. So before proceeding, we need to make sure the compiler
7871 // has not changed the expected sequence.
7872 if (!(insn1 == 0x90000000 // adrp x0,0
7873 && insn2 == 0x91000000 // add x0, x0, #0x0
7874 && insn3 == 0x94000000)) // bl 0
7875 {
7876 // Ideally we should give up ld_to_le relaxation and fall back to ld
7877 // access. However, the ld_to_le relaxation decision was made earlier,
7878 // in the scan stage, where we did not allocate a GOT entry for
7879 // this symbol. Therefore we have to exit and report an error now.
7880 gold_error(_("unexpected reloc insn sequence while relaxing "
7881 "tls gd to le for reloc %u."), r_type);
7882 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7883 }
7884
7885 // Write new insns.
7886 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7887 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7888 insn3 = 0x91000000; // add x0, x0, #0x0
7889 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7890 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7891 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7892
7893 // Calculate tprel value.
7894 Output_segment* tls_segment = relinfo->layout->tls_segment();
7895 gold_assert(tls_segment != NULL);
7896 AArch64_address value = psymval->value(relinfo->object, 0);
7897 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7898 AArch64_address aligned_tcb_size =
7899 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7900 AArch64_address x = value + aligned_tcb_size;
7901
7902 // After new insns are written, apply TLSLE relocs.
7903 const AArch64_reloc_property* rp1 =
7904 aarch64_reloc_property_table->get_reloc_property(
7905 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7906 const AArch64_reloc_property* rp2 =
7907 aarch64_reloc_property_table->get_reloc_property(
7908 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7909 gold_assert(rp1 != NULL && rp2 != NULL);
7910
7911 typename aarch64_reloc_funcs::Status s1 =
7912 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7913 x,
7914 addend,
7915 rp1);
7916 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7917 return s1;
7918
7919 typename aarch64_reloc_funcs::Status s2 =
7920 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7921 x,
7922 addend,
7923 rp2);
7924
7925 this->skip_call_tls_get_addr_ = true;
7926 return s2;
7927
7928 } // End of tls_ld_to_le
7929
7930 template<int size, bool big_endian>
7931 inline
7932 typename AArch64_relocate_functions<size, big_endian>::Status
7933 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7934 const Relocate_info<size, big_endian>* relinfo,
7935 Target_aarch64<size, big_endian>* target,
7936 const elfcpp::Rela<size, big_endian>& rela,
7937 unsigned int r_type,
7938 unsigned char* view,
7939 const Symbol_value<size>* psymval)
7940 {
7941 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7942 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7943 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7944
7945 AArch64_address value = psymval->value(relinfo->object, 0);
7946 Output_segment* tls_segment = relinfo->layout->tls_segment();
7947 AArch64_address aligned_tcb_address =
7948 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7949 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7950 AArch64_address x = value + addend + aligned_tcb_address;
7951 // "x" is the offset to tp, we can only do this if x is within
7952 // range [0, 2^32-1]
7953 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7954 {
7955 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7956 r_type);
7957 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7958 }
7959
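// Encoding sketch for the rewrite below:
//   movz Xd, #imm16, lsl #16  ->  0xd2a00000 | Rd | (imm16 << 5)
//   movk Xd, #imm16           ->  0xf2800000 | Rd | (imm16 << 5)
// so the adrp/ldr pair becomes movz/movk materializing the 32-bit tp
// offset "x" into the register that previously received the GOT load.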
7960 Insntype* ip = reinterpret_cast<Insntype*>(view);
7961 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7962 unsigned int regno;
7963 Insntype newinsn;
7964 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7965 {
7966 // Generate movz.
7967 regno = (insn & 0x1f);
7968 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7969 }
7970 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7971 {
7972 // Generate movk.
7973 regno = (insn & 0x1f);
7974 gold_assert(regno == ((insn >> 5) & 0x1f));
7975 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7976 }
7977 else
7978 gold_unreachable();
7979
7980 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7981 return aarch64_reloc_funcs::STATUS_OKAY;
7982 } // End of tls_ie_to_le
7983
7984
7985 template<int size, bool big_endian>
7986 inline
7987 typename AArch64_relocate_functions<size, big_endian>::Status
7988 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7989 const Relocate_info<size, big_endian>* relinfo,
7990 Target_aarch64<size, big_endian>* target,
7991 const elfcpp::Rela<size, big_endian>& rela,
7992 unsigned int r_type,
7993 unsigned char* view,
7994 const Symbol_value<size>* psymval)
7995 {
7996 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7997 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7998 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7999
8000 // TLSDESC-GD sequence is like:
8001 // adrp x0, :tlsdesc:v1
8002 // ldr x1, [x0, #:tlsdesc_lo12:v1]
8003 // add x0, x0, :tlsdesc_lo12:v1
8004 // .tlsdesccall v1
8005 // blr x1
8006 // After desc_gd_to_le optimization, the sequence will be like:
8007 // movz x0, #0x0, lsl #16
8008 // movk x0, #0x10
8009 // nop
8010 // nop
8011
8012 // Calculate tprel value.
8013 Output_segment* tls_segment = relinfo->layout->tls_segment();
8014 gold_assert(tls_segment != NULL);
8015 Insntype* ip = reinterpret_cast<Insntype*>(view);
8016 const elfcpp::Elf_Xword addend = rela.get_r_addend();
8017 AArch64_address value = psymval->value(relinfo->object, addend);
8018 AArch64_address aligned_tcb_size =
8019 align_address(target->tcb_size(), tls_segment->maximum_alignment());
8020 AArch64_address x = value + aligned_tcb_size;
8021 // x is the offset from tp; we can only do this if x is within the range
8022 // [0, 2^32-1]. If x is out of range, fail and exit.
8023 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
8024 {
8025 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
8026 "cannot do gd_to_le relaxation."), r_type);
8027 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
8028 }
8029 Insntype newinsn;
8030 switch (r_type)
8031 {
8032 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
8033 case elfcpp::R_AARCH64_TLSDESC_CALL:
8034 // Change to nop
8035 newinsn = 0xd503201f;
8036 break;
8037
8038 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
8039 // Change to movz.
8040 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
8041 break;
8042
8043 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
8044 // Change to movk.
8045 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
8046 break;
8047
8048 default:
8049 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
8050 r_type);
8051 gold_unreachable();
8052 }
8053 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
8054 return aarch64_reloc_funcs::STATUS_OKAY;
8055 } // End of tls_desc_gd_to_le
8056
8057
8058 template<int size, bool big_endian>
8059 inline
8060 typename AArch64_relocate_functions<size, big_endian>::Status
8061 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
8062 const Relocate_info<size, big_endian>* /* relinfo */,
8063 Target_aarch64<size, big_endian>* /* target */,
8064 const elfcpp::Rela<size, big_endian>& rela,
8065 unsigned int r_type,
8066 unsigned char* view,
8067 const Symbol_value<size>* /* psymval */,
8068 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
8069 typename elfcpp::Elf_types<size>::Elf_Addr address)
8070 {
8071 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
8072 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
8073
8074 // TLSDESC-GD sequence is like:
8075 // adrp x0, :tlsdesc:v1
8076 // ldr x1, [x0, #:tlsdesc_lo12:v1]
8077 // add x0, x0, :tlsdesc_lo12:v1
8078 // .tlsdesccall v1
8079 // blr x1
8080 // After desc_gd_to_ie optimization, the sequence will be like:
8081 // adrp x0, :tlsie:v1
8082 // ldr x0, [x0, :tlsie_lo12:v1]
8083 // nop
8084 // nop
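// (Illustrative note: in the LD64 case below the original ldr targeted x1,
// so the code clears bits [4:0] of the insn to retarget it to x0 before
// applying an IE-style GOTTPREL_LO12 relocation, matching the rewritten
// sequence above.)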
8085
8086 Insntype* ip = reinterpret_cast<Insntype*>(view);
8087 const elfcpp::Elf_Xword addend = rela.get_r_addend();
8088 Insntype newinsn;
8089 switch (r_type)
8090 {
8091 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
8092 case elfcpp::R_AARCH64_TLSDESC_CALL:
8093 // Change to nop
8094 newinsn = 0xd503201f;
8095 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
8096 break;
8097
8098 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
8099 {
8100 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
8101 address);
8102 }
8103 break;
8104
8105 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
8106 {
8107 // Set ldr target register to be x0.
8108 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
8109 insn &= 0xffffffe0;
8110 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
8111 // Do relocation.
8112 const AArch64_reloc_property* reloc_property =
8113 aarch64_reloc_property_table->get_reloc_property(
8114 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8115 return aarch64_reloc_funcs::template rela_general<32>(
8116 view, got_entry_address, addend, reloc_property);
8117 }
8118 break;
8119
8120 default:
8121 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
8122 r_type);
8123 gold_unreachable();
8124 }
8125 return aarch64_reloc_funcs::STATUS_OKAY;
8126 } // End of tls_desc_gd_to_ie
8127
8128 // Relocate section data.
8129
8130 template<int size, bool big_endian>
8131 void
8132 Target_aarch64<size, big_endian>::relocate_section(
8133 const Relocate_info<size, big_endian>* relinfo,
8134 unsigned int sh_type,
8135 const unsigned char* prelocs,
8136 size_t reloc_count,
8137 Output_section* output_section,
8138 bool needs_special_offset_handling,
8139 unsigned char* view,
8140 typename elfcpp::Elf_types<size>::Elf_Addr address,
8141 section_size_type view_size,
8142 const Reloc_symbol_changes* reloc_symbol_changes)
8143 {
8144 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
8145 typedef Target_aarch64<size, big_endian> Aarch64;
8146 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
8147 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8148 Classify_reloc;
8149
8150 gold_assert(sh_type == elfcpp::SHT_RELA);
8151
8152 // See if we are relocating a relaxed input section. If so, the view
8153 // covers the whole output section and we need to adjust accordingly.
8154 if (needs_special_offset_handling)
8155 {
8156 const Output_relaxed_input_section* poris =
8157 output_section->find_relaxed_input_section(relinfo->object,
8158 relinfo->data_shndx);
8159 if (poris != NULL)
8160 {
8161 Address section_address = poris->address();
8162 section_size_type section_size = poris->data_size();
8163
8164 gold_assert((section_address >= address)
8165 && ((section_address + section_size)
8166 <= (address + view_size)));
8167
8168 off_t offset = section_address - address;
8169 view += offset;
8170 address += offset;
8171 view_size = section_size;
8172 }
8173 }
8174
8175 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
8176 gold::Default_comdat_behavior, Classify_reloc>(
8177 relinfo,
8178 this,
8179 prelocs,
8180 reloc_count,
8181 output_section,
8182 needs_special_offset_handling,
8183 view,
8184 address,
8185 view_size,
8186 reloc_symbol_changes);
8187 }
8188
8189 // Scan the relocs during a relocatable link.
8190
8191 template<int size, bool big_endian>
8192 void
8193 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
8194 Symbol_table* symtab,
8195 Layout* layout,
8196 Sized_relobj_file<size, big_endian>* object,
8197 unsigned int data_shndx,
8198 unsigned int sh_type,
8199 const unsigned char* prelocs,
8200 size_t reloc_count,
8201 Output_section* output_section,
8202 bool needs_special_offset_handling,
8203 size_t local_symbol_count,
8204 const unsigned char* plocal_symbols,
8205 Relocatable_relocs* rr)
8206 {
8207 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8208 Classify_reloc;
8209 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
8210 Scan_relocatable_relocs;
8211
8212 gold_assert(sh_type == elfcpp::SHT_RELA);
8213
8214 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8215 symtab,
8216 layout,
8217 object,
8218 data_shndx,
8219 prelocs,
8220 reloc_count,
8221 output_section,
8222 needs_special_offset_handling,
8223 local_symbol_count,
8224 plocal_symbols,
8225 rr);
8226 }
8227
8228 // Scan the relocs for --emit-relocs.
8229
8230 template<int size, bool big_endian>
8231 void
8232 Target_aarch64<size, big_endian>::emit_relocs_scan(
8233 Symbol_table* symtab,
8234 Layout* layout,
8235 Sized_relobj_file<size, big_endian>* object,
8236 unsigned int data_shndx,
8237 unsigned int sh_type,
8238 const unsigned char* prelocs,
8239 size_t reloc_count,
8240 Output_section* output_section,
8241 bool needs_special_offset_handling,
8242 size_t local_symbol_count,
8243 const unsigned char* plocal_syms,
8244 Relocatable_relocs* rr)
8245 {
8246 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8247 Classify_reloc;
8248 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8249 Emit_relocs_strategy;
8250
8251 gold_assert(sh_type == elfcpp::SHT_RELA);
8252
8253 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8254 symtab,
8255 layout,
8256 object,
8257 data_shndx,
8258 prelocs,
8259 reloc_count,
8260 output_section,
8261 needs_special_offset_handling,
8262 local_symbol_count,
8263 plocal_syms,
8264 rr);
8265 }
8266
8267 // Relocate a section during a relocatable link.
8268
8269 template<int size, bool big_endian>
8270 void
8271 Target_aarch64<size, big_endian>::relocate_relocs(
8272 const Relocate_info<size, big_endian>* relinfo,
8273 unsigned int sh_type,
8274 const unsigned char* prelocs,
8275 size_t reloc_count,
8276 Output_section* output_section,
8277 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8278 unsigned char* view,
8279 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8280 section_size_type view_size,
8281 unsigned char* reloc_view,
8282 section_size_type reloc_view_size)
8283 {
8284 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8285 Classify_reloc;
8286
8287 gold_assert(sh_type == elfcpp::SHT_RELA);
8288
8289 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8290 relinfo,
8291 prelocs,
8292 reloc_count,
8293 output_section,
8294 offset_in_output_section,
8295 view,
8296 view_address,
8297 view_size,
8298 reloc_view,
8299 reloc_view_size);
8300 }
8301
8302
8303 // Return whether this is a 3-insn erratum 843419 sequence.
8304
8305 template<int size, bool big_endian>
8306 bool
8307 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8308 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8309 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8310 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8311 {
8312 unsigned rt1, rt2;
8313 bool load, pair;
8314
8315 // The 2nd insn is a single-register load or store, or a register-pair
8316 // store.
8317 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8318 && (!pair || (pair && !load)))
8319 {
8320 // The 3rd insn is a load or store instruction from the "Load/store
8321 // register (unsigned immediate)" encoding class, using Rn as the
8322 // base address register.
8323 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8324 && (Insn_utilities::aarch64_rn(insn3)
8325 == Insn_utilities::aarch64_rd(insn1)))
8326 return true;
8327 }
8328 return false;
8329 }
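// (Hedged summary: erratum 843419 involves an adrp in one of the last two
// insn slots of a 4KiB page (page offset 0xff8 or 0xffc) followed closely
// by a load/store using the adrp destination as base, as matched above;
// scan_erratum_843419_span below only scans those page offsets.)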
8330
8331
8332 // Return whether this is an erratum 835769 sequence.
8333 // (Implemented similarly to elfnn-aarch64.c.)
8334
8335 template<int size, bool big_endian>
8336 bool
8337 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8338 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8339 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8340 {
8341 uint32_t rt;
8342 uint32_t rt2 = 0;
8343 uint32_t rn;
8344 uint32_t rm;
8345 uint32_t ra;
8346 bool pair;
8347 bool load;
8348
8349 if (Insn_utilities::aarch64_mlxl(insn2)
8350 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8351 {
8352 /* Any SIMD memory op is independent of the subsequent MLA
8353 by definition of the erratum. */
8354 if (Insn_utilities::aarch64_bit(insn1, 26))
8355 return true;
8356
8357 /* If not SIMD, check for integer memory ops and MLA relationship. */
8358 rn = Insn_utilities::aarch64_rn(insn2);
8359 ra = Insn_utilities::aarch64_ra(insn2);
8360 rm = Insn_utilities::aarch64_rm(insn2);
8361
8362 /* If this is a load and there's a true (RAW) dependency, we are safe
8363 and this is not an erratum sequence. */
8364 if (load &&
8365 (rt == rn || rt == rm || rt == ra
8366 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8367 return false;
8368
8369 /* We conservatively put out stubs for all other cases (including
8370 writebacks). */
8371 return true;
8372 }
8373
8374 return false;
8375 }
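// (Hedged summary: erratum 835769 concerns a 64-bit multiply-accumulate,
// matched by aarch64_mlxl, that immediately follows a memory access; unless
// a true read-after-write dependency makes the pair safe, the linker
// conservatively routes the second insn through an erratum stub, see
// scan_erratum_835769_span below.)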
8376
8377
8378 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8379
8380 template<int size, bool big_endian>
8381 void
8382 Target_aarch64<size, big_endian>::create_erratum_stub(
8383 AArch64_relobj<size, big_endian>* relobj,
8384 unsigned int shndx,
8385 section_size_type erratum_insn_offset,
8386 Address erratum_address,
8387 typename Insn_utilities::Insntype erratum_insn,
8388 int erratum_type,
8389 unsigned int e843419_adrp_offset)
8390 {
8391 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8392 The_stub_table* stub_table = relobj->stub_table(shndx);
8393 gold_assert(stub_table != NULL);
8394 if (stub_table->find_erratum_stub(relobj,
8395 shndx,
8396 erratum_insn_offset) == NULL)
8397 {
8398 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8399 The_erratum_stub* stub;
8400 if (erratum_type == ST_E_835769)
8401 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8402 erratum_insn_offset);
8403 else if (erratum_type == ST_E_843419)
8404 stub = new E843419_stub<size, big_endian>(
8405 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8406 else
8407 gold_unreachable();
8408 stub->set_erratum_insn(erratum_insn);
8409 stub->set_erratum_address(erratum_address);
8410 // For errata ST_E_843419 and ST_E_835769, the destination address is
8411 // always the next insn after the erratum insn.
8412 stub->set_destination_address(erratum_address + BPI);
8413 stub_table->add_erratum_stub(stub);
8414 }
8415 }
8416
8417
8418 // Scan for erratum 835769 in section SHNDX over the range
8419 // [output_address + span_start, output_address + span_end). Note that we do
8420 // not share code with the scan_erratum_843419_span function, because for
8421 // 843419 we optimize by scanning only the last few insns of each page,
8422 // whereas for 835769 we need to scan every insn.
8423
8424 template<int size, bool big_endian>
8425 void
8426 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8427 AArch64_relobj<size, big_endian>* relobj,
8428 unsigned int shndx,
8429 const section_size_type span_start,
8430 const section_size_type span_end,
8431 unsigned char* input_view,
8432 Address output_address)
8433 {
8434 typedef typename Insn_utilities::Insntype Insntype;
8435
8436 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8437
8438 // Adjust output_address and view to the start of span.
8439 output_address += span_start;
8440 input_view += span_start;
8441
8442 section_size_type span_length = span_end - span_start;
8443 section_size_type offset = 0;
8444 for (offset = 0; offset + BPI < span_length; offset += BPI)
8445 {
8446 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8447 Insntype insn1 = ip[0];
8448 Insntype insn2 = ip[1];
8449 if (is_erratum_835769_sequence(insn1, insn2))
8450 {
8451 Insntype erratum_insn = insn2;
8452 // "span_start + offset" is the offset for insn1. So for insn2, it is
8453 // "span_start + offset + BPI".
8454 section_size_type erratum_insn_offset = span_start + offset + BPI;
8455 Address erratum_address = output_address + offset + BPI;
8456 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8457 "section %d, offset 0x%08x."),
8458 relobj->name().c_str(), shndx,
8459 (unsigned int)(span_start + offset));
8460
8461 this->create_erratum_stub(relobj, shndx,
8462 erratum_insn_offset, erratum_address,
8463 erratum_insn, ST_E_835769);
8464 offset += BPI; // Skip the multiply-accumulate insn.
8465 }
8466 }
8467 } // End of "Target_aarch64::scan_erratum_835769_span".
8468
8469
8470 // Scan section SHNDX range
8471 // [output_address + span_start, output_address + span_end) for erratum 843419.
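// For illustration only (registers and the symbol are made up): a minimal
// sequence that is_erratum_843419_sequence would flag, assuming the ADRP
// sits at page offset 0xff8 or 0xffc:
//
//   adrp x0, some_symbol     // insn1: writes the page address into x0
//   ldr  x1, [x2, #8]        // insn2: single-register load/store (a store
//                            //        pair would also qualify)
//   ldr  x3, [x0, #16]       // insn3: load/store (unsigned imm) using x0,
//                            //        the ADRP destination, as its base
//
// A single non-branch insn may also sit between insn2 and the final
// load/store; the scan below handles that case explicitly.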
8472
8473 template<int size, bool big_endian>
8474 void
8475 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8476 AArch64_relobj<size, big_endian>* relobj,
8477 unsigned int shndx,
8478 const section_size_type span_start,
8479 const section_size_type span_end,
8480 unsigned char* input_view,
8481 Address output_address)
8482 {
8483 typedef typename Insn_utilities::Insntype Insntype;
8484
8485 // Adjust output_address and view to the start of span.
8486 output_address += span_start;
8487 input_view += span_start;
8488
8489 if ((output_address & 0x03) != 0)
8490 return;
8491
8492 section_size_type offset = 0;
8493 section_size_type span_length = span_end - span_start;
8494 // The first instruction (the ADRP) must be at page offset 0xFF8 or 0xFFC.
8495 unsigned int page_offset = output_address & 0xFFF;
8496 // Make sure the starting position, that is "output_address + offset",
8497 // is at page offset 0xff8 or 0xffc.
8498 if (page_offset < 0xff8)
8499 offset = 0xff8 - page_offset;
8500 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8501 {
8502 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8503 Insntype insn1 = ip[0];
8504 if (Insn_utilities::is_adrp(insn1))
8505 {
8506 Insntype insn2 = ip[1];
8507 Insntype insn3 = ip[2];
8508 Insntype erratum_insn;
8509 unsigned insn_offset;
8510 bool do_report = false;
8511 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8512 {
8513 do_report = true;
8514 erratum_insn = insn3;
8515 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8516 }
8517 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8518 {
8519 // Optionally we can have an insn between insn2 and the final load/store insn.
8520 Insntype insn_opt = ip[2];
8521 // And insn_opt must not be a branch.
8522 if (!Insn_utilities::aarch64_b(insn_opt)
8523 && !Insn_utilities::aarch64_bl(insn_opt)
8524 && !Insn_utilities::aarch64_blr(insn_opt)
8525 && !Insn_utilities::aarch64_br(insn_opt))
8526 {
8527 // And insn_opt must not write to the destination register of insn1.
8528 // However we scan conservatively, which means we may fix/report more
8529 // than necessary, but that does no harm.
8530
8531 Insntype insn4 = ip[3];
8532 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8533 {
8534 do_report = true;
8535 erratum_insn = insn4;
8536 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8537 }
8538 }
8539 }
8540 if (do_report)
8541 {
8542 unsigned int erratum_insn_offset =
8543 span_start + offset + insn_offset;
8544 Address erratum_address =
8545 output_address + offset + insn_offset;
8546 create_erratum_stub(relobj, shndx,
8547 erratum_insn_offset, erratum_address,
8548 erratum_insn, ST_E_843419,
8549 span_start + offset);
8550 }
8551 }
8552
8553 // Advance to next candidate instruction. We only consider instruction
8554 // sequences starting at a page offset of 0xff8 or 0xffc.
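// For example: from an insn at page offset 0xff8 the next candidate slot,
// 0xffc, is 4 bytes ahead; from 0xffc the next candidate is the following
// page's 0xff8, which is 0xffc bytes ahead (0xffc + 0xffc = 0x1ff8).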
8555 page_offset = (output_address + offset) & 0xfff;
8556 if (page_offset == 0xff8)
8557 offset += 4;
8558 else // (page_offset == 0xffc), we move to next page's 0xff8.
8559 offset += 0xffc;
8560 }
8561 } // End of "Target_aarch64::scan_erratum_843419_span".
8562
8563
8564 // The selector for aarch64 object files.
8565
8566 template<int size, bool big_endian>
8567 class Target_selector_aarch64 : public Target_selector
8568 {
8569 public:
8570 Target_selector_aarch64();
8571
8572 virtual Target*
8573 do_instantiate_target()
8574 { return new Target_aarch64<size, big_endian>(); }
8575 };
8576
8577 template<>
8578 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8579 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8580 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8581 { }
8582
8583 template<>
8584 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8585 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8586 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8587 { }
8588
8589 template<>
8590 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8591 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8592 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8593 { }
8594
8595 template<>
8596 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8597 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8598 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8599 { }
8600
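// These file-scope instances exist for their construction side effect: the
// Target_selector base-class constructor registers each selector with gold's
// target-selection machinery, so the matching Target_aarch64 instantiation
// can be chosen for AArch64 input files.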
8601 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8602 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8603 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8604 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8605
8606 } // End anonymous namespace.