1 // aarch64.cc -- aarch64 target support for gold.
2
3 // Copyright (C) 2014-2017 Free Software Foundation, Inc.
4 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26 #include <map>
27 #include <set>
28
29 #include "elfcpp.h"
30 #include "dwarf.h"
31 #include "parameters.h"
32 #include "reloc.h"
33 #include "aarch64.h"
34 #include "object.h"
35 #include "symtab.h"
36 #include "layout.h"
37 #include "output.h"
38 #include "copy-relocs.h"
39 #include "target.h"
40 #include "target-reloc.h"
41 #include "target-select.h"
42 #include "tls.h"
43 #include "freebsd.h"
44 #include "nacl.h"
45 #include "gc.h"
46 #include "icf.h"
47 #include "aarch64-reloc-property.h"
48
49 // The first three .got.plt entries are reserved.
50 const int32_t AARCH64_GOTPLT_RESERVE_COUNT = 3;
51
52
53 namespace
54 {
55
56 using namespace gold;
57
58 template<int size, bool big_endian>
59 class Output_data_plt_aarch64;
60
61 template<int size, bool big_endian>
62 class Output_data_plt_aarch64_standard;
63
64 template<int size, bool big_endian>
65 class Target_aarch64;
66
67 template<int size, bool big_endian>
68 class AArch64_relocate_functions;
69
70 // Utility class dealing with insns. This is ported from macros in
71 // bfd/elfnn-aarch64.c, but wrapped inside a class as static members. This
72 // class is used in erratum sequence scanning.
73
74 template<bool big_endian>
75 class AArch64_insn_utilities
76 {
77 public:
78 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
79
80 static const int BYTES_PER_INSN;
81
82 // Zero register encoding - 31.
83 static const unsigned int AARCH64_ZR;
84
85 static unsigned int
86 aarch64_bit(Insntype insn, int pos)
87 { return ((1 << pos) & insn) >> pos; }
88
89 static unsigned int
90 aarch64_bits(Insntype insn, int pos, int l)
91 { return (insn >> pos) & ((1 << l) - 1); }
92
93 // Get the encoding field "op31" of 3-source data processing insns. "op31" is
94 // the name defined in armv8 insn manual C3.5.9.
95 static unsigned int
96 aarch64_op31(Insntype insn)
97 { return aarch64_bits(insn, 21, 3); }
98
99 // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
100 // third source register. See armv8 insn manual C3.5.9.
101 static unsigned int
102 aarch64_ra(Insntype insn)
103 { return aarch64_bits(insn, 10, 5); }
104
105 static bool
106 is_adr(const Insntype insn)
107 { return (insn & 0x9F000000) == 0x10000000; }
108
109 static bool
110 is_adrp(const Insntype insn)
111 { return (insn & 0x9F000000) == 0x90000000; }
112
113 static bool
114 is_mrs_tpidr_el0(const Insntype insn)
115 { return (insn & 0xFFFFFFE0) == 0xd53bd040; }
116
117 static unsigned int
118 aarch64_rm(const Insntype insn)
119 { return aarch64_bits(insn, 16, 5); }
120
121 static unsigned int
122 aarch64_rn(const Insntype insn)
123 { return aarch64_bits(insn, 5, 5); }
124
125 static unsigned int
126 aarch64_rd(const Insntype insn)
127 { return aarch64_bits(insn, 0, 5); }
128
129 static unsigned int
130 aarch64_rt(const Insntype insn)
131 { return aarch64_bits(insn, 0, 5); }
132
133 static unsigned int
134 aarch64_rt2(const Insntype insn)
135 { return aarch64_bits(insn, 10, 5); }
136
137 // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
138 static Insntype
139 aarch64_adr_encode_imm(Insntype adr, int imm21)
140 {
141 gold_assert(is_adr(adr));
142 gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
143 const int mask19 = (1 << 19) - 1;
144 const int mask2 = 3;
145 adr &= ~((mask19 << 5) | (mask2 << 29));
146 adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
147 return adr;
148 }
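// Illustrative example (values are made up, not from the original source):
// encoding imm21 = 0x1234 into "adr ip0, ." (0x10000010, Rd = x16). The low
// 2 bits go to insn bits [30:29] and the high 19 bits to insn bits [23:5]:
//   immlo  = 0x1234 & 0x3             = 0x0   -> bits [30:29]
//   immhi  = (0x1234 >> 2) & mask19   = 0x48d -> bits [23:5]
//   result = 0x10000010 | (0x0 << 29) | (0x48d << 5) = 0x100091b0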
149
150 // Retrieve the adrp 33-bit signed imm value. It is obtained by multiplying
151 // the 21-bit signed imm encoded in the insn by 4K (the page size) and
152 // sign-extending to 64 bits, resulting in [-4G, 4G) with the 12 LSBs being 0.
153 static int64_t
154 aarch64_adrp_decode_imm(const Insntype adrp)
155 {
156 const int mask19 = (1 << 19) - 1;
157 const int mask2 = 3;
158 gold_assert(is_adrp(adrp));
159 // 21-bit imm encoded in adrp.
160 uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
161 // Retrieve msb of 21-bit-signed imm for sign extension.
162 uint64_t msbt = (imm >> 20) & 1;
163 // Real value is imm multiplied by 4k. Value now has 33-bit information.
164 int64_t value = imm << 12;
165 // Sign extend to 64-bit by repeating msbt 31 (64-33) times and merge it
166 // with value.
167 return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
168 }
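// Illustrative examples (values are made up, not from the original source):
//   adrp with imm21 = 3 (insn 0xf0000010, "adrp ip0, +3 pages") decodes to
//   3 << 12 = 0x3000 (12 KiB forward); adrp with imm21 = -1 (0x1fffff)
//   decodes to (0x1fffff << 12) merged with the sign-extension mask
//   0xfffffffe00000000, i.e. 0xfffffffffffff000 = -4096 (one page backward).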
169
170 static bool
171 aarch64_b(const Insntype insn)
172 { return (insn & 0xFC000000) == 0x14000000; }
173
174 static bool
175 aarch64_bl(const Insntype insn)
176 { return (insn & 0xFC000000) == 0x94000000; }
177
178 static bool
179 aarch64_blr(const Insntype insn)
180 { return (insn & 0xFFFFFC1F) == 0xD63F0000; }
181
182 static bool
183 aarch64_br(const Insntype insn)
184 { return (insn & 0xFFFFFC1F) == 0xD61F0000; }
185
186 // All ld/st ops. See C4-182 of the ARM ARM. The encoding space for
187 // LD_PCREL, LDST_RO, LDST_UI and LDST_UIMM covers prefetch ops.
188 static bool
189 aarch64_ld(Insntype insn) { return aarch64_bit(insn, 22) == 1; }
190
191 static bool
192 aarch64_ldst(Insntype insn)
193 { return (insn & 0x0a000000) == 0x08000000; }
194
195 static bool
196 aarch64_ldst_ex(Insntype insn)
197 { return (insn & 0x3f000000) == 0x08000000; }
198
199 static bool
200 aarch64_ldst_pcrel(Insntype insn)
201 { return (insn & 0x3b000000) == 0x18000000; }
202
203 static bool
204 aarch64_ldst_nap(Insntype insn)
205 { return (insn & 0x3b800000) == 0x28000000; }
206
207 static bool
208 aarch64_ldstp_pi(Insntype insn)
209 { return (insn & 0x3b800000) == 0x28800000; }
210
211 static bool
212 aarch64_ldstp_o(Insntype insn)
213 { return (insn & 0x3b800000) == 0x29000000; }
214
215 static bool
216 aarch64_ldstp_pre(Insntype insn)
217 { return (insn & 0x3b800000) == 0x29800000; }
218
219 static bool
220 aarch64_ldst_ui(Insntype insn)
221 { return (insn & 0x3b200c00) == 0x38000000; }
222
223 static bool
224 aarch64_ldst_piimm(Insntype insn)
225 { return (insn & 0x3b200c00) == 0x38000400; }
226
227 static bool
228 aarch64_ldst_u(Insntype insn)
229 { return (insn & 0x3b200c00) == 0x38000800; }
230
231 static bool
232 aarch64_ldst_preimm(Insntype insn)
233 { return (insn & 0x3b200c00) == 0x38000c00; }
234
235 static bool
236 aarch64_ldst_ro(Insntype insn)
237 { return (insn & 0x3b200c00) == 0x38200800; }
238
239 static bool
240 aarch64_ldst_uimm(Insntype insn)
241 { return (insn & 0x3b000000) == 0x39000000; }
242
243 static bool
244 aarch64_ldst_simd_m(Insntype insn)
245 { return (insn & 0xbfbf0000) == 0x0c000000; }
246
247 static bool
248 aarch64_ldst_simd_m_pi(Insntype insn)
249 { return (insn & 0xbfa00000) == 0x0c800000; }
250
251 static bool
252 aarch64_ldst_simd_s(Insntype insn)
253 { return (insn & 0xbf9f0000) == 0x0d000000; }
254
255 static bool
256 aarch64_ldst_simd_s_pi(Insntype insn)
257 { return (insn & 0xbf800000) == 0x0d800000; }
258
259 // Classify an INSN if it is indeed a load/store. Return true if INSN is a
260 // LD/ST instruction otherwise return false. For scalar LD/ST instructions
261 // PAIR is FALSE, RT is returned and RT2 is set equal to RT. For LD/ST pair
262 // instructions PAIR is TRUE, RT and RT2 are returned.
263 static bool
264 aarch64_mem_op_p(Insntype insn, unsigned int *rt, unsigned int *rt2,
265 bool *pair, bool *load)
266 {
267 uint32_t opcode;
268 unsigned int r;
269 uint32_t opc = 0;
270 uint32_t v = 0;
271 uint32_t opc_v = 0;
272
273 /* Bail out quickly if INSN doesn't fall into the load-store
274 encoding space. */
275 if (!aarch64_ldst (insn))
276 return false;
277
278 *pair = false;
279 *load = false;
280 if (aarch64_ldst_ex (insn))
281 {
282 *rt = aarch64_rt (insn);
283 *rt2 = *rt;
284 if (aarch64_bit (insn, 21) == 1)
285 {
286 *pair = true;
287 *rt2 = aarch64_rt2 (insn);
288 }
289 *load = aarch64_ld (insn);
290 return true;
291 }
292 else if (aarch64_ldst_nap (insn)
293 || aarch64_ldstp_pi (insn)
294 || aarch64_ldstp_o (insn)
295 || aarch64_ldstp_pre (insn))
296 {
297 *pair = true;
298 *rt = aarch64_rt (insn);
299 *rt2 = aarch64_rt2 (insn);
300 *load = aarch64_ld (insn);
301 return true;
302 }
303 else if (aarch64_ldst_pcrel (insn)
304 || aarch64_ldst_ui (insn)
305 || aarch64_ldst_piimm (insn)
306 || aarch64_ldst_u (insn)
307 || aarch64_ldst_preimm (insn)
308 || aarch64_ldst_ro (insn)
309 || aarch64_ldst_uimm (insn))
310 {
311 *rt = aarch64_rt (insn);
312 *rt2 = *rt;
313 if (aarch64_ldst_pcrel (insn))
314 *load = true;
315 opc = aarch64_bits (insn, 22, 2);
316 v = aarch64_bit (insn, 26);
317 opc_v = opc | (v << 2);
318 *load = (opc_v == 1 || opc_v == 2 || opc_v == 3
319 || opc_v == 5 || opc_v == 7);
320 return true;
321 }
322 else if (aarch64_ldst_simd_m (insn)
323 || aarch64_ldst_simd_m_pi (insn))
324 {
325 *rt = aarch64_rt (insn);
326 *load = aarch64_bit (insn, 22);
327 opcode = (insn >> 12) & 0xf;
328 switch (opcode)
329 {
330 case 0:
331 case 2:
332 *rt2 = *rt + 3;
333 break;
334
335 case 4:
336 case 6:
337 *rt2 = *rt + 2;
338 break;
339
340 case 7:
341 *rt2 = *rt;
342 break;
343
344 case 8:
345 case 10:
346 *rt2 = *rt + 1;
347 break;
348
349 default:
350 return false;
351 }
352 return true;
353 }
354 else if (aarch64_ldst_simd_s (insn)
355 || aarch64_ldst_simd_s_pi (insn))
356 {
357 *rt = aarch64_rt (insn);
358 r = (insn >> 21) & 1;
359 *load = aarch64_bit (insn, 22);
360 opcode = (insn >> 13) & 0x7;
361 switch (opcode)
362 {
363 case 0:
364 case 2:
365 case 4:
366 *rt2 = *rt + r;
367 break;
368
369 case 1:
370 case 3:
371 case 5:
372 *rt2 = *rt + (r == 0 ? 2 : 3);
373 break;
374
375 case 6:
376 *rt2 = *rt + r;
377 break;
378
379 case 7:
380 *rt2 = *rt + (r == 0 ? 2 : 3);
381 break;
382
383 default:
384 return false;
385 }
386 return true;
387 }
388 return false;
389 } // End of "aarch64_mem_op_p".
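// Illustrative example (not from the original source): for
// "ldp x0, x1, [sp, #16]" (0xa94107e0), aarch64_ldst() matches, then the
// aarch64_ldstp_o() branch is taken, giving *pair = true, *load = true,
// *rt = 0 (x0) and *rt2 = 1 (x1).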
390
391 // Return true if INSN is mac insn.
392 static bool
393 aarch64_mac(Insntype insn)
394 { return (insn & 0xff000000) == 0x9b000000; }
395
396 // Return true if INSN is multiply-accumulate.
397 // (This is similar to the implementation in elfnn-aarch64.c.)
398 static bool
399 aarch64_mlxl(Insntype insn)
400 {
401 uint32_t op31 = aarch64_op31(insn);
402 if (aarch64_mac(insn)
403 && (op31 == 0 || op31 == 1 || op31 == 5)
404 /* Exclude MUL instructions which are encoded as a multiple-accumulate
405 with RA = XZR. */
406 && aarch64_ra(insn) != AARCH64_ZR)
407 {
408 return true;
409 }
410 return false;
411 }
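// Illustrative example (not from the original source): "madd x0, x1, x2, x3"
// (0x9b020c20) has op31 == 0 and ra == 3, so aarch64_mlxl() returns true,
// whereas "mul x0, x1, x2" (0x9b027c20) is madd with ra == xzr (31) and is
// therefore excluded.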
412 }; // End of "AArch64_insn_utilities".
413
414
415 // Insn length in bytes.
416
417 template<bool big_endian>
418 const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
419
420
421 // Zero register encoding - 31.
422
423 template<bool big_endian>
424 const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
425
426
427 // Output_data_got_aarch64 class.
428
429 template<int size, bool big_endian>
430 class Output_data_got_aarch64 : public Output_data_got<size, big_endian>
431 {
432 public:
433 typedef typename elfcpp::Elf_types<size>::Elf_Addr Valtype;
434 Output_data_got_aarch64(Symbol_table* symtab, Layout* layout)
435 : Output_data_got<size, big_endian>(),
436 symbol_table_(symtab), layout_(layout)
437 { }
438
439 // Add a static entry for the GOT entry at OFFSET. GSYM is a global
440 // symbol and R_TYPE is the code of a dynamic relocation that needs to be
441 // applied in a static link.
442 void
443 add_static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
444 { this->static_relocs_.push_back(Static_reloc(got_offset, r_type, gsym)); }
445
446
447 // Add a static reloc for the GOT entry at OFFSET. RELOBJ is an object
448 // defining a local symbol with INDEX. R_TYPE is the code of a dynamic
449 // relocation that needs to be applied in a static link.
450 void
451 add_static_reloc(unsigned int got_offset, unsigned int r_type,
452 Sized_relobj_file<size, big_endian>* relobj,
453 unsigned int index)
454 {
455 this->static_relocs_.push_back(Static_reloc(got_offset, r_type, relobj,
456 index));
457 }
458
459
460 protected:
461 // Write out the GOT table.
462 void
463 do_write(Output_file* of) {
464 // The first entry in the GOT is the address of the .dynamic section.
465 gold_assert(this->data_size() >= size / 8);
466 Output_section* dynamic = this->layout_->dynamic_section();
467 Valtype dynamic_addr = dynamic == NULL ? 0 : dynamic->address();
468 this->replace_constant(0, dynamic_addr);
469 Output_data_got<size, big_endian>::do_write(of);
470
471 // Handling static relocs
472 if (this->static_relocs_.empty())
473 return;
474
475 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
476
477 gold_assert(parameters->doing_static_link());
478 const off_t offset = this->offset();
479 const section_size_type oview_size =
480 convert_to_section_size_type(this->data_size());
481 unsigned char* const oview = of->get_output_view(offset, oview_size);
482
483 Output_segment* tls_segment = this->layout_->tls_segment();
484 gold_assert(tls_segment != NULL);
485
486 AArch64_address aligned_tcb_address =
487 align_address(Target_aarch64<size, big_endian>::TCB_SIZE,
488 tls_segment->maximum_alignment());
489
490 for (size_t i = 0; i < this->static_relocs_.size(); ++i)
491 {
492 Static_reloc& reloc(this->static_relocs_[i]);
493 AArch64_address value;
494
495 if (!reloc.symbol_is_global())
496 {
497 Sized_relobj_file<size, big_endian>* object = reloc.relobj();
498 const Symbol_value<size>* psymval =
499 reloc.relobj()->local_symbol(reloc.index());
500
501 // We are doing static linking. Issue an error and skip this
502 // relocation if the symbol is undefined or in a discarded section.
503 bool is_ordinary;
504 unsigned int shndx = psymval->input_shndx(&is_ordinary);
505 if ((shndx == elfcpp::SHN_UNDEF)
506 || (is_ordinary
507 && shndx != elfcpp::SHN_UNDEF
508 && !object->is_section_included(shndx)
509 && !this->symbol_table_->is_section_folded(object, shndx)))
510 {
511 gold_error(_("undefined or discarded local symbol %u from "
512 "object %s in GOT"),
513 reloc.index(), reloc.relobj()->name().c_str());
514 continue;
515 }
516 value = psymval->value(object, 0);
517 }
518 else
519 {
520 const Symbol* gsym = reloc.symbol();
521 gold_assert(gsym != NULL);
522 if (gsym->is_forwarder())
523 gsym = this->symbol_table_->resolve_forwards(gsym);
524
525 // We are doing static linking. Issue an error and skip this
526 // relocation if the symbol is undefined or in a discarded section,
527 // unless it is a weakly undefined symbol.
528 if ((gsym->is_defined_in_discarded_section()
529 || gsym->is_undefined())
530 && !gsym->is_weak_undefined())
531 {
532 gold_error(_("undefined or discarded symbol %s in GOT"),
533 gsym->name());
534 continue;
535 }
536
537 if (!gsym->is_weak_undefined())
538 {
539 const Sized_symbol<size>* sym =
540 static_cast<const Sized_symbol<size>*>(gsym);
541 value = sym->value();
542 }
543 else
544 value = 0;
545 }
546
547 unsigned got_offset = reloc.got_offset();
548 gold_assert(got_offset < oview_size);
549
550 typedef typename elfcpp::Swap<size, big_endian>::Valtype Valtype;
551 Valtype* wv = reinterpret_cast<Valtype*>(oview + got_offset);
552 Valtype x;
553 switch (reloc.r_type())
554 {
555 case elfcpp::R_AARCH64_TLS_DTPREL64:
556 x = value;
557 break;
558 case elfcpp::R_AARCH64_TLS_TPREL64:
559 x = value + aligned_tcb_address;
560 break;
561 default:
562 gold_unreachable();
563 }
564 elfcpp::Swap<size, big_endian>::writeval(wv, x);
565 }
566
567 of->write_output_view(offset, oview_size, oview);
568 }
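// Illustrative example of the values written by do_write above (a sketch; it
// assumes TCB_SIZE is 16 for AArch64 and that the TLS segment is 16-byte
// aligned, as set up elsewhere in this file): for R_AARCH64_TLS_TPREL64 a
// symbol at offset 0x8 into the TLS segment is written as
// 0x8 + align(16, 16) = 0x18, i.e. the offset from the thread pointer; for
// R_AARCH64_TLS_DTPREL64 the raw module-relative value 0x8 is written
// unchanged.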
569
570 private:
571 // Symbol table of the output object.
572 Symbol_table* symbol_table_;
573 // A pointer to the Layout class, so that we can find the .dynamic
574 // section when we write out the GOT section.
575 Layout* layout_;
576
577 // This class represents dynamic relocations that need to be applied by
578 // gold because we are using TLS relocations in a static link.
579 class Static_reloc
580 {
581 public:
582 Static_reloc(unsigned int got_offset, unsigned int r_type, Symbol* gsym)
583 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(true)
584 { this->u_.global.symbol = gsym; }
585
586 Static_reloc(unsigned int got_offset, unsigned int r_type,
587 Sized_relobj_file<size, big_endian>* relobj, unsigned int index)
588 : got_offset_(got_offset), r_type_(r_type), symbol_is_global_(false)
589 {
590 this->u_.local.relobj = relobj;
591 this->u_.local.index = index;
592 }
593
594 // Return the GOT offset.
595 unsigned int
596 got_offset() const
597 { return this->got_offset_; }
598
599 // Relocation type.
600 unsigned int
601 r_type() const
602 { return this->r_type_; }
603
604 // Whether the symbol is global or not.
605 bool
606 symbol_is_global() const
607 { return this->symbol_is_global_; }
608
609 // For a relocation against a global symbol, the global symbol.
610 Symbol*
611 symbol() const
612 {
613 gold_assert(this->symbol_is_global_);
614 return this->u_.global.symbol;
615 }
616
617 // For a relocation against a local symbol, the defining object.
618 Sized_relobj_file<size, big_endian>*
619 relobj() const
620 {
621 gold_assert(!this->symbol_is_global_);
622 return this->u_.local.relobj;
623 }
624
625 // For a relocation against a local symbol, the local symbol index.
626 unsigned int
627 index() const
628 {
629 gold_assert(!this->symbol_is_global_);
630 return this->u_.local.index;
631 }
632
633 private:
634 // GOT offset of the entry to which this relocation is applied.
635 unsigned int got_offset_;
636 // Type of relocation.
637 unsigned int r_type_;
638 // Whether this relocation is against a global symbol.
639 bool symbol_is_global_;
640 // A global or local symbol.
641 union
642 {
643 struct
644 {
645 // For a global symbol, the symbol itself.
646 Symbol* symbol;
647 } global;
648 struct
649 {
650 // For a local symbol, the object defining the symbol.
651 Sized_relobj_file<size, big_endian>* relobj;
652 // For a local symbol, the symbol index.
653 unsigned int index;
654 } local;
655 } u_;
656 }; // End of inner class Static_reloc
657
658 std::vector<Static_reloc> static_relocs_;
659 }; // End of Output_data_got_aarch64
660
661
662 template<int size, bool big_endian>
663 class AArch64_input_section;
664
665
666 template<int size, bool big_endian>
667 class AArch64_output_section;
668
669
670 template<int size, bool big_endian>
671 class AArch64_relobj;
672
673
674 // Stub type enum constants.
675
676 enum
677 {
678 ST_NONE = 0,
679
680 // Using adrp/add pair, 4 insns (including alignment) without mem access,
681 // the fastest stub. This has a limited jump distance, which is tested by
682 // aarch64_valid_for_adrp_p.
683 ST_ADRP_BRANCH = 1,
684
685 // Using ldr-absolute-address/br-register, 4 insns with 1 mem access,
686 // unlimited in jump distance.
687 ST_LONG_BRANCH_ABS = 2,
688
689 // Using ldr/calculate-pcrel/jump, 8 insns (including alignment) with 1
690 // mem access, slowest one. Only used in position independent output.
691 ST_LONG_BRANCH_PCREL = 3,
692
693 // Stub for erratum 843419 handling.
694 ST_E_843419 = 4,
695
696 // Stub for erratum 835769 handling.
697 ST_E_835769 = 5,
698
699 // Number of total stub types.
700 ST_NUMBER = 6
701 };
702
703
704 // Struct that wraps insns for a particular stub. All stub templates are
705 // created/initialized as constants by Stub_template_repertoire.
706
707 template<bool big_endian>
708 struct Stub_template
709 {
710 const typename AArch64_insn_utilities<big_endian>::Insntype* insns;
711 const int insn_num;
712 };
713
714
715 // Simple singleton class that creates/initializes/stores all types of stub
716 // templates.
717
718 template<bool big_endian>
719 class Stub_template_repertoire
720 {
721 public:
722 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
723
724 // Single static method to get stub template for a given stub type.
725 static const Stub_template<big_endian>*
726 get_stub_template(int type)
727 {
728 static Stub_template_repertoire<big_endian> singleton;
729 return singleton.stub_templates_[type];
730 }
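// Usage sketch (illustrative only): a caller asks for a template by type and
// reads the insns from it, e.g.
//   const Stub_template<big_endian>* t =
//       Stub_template_repertoire<big_endian>::get_stub_template(ST_ADRP_BRANCH);
//   // t->insn_num == 4, t->insns[2] == 0xd61f0200 ("br ip0").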
731
732 private:
733 // Constructor - creates/initializes all stub templates.
734 Stub_template_repertoire();
735 ~Stub_template_repertoire()
736 { }
737
738 // Disallowing copy ctor and copy assignment operator.
739 Stub_template_repertoire(Stub_template_repertoire&);
740 Stub_template_repertoire& operator=(Stub_template_repertoire&);
741
742 // Data that stores all insn templates.
743 const Stub_template<big_endian>* stub_templates_[ST_NUMBER];
744 }; // End of "class Stub_template_repertoire".
745
746
747 // Constructor - creates/initializes all stub templates.
748
749 template<bool big_endian>
750 Stub_template_repertoire<big_endian>::Stub_template_repertoire()
751 {
752 // Insn array definitions.
753 const static Insntype ST_NONE_INSNS[] = {};
754
755 const static Insntype ST_ADRP_BRANCH_INSNS[] =
756 {
757 0x90000010, /* adrp ip0, X */
758 /* ADR_PREL_PG_HI21(X) */
759 0x91000210, /* add ip0, ip0, :lo12:X */
760 /* ADD_ABS_LO12_NC(X) */
761 0xd61f0200, /* br ip0 */
762 0x00000000, /* alignment padding */
763 };
764
765 const static Insntype ST_LONG_BRANCH_ABS_INSNS[] =
766 {
767 0x58000050, /* ldr ip0, 0x8 */
768 0xd61f0200, /* br ip0 */
769 0x00000000, /* address field */
770 0x00000000, /* address field */
771 };
772
773 const static Insntype ST_LONG_BRANCH_PCREL_INSNS[] =
774 {
775 0x58000090, /* ldr ip0, 0x10 */
776 0x10000011, /* adr ip1, #0 */
777 0x8b110210, /* add ip0, ip0, ip1 */
778 0xd61f0200, /* br ip0 */
779 0x00000000, /* address field */
780 0x00000000, /* address field */
781 0x00000000, /* alignment padding */
782 0x00000000, /* alignment padding */
783 };
784
785 const static Insntype ST_E_843419_INSNS[] =
786 {
787 0x00000000, /* Placeholder for erratum insn. */
788 0x14000000, /* b <label> */
789 };
790
791 // ST_E_835769 has the same stub template as ST_E_843419
792 // but we reproduce the array here so that the sizeof
793 // expressions in install_insn_template will work.
794 const static Insntype ST_E_835769_INSNS[] =
795 {
796 0x00000000, /* Placeholder for erratum insn. */
797 0x14000000, /* b <label> */
798 };
799
800 #define install_insn_template(T) \
801 const static Stub_template<big_endian> template_##T = { \
802 T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
803 this->stub_templates_[T] = &template_##T
804
805 install_insn_template(ST_NONE);
806 install_insn_template(ST_ADRP_BRANCH);
807 install_insn_template(ST_LONG_BRANCH_ABS);
808 install_insn_template(ST_LONG_BRANCH_PCREL);
809 install_insn_template(ST_E_843419);
810 install_insn_template(ST_E_835769);
811
812 #undef install_insn_template
813 }
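// For reference, each install_insn_template(T) use expands to roughly the
// following (shown here for ST_ADRP_BRANCH; purely illustrative):
//   const static Stub_template<big_endian> template_ST_ADRP_BRANCH =
//       { ST_ADRP_BRANCH_INSNS, 4 };  // 4 == sizeof(array) / sizeof(array[0])
//   this->stub_templates_[ST_ADRP_BRANCH] = &template_ST_ADRP_BRANCH;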
814
815
816 // Base class for stubs.
817
818 template<int size, bool big_endian>
819 class Stub_base
820 {
821 public:
822 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
823 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
824
825 static const AArch64_address invalid_address =
826 static_cast<AArch64_address>(-1);
827
828 static const section_offset_type invalid_offset =
829 static_cast<section_offset_type>(-1);
830
831 Stub_base(int type)
832 : destination_address_(invalid_address),
833 offset_(invalid_offset),
834 type_(type)
835 {}
836
837 ~Stub_base()
838 {}
839
840 // Get stub type.
841 int
842 type() const
843 { return this->type_; }
844
845 // Get stub template that provides stub insn information.
846 const Stub_template<big_endian>*
847 stub_template() const
848 {
849 return Stub_template_repertoire<big_endian>::
850 get_stub_template(this->type());
851 }
852
853 // Get destination address.
854 AArch64_address
855 destination_address() const
856 {
857 gold_assert(this->destination_address_ != this->invalid_address);
858 return this->destination_address_;
859 }
860
861 // Set destination address.
862 void
863 set_destination_address(AArch64_address address)
864 {
865 gold_assert(address != this->invalid_address);
866 this->destination_address_ = address;
867 }
868
869 // Reset the destination address.
870 void
871 reset_destination_address()
872 { this->destination_address_ = this->invalid_address; }
873
874 // Get offset of code stub. For Reloc_stub, it is the offset from the
875 // beginning of its containing stub table; for Erratum_stub, it is the offset
876 // from the end of reloc_stubs.
877 section_offset_type
878 offset() const
879 {
880 gold_assert(this->offset_ != this->invalid_offset);
881 return this->offset_;
882 }
883
884 // Set stub offset.
885 void
886 set_offset(section_offset_type offset)
887 { this->offset_ = offset; }
888
889 // Return the stub insn.
890 const Insntype*
891 insns() const
892 { return this->stub_template()->insns; }
893
894 // Return num of stub insns.
895 unsigned int
896 insn_num() const
897 { return this->stub_template()->insn_num; }
898
899 // Get size of the stub.
900 int
901 stub_size() const
902 {
903 return this->insn_num() *
904 AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
905 }
906
907 // Write stub to output file.
908 void
909 write(unsigned char* view, section_size_type view_size)
910 { this->do_write(view, view_size); }
911
912 protected:
913 // Abstract method to be implemented by sub-classes.
914 virtual void
915 do_write(unsigned char*, section_size_type) = 0;
916
917 private:
918 // The last insn of a stub is a jump to destination insn. This field records
919 // the destination address.
920 AArch64_address destination_address_;
921 // The stub offset. Note this has different interpretations between a
922 // Reloc_stub and an Erratum_stub. For a Reloc_stub this is the offset from the
923 // beginning of the containing stub_table, whereas for Erratum_stub, this is
924 // the offset from the end of reloc_stubs.
925 section_offset_type offset_;
926 // Stub type.
927 const int type_;
928 }; // End of "Stub_base".
929
930
931 // Erratum stub class. An erratum stub differs from a reloc stub in that for
932 // each erratum occurrence, we generate an erratum stub. We never share erratum
933 // stubs, whereas for reloc stubs, different branch insns share a single reloc
934 // stub as long as the branch targets are the same. (More to the point, reloc
935 // stubs can be shared because they're used to reach a specific target, whereas
936 // erratum stubs branch back to the original control flow.)
937
938 template<int size, bool big_endian>
939 class Erratum_stub : public Stub_base<size, big_endian>
940 {
941 public:
942 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
943 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
944 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
945 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
946
947 static const int STUB_ADDR_ALIGN;
948
949 static const Insntype invalid_insn = static_cast<Insntype>(-1);
950
951 Erratum_stub(The_aarch64_relobj* relobj, int type,
952 unsigned shndx, unsigned int sh_offset)
953 : Stub_base<size, big_endian>(type), relobj_(relobj),
954 shndx_(shndx), sh_offset_(sh_offset),
955 erratum_insn_(invalid_insn),
956 erratum_address_(this->invalid_address)
957 {}
958
959 ~Erratum_stub() {}
960
961 // Return the object that contains the erratum.
962 The_aarch64_relobj*
963 relobj()
964 { return this->relobj_; }
965
966 // Get section index of the erratum.
967 unsigned int
968 shndx() const
969 { return this->shndx_; }
970
971 // Get section offset of the erratum.
972 unsigned int
973 sh_offset() const
974 { return this->sh_offset_; }
975
976 // Get the erratum insn. This is the insn located at erratum_insn_address.
977 Insntype
978 erratum_insn() const
979 {
980 gold_assert(this->erratum_insn_ != this->invalid_insn);
981 return this->erratum_insn_;
982 }
983
984 // Set the insn that the erratum happens to.
985 void
986 set_erratum_insn(Insntype insn)
987 { this->erratum_insn_ = insn; }
988
989 // For 843419, the erratum insn is "ld/st xt, [xn, #uimm]", which may be a
990 // relocation spot. In that case, the erratum_insn_ recorded at scanning phase
991 // is no longer the one we want to write out to the stub, so we update
992 // erratum_insn_ with the relocated version. Also note that in this case xn
993 // must not be "PC", so it is safe to move the erratum insn from its original
994 // place to the stub. For 835769, the erratum insn is a multiply-accumulate
995 // insn, which cannot be a relocation spot (an assertion is added regardless).
996 void
997 update_erratum_insn(Insntype insn)
998 {
999 gold_assert(this->erratum_insn_ != this->invalid_insn);
1000 switch (this->type())
1001 {
1002 case ST_E_843419:
1003 gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
1004 gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
1005 gold_assert(Insn_utilities::aarch64_rd(insn) ==
1006 Insn_utilities::aarch64_rd(this->erratum_insn()));
1007 gold_assert(Insn_utilities::aarch64_rn(insn) ==
1008 Insn_utilities::aarch64_rn(this->erratum_insn()));
1009 // Update plain ld/st insn with relocated insn.
1010 this->erratum_insn_ = insn;
1011 break;
1012 case ST_E_835769:
1013 gold_assert(insn == this->erratum_insn());
1014 break;
1015 default:
1016 gold_unreachable();
1017 }
1018 }
1019
1020
1021 // Return the address where the erratum fix must be applied.
1022 AArch64_address
1023 erratum_address() const
1024 {
1025 gold_assert(this->erratum_address_ != this->invalid_address);
1026 return this->erratum_address_;
1027 }
1028
1029 // Set the address where the erratum fix must be applied.
1030 void
1031 set_erratum_address(AArch64_address addr)
1032 { this->erratum_address_ = addr; }
1033
1034 // Comparator used to group Erratum_stubs in a set by (obj, shndx,
1035 // sh_offset). We do not include 'type' in the calculation, because there is
1036 // at most one stub type at (obj, shndx, sh_offset).
1037 bool
1038 operator<(const Erratum_stub<size, big_endian>& k) const
1039 {
1040 if (this == &k)
1041 return false;
1042 // We group stubs by relobj.
1043 if (this->relobj_ != k.relobj_)
1044 return this->relobj_ < k.relobj_;
1045 // Then by section index.
1046 if (this->shndx_ != k.shndx_)
1047 return this->shndx_ < k.shndx_;
1048 // Lastly by section offset.
1049 return this->sh_offset_ < k.sh_offset_;
1050 }
1051
1052 void
1053 invalidate_erratum_stub()
1054 {
1055 gold_assert(this->erratum_insn_ != invalid_insn);
1056 this->erratum_insn_ = invalid_insn;
1057 }
1058
1059 bool
1060 is_invalidated_erratum_stub()
1061 { return this->erratum_insn_ == invalid_insn; }
1062
1063 protected:
1064 virtual void
1065 do_write(unsigned char*, section_size_type);
1066
1067 private:
1068 // The object that needs to be fixed.
1069 The_aarch64_relobj* relobj_;
1070 // The shndx in the object that needs to be fixed.
1071 const unsigned int shndx_;
1072 // The section offset in the object that needs to be fixed.
1073 const unsigned int sh_offset_;
1074 // The insn to be fixed.
1075 Insntype erratum_insn_;
1076 // The address of the above insn.
1077 AArch64_address erratum_address_;
1078 }; // End of "Erratum_stub".
1079
1080
1081 // Erratum stub sub-class to wrap additional info needed by 843419. In fixing
1082 // this erratum, we may choose to replace 'adrp' with 'adr'; in that case, we
1083 // need adrp's code position (two or three insns before the erratum insn itself).
1084
1085 template<int size, bool big_endian>
1086 class E843419_stub : public Erratum_stub<size, big_endian>
1087 {
1088 public:
1089 typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
1090
1091 E843419_stub(AArch64_relobj<size, big_endian>* relobj,
1092 unsigned int shndx, unsigned int sh_offset,
1093 unsigned int adrp_sh_offset)
1094 : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
1095 adrp_sh_offset_(adrp_sh_offset)
1096 {}
1097
1098 unsigned int
1099 adrp_sh_offset() const
1100 { return this->adrp_sh_offset_; }
1101
1102 private:
1103 // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because we
1104 // can obtain it from its parent.)
1105 const unsigned int adrp_sh_offset_;
1106 };
1107
1108
1109 template<int size, bool big_endian>
1110 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1111
1112 // Comparator used in set definition.
1113 template<int size, bool big_endian>
1114 struct Erratum_stub_less
1115 {
1116 bool
1117 operator()(const Erratum_stub<size, big_endian>* s1,
1118 const Erratum_stub<size, big_endian>* s2) const
1119 { return *s1 < *s2; }
1120 };
1121
1122 // Erratum_stub implementation for writing stub to output file.
1123
1124 template<int size, bool big_endian>
1125 void
1126 Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
1127 {
1128 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1129 const Insntype* insns = this->insns();
1130 uint32_t num_insns = this->insn_num();
1131 Insntype* ip = reinterpret_cast<Insntype*>(view);
1132 // For the currently implemented errata 843419 and 835769, the first insn in
1133 // the stub is always a copy of the problematic insn (the mem access insn for
1134 // 843419, the mac insn for 835769), followed by a jump back.
1135 elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
1136 for (uint32_t i = 1; i < num_insns; ++i)
1137 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1138 }
1139
1140
1141 // Reloc stub class.
1142
1143 template<int size, bool big_endian>
1144 class Reloc_stub : public Stub_base<size, big_endian>
1145 {
1146 public:
1147 typedef Reloc_stub<size, big_endian> This;
1148 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1149
1150 // Branch range. This is used to calculate the section group size, as well as
1151 // determine whether a stub is needed.
1152 static const int MAX_BRANCH_OFFSET = ((1 << 25) - 1) << 2;
1153 static const int MIN_BRANCH_OFFSET = -((1 << 25) << 2);
1154
1155 // Constant used to determine if an offset fits in the adrp instruction
1156 // encoding.
1157 static const int MAX_ADRP_IMM = (1 << 20) - 1;
1158 static const int MIN_ADRP_IMM = -(1 << 20);
1159
1160 static const int BYTES_PER_INSN = 4;
1161 static const int STUB_ADDR_ALIGN;
1162
1163 // Determine whether the offset fits in the jump/branch instruction.
1164 static bool
1165 aarch64_valid_branch_offset_p(int64_t offset)
1166 { return offset >= MIN_BRANCH_OFFSET && offset <= MAX_BRANCH_OFFSET; }
1167
1168 // Determine whether the offset fits in the adrp immediate field.
1169 static bool
1170 aarch64_valid_for_adrp_p(AArch64_address location, AArch64_address dest)
1171 {
1172 typedef AArch64_relocate_functions<size, big_endian> Reloc;
1173 int64_t adrp_imm = (Reloc::Page(dest) - Reloc::Page(location)) >> 12;
1174 return adrp_imm >= MIN_ADRP_IMM && adrp_imm <= MAX_ADRP_IMM;
1175 }
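// For reference (derived from the constants above): a direct B/BL covers
// [-(1 << 27), (1 << 27) - 4] bytes, i.e. roughly +/-128 MiB, while an
// adrp-based stub covers about +/-4 GiB (+/-1M pages of 4 KiB each) from the
// stub location.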
1176
1177 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1178 // needed.
1179 static int
1180 stub_type_for_reloc(unsigned int r_type, AArch64_address address,
1181 AArch64_address target);
1182
1183 Reloc_stub(int type)
1184 : Stub_base<size, big_endian>(type)
1185 { }
1186
1187 ~Reloc_stub()
1188 { }
1189
1190 // The key class used to index the stub instance in the stub table's stub map.
1191 class Key
1192 {
1193 public:
1194 Key(int type, const Symbol* symbol, const Relobj* relobj,
1195 unsigned int r_sym, int32_t addend)
1196 : type_(type), addend_(addend)
1197 {
1198 if (symbol != NULL)
1199 {
1200 this->r_sym_ = Reloc_stub::invalid_index;
1201 this->u_.symbol = symbol;
1202 }
1203 else
1204 {
1205 gold_assert(relobj != NULL && r_sym != invalid_index);
1206 this->r_sym_ = r_sym;
1207 this->u_.relobj = relobj;
1208 }
1209 }
1210
1211 ~Key()
1212 { }
1213
1214 // Return stub type.
1215 int
1216 type() const
1217 { return this->type_; }
1218
1219 // Return the local symbol index or invalid_index.
1220 unsigned int
1221 r_sym() const
1222 { return this->r_sym_; }
1223
1224 // Return the symbol if there is one.
1225 const Symbol*
1226 symbol() const
1227 { return this->r_sym_ == invalid_index ? this->u_.symbol : NULL; }
1228
1229 // Return the relobj if there is one.
1230 const Relobj*
1231 relobj() const
1232 { return this->r_sym_ != invalid_index ? this->u_.relobj : NULL; }
1233
1234 // Whether this equals to another key k.
1235 bool
1236 eq(const Key& k) const
1237 {
1238 return ((this->type_ == k.type_)
1239 && (this->r_sym_ == k.r_sym_)
1240 && ((this->r_sym_ != Reloc_stub::invalid_index)
1241 ? (this->u_.relobj == k.u_.relobj)
1242 : (this->u_.symbol == k.u_.symbol))
1243 && (this->addend_ == k.addend_));
1244 }
1245
1246 // Return a hash value.
1247 size_t
1248 hash_value() const
1249 {
1250 size_t name_hash_value = gold::string_hash<char>(
1251 (this->r_sym_ != Reloc_stub::invalid_index)
1252 ? this->u_.relobj->name().c_str()
1253 : this->u_.symbol->name());
1254 // We only have 4 stub types.
1255 size_t stub_type_hash_value = 0x03 & this->type_;
1256 return (name_hash_value
1257 ^ stub_type_hash_value
1258 ^ ((this->r_sym_ & 0x3fff) << 2)
1259 ^ ((this->addend_ & 0xffff) << 16));
1260 }
1261
1262 // Functors for STL associative containers.
1263 struct hash
1264 {
1265 size_t
1266 operator()(const Key& k) const
1267 { return k.hash_value(); }
1268 };
1269
1270 struct equal_to
1271 {
1272 bool
1273 operator()(const Key& k1, const Key& k2) const
1274 { return k1.eq(k2); }
1275 };
1276
1277 private:
1278 // Stub type.
1279 const int type_;
1280 // If this is a local symbol, this is the index in the defining object.
1281 // Otherwise, it is invalid_index for a global symbol.
1282 unsigned int r_sym_;
1283 // If r_sym_ is an invalid index, this points to a global symbol.
1284 // Otherwise, it points to a relobj. We used the unsized and target
1285 // independent Symbol and Relobj classes instead of Sized_symbol<size> and
1286 // AArch64_relobj, in order to avoid making the stub class a template
1287 // as most of the stub machinery is endianness-neutral. However, it
1288 // may require a bit of casting done by users of this class.
1289 union
1290 {
1291 const Symbol* symbol;
1292 const Relobj* relobj;
1293 } u_;
1294 // Addend associated with a reloc.
1295 int32_t addend_;
1296 }; // End of inner class Reloc_stub::Key
1297
1298 protected:
1299 // This may be overridden in the child class.
1300 virtual void
1301 do_write(unsigned char*, section_size_type);
1302
1303 private:
1304 static const unsigned int invalid_index = static_cast<unsigned int>(-1);
1305 }; // End of Reloc_stub
1306
1307 template<int size, bool big_endian>
1308 const int Reloc_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
1309
1310 // Write data to output file.
1311
1312 template<int size, bool big_endian>
1313 void
1314 Reloc_stub<size, big_endian>::
1315 do_write(unsigned char* view, section_size_type)
1316 {
1317 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
1318 const uint32_t* insns = this->insns();
1319 uint32_t num_insns = this->insn_num();
1320 Insntype* ip = reinterpret_cast<Insntype*>(view);
1321 for (uint32_t i = 0; i < num_insns; ++i)
1322 elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
1323 }
1324
1325
1326 // Determine the stub type for a certain relocation, or ST_NONE if no stub is
1327 // needed.
1328
1329 template<int size, bool big_endian>
1330 inline int
1331 Reloc_stub<size, big_endian>::stub_type_for_reloc(
1332 unsigned int r_type, AArch64_address location, AArch64_address dest)
1333 {
1334 int64_t branch_offset = 0;
1335 switch(r_type)
1336 {
1337 case elfcpp::R_AARCH64_CALL26:
1338 case elfcpp::R_AARCH64_JUMP26:
1339 branch_offset = dest - location;
1340 break;
1341 default:
1342 gold_unreachable();
1343 }
1344
1345 if (aarch64_valid_branch_offset_p(branch_offset))
1346 return ST_NONE;
1347
1348 if (aarch64_valid_for_adrp_p(location, dest))
1349 return ST_ADRP_BRANCH;
1350
1351 // Always use PC-relative addressing in case of -shared or -pie.
1352 if (parameters->options().output_is_position_independent())
1353 return ST_LONG_BRANCH_PCREL;
1354
1355 // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
1356 // But it is only applicable to non-shared, non-pie output.
1357 return ST_LONG_BRANCH_ABS;
1358 }
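// Illustrative example (made-up addresses): for an R_AARCH64_CALL26 at
// location 0x400000 targeting 0x400000 + 200 MiB, the branch offset exceeds
// the +/-128 MiB direct range, but the page delta (200 MiB / 4 KiB = 51200
// pages) fits the adrp immediate, so ST_ADRP_BRANCH is chosen; only when even
// that fails does the choice fall back to ST_LONG_BRANCH_PCREL (for
// -shared/-pie output) or ST_LONG_BRANCH_ABS.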
1359
1360 // A class to hold stubs for the AArch64 target. This contains two different
1361 // types of stubs - reloc stubs and erratum stubs.
1362
1363 template<int size, bool big_endian>
1364 class Stub_table : public Output_data
1365 {
1366 public:
1367 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1368 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1369 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
1370 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1371 typedef Reloc_stub<size, big_endian> The_reloc_stub;
1372 typedef typename The_reloc_stub::Key The_reloc_stub_key;
1373 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1374 typedef Erratum_stub_less<size, big_endian> The_erratum_stub_less;
1375 typedef typename The_reloc_stub_key::hash The_reloc_stub_key_hash;
1376 typedef typename The_reloc_stub_key::equal_to The_reloc_stub_key_equal_to;
1377 typedef Stub_table<size, big_endian> The_stub_table;
1378 typedef Unordered_map<The_reloc_stub_key, The_reloc_stub*,
1379 The_reloc_stub_key_hash, The_reloc_stub_key_equal_to>
1380 Reloc_stub_map;
1381 typedef typename Reloc_stub_map::const_iterator Reloc_stub_map_const_iter;
1382 typedef Relocate_info<size, big_endian> The_relocate_info;
1383
1384 typedef std::set<The_erratum_stub*, The_erratum_stub_less> Erratum_stub_set;
1385 typedef typename Erratum_stub_set::iterator Erratum_stub_set_iter;
1386
1387 Stub_table(The_aarch64_input_section* owner)
1388 : Output_data(), owner_(owner), reloc_stubs_size_(0),
1389 erratum_stubs_size_(0), prev_data_size_(0)
1390 { }
1391
1392 ~Stub_table()
1393 { }
1394
1395 The_aarch64_input_section*
1396 owner() const
1397 { return owner_; }
1398
1399 // Whether this stub table is empty.
1400 bool
1401 empty() const
1402 { return reloc_stubs_.empty() && erratum_stubs_.empty(); }
1403
1404 // Return the current data size.
1405 off_t
1406 current_data_size() const
1407 { return this->current_data_size_for_child(); }
1408
1409 // Add a STUB using KEY. The caller is responsible for avoiding addition
1410 // if a STUB with the same key has already been added.
1411 void
1412 add_reloc_stub(The_reloc_stub* stub, const The_reloc_stub_key& key);
1413
1414 // Add an erratum stub into the erratum stub set. The set is ordered by
1415 // (relobj, shndx, sh_offset).
1416 void
1417 add_erratum_stub(The_erratum_stub* stub);
1418
1419 // Find the erratum stub (if any) for a given (obj, shndx, sh_offset).
1420 The_erratum_stub*
1421 find_erratum_stub(The_aarch64_relobj* a64relobj,
1422 unsigned int shndx, unsigned int sh_offset);
1423
1424 // Find all the errata for a given input section. The return value is a pair
1425 // of iterators [begin, end).
1426 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1427 find_erratum_stubs_for_input_section(The_aarch64_relobj* a64relobj,
1428 unsigned int shndx);
1429
1430 // Compute the erratum stub address.
1431 AArch64_address
1432 erratum_stub_address(The_erratum_stub* stub) const
1433 {
1434 AArch64_address r = align_address(this->address() + this->reloc_stubs_size_,
1435 The_erratum_stub::STUB_ADDR_ALIGN);
1436 r += stub->offset();
1437 return r;
1438 }
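// Illustrative example (made-up numbers): with this stub table at address
// 0x10000, reloc_stubs_size_ == 0x30 and an erratum stub whose offset() is
// 0x8, the erratum stub lives at align(0x10000 + 0x30, 4) + 0x8 = 0x10038,
// i.e. erratum stubs are laid out after all reloc stubs.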
1439
1440 // Finalize stubs. No-op here, just for completeness.
1441 void
1442 finalize_stubs()
1443 { }
1444
1445 // Look up a relocation stub using KEY. Return NULL if there is none.
1446 The_reloc_stub*
1447 find_reloc_stub(The_reloc_stub_key& key)
1448 {
1449 Reloc_stub_map_const_iter p = this->reloc_stubs_.find(key);
1450 return (p != this->reloc_stubs_.end()) ? p->second : NULL;
1451 }
1452
1453 // Relocate reloc stubs in this stub table. This does not relocate erratum stubs.
1454 void
1455 relocate_reloc_stubs(const The_relocate_info*,
1456 The_target_aarch64*,
1457 Output_section*,
1458 unsigned char*,
1459 AArch64_address,
1460 section_size_type);
1461
1462 // Relocate an erratum stub.
1463 void
1464 relocate_erratum_stub(The_erratum_stub*, unsigned char*);
1465
1466 // Update data size at the end of a relaxation pass. Return true if data size
1467 // is different from that of the previous relaxation pass.
1468 bool
1469 update_data_size_changed_p()
1470 {
1471 // No addralign changed here.
1472 off_t s = align_address(this->reloc_stubs_size_,
1473 The_erratum_stub::STUB_ADDR_ALIGN)
1474 + this->erratum_stubs_size_;
1475 bool changed = (s != this->prev_data_size_);
1476 this->prev_data_size_ = s;
1477 return changed;
1478 }
1479
1480 protected:
1481 // Write out section contents.
1482 void
1483 do_write(Output_file*);
1484
1485 // Return the required alignment.
1486 uint64_t
1487 do_addralign() const
1488 {
1489 return std::max(The_reloc_stub::STUB_ADDR_ALIGN,
1490 The_erratum_stub::STUB_ADDR_ALIGN);
1491 }
1492
1493 // Reset address and file offset.
1494 void
1495 do_reset_address_and_file_offset()
1496 { this->set_current_data_size_for_child(this->prev_data_size_); }
1497
1498 // Set final data size.
1499 void
1500 set_final_data_size()
1501 { this->set_data_size(this->current_data_size()); }
1502
1503 private:
1504 // Relocate one reloc stub.
1505 void
1506 relocate_reloc_stub(The_reloc_stub*,
1507 const The_relocate_info*,
1508 The_target_aarch64*,
1509 Output_section*,
1510 unsigned char*,
1511 AArch64_address,
1512 section_size_type);
1513
1514 private:
1515 // Owner of this stub table.
1516 The_aarch64_input_section* owner_;
1517 // The relocation stubs.
1518 Reloc_stub_map reloc_stubs_;
1519 // The erratum stubs.
1520 Erratum_stub_set erratum_stubs_;
1521 // Size of reloc stubs.
1522 off_t reloc_stubs_size_;
1523 // Size of erratum stubs.
1524 off_t erratum_stubs_size_;
1525 // Data size of this stub table in the previous relaxation pass.
1526 off_t prev_data_size_;
1527 }; // End of Stub_table
1528
1529
1530 // Add an erratum stub into the erratum stub set. The set is ordered by
1531 // (relobj, shndx, sh_offset).
1532
1533 template<int size, bool big_endian>
1534 void
1535 Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
1536 {
1537 std::pair<Erratum_stub_set_iter, bool> ret =
1538 this->erratum_stubs_.insert(stub);
1539 gold_assert(ret.second);
1540 this->erratum_stubs_size_ = align_address(
1541 this->erratum_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1542 stub->set_offset(this->erratum_stubs_size_);
1543 this->erratum_stubs_size_ += stub->stub_size();
1544 }
1545
1546
1547 // Find the erratum stub (if any) for the given (obj, shndx, sh_offset).
1548
1549 template<int size, bool big_endian>
1550 Erratum_stub<size, big_endian>*
1551 Stub_table<size, big_endian>::find_erratum_stub(
1552 The_aarch64_relobj* a64relobj, unsigned int shndx, unsigned int sh_offset)
1553 {
1554 // A dummy object used as key to search in the set.
1555 The_erratum_stub key(a64relobj, ST_NONE,
1556 shndx, sh_offset);
1557 Erratum_stub_set_iter i = this->erratum_stubs_.find(&key);
1558 if (i != this->erratum_stubs_.end())
1559 {
1560 The_erratum_stub* stub(*i);
1561 gold_assert(stub->erratum_insn() != 0);
1562 return stub;
1563 }
1564 return NULL;
1565 }
1566
1567
1568 // Find all the errata for a given input section. The return value is a pair of
1569 // iterators [begin, end).
1570
1571 template<int size, bool big_endian>
1572 std::pair<typename Stub_table<size, big_endian>::Erratum_stub_set_iter,
1573 typename Stub_table<size, big_endian>::Erratum_stub_set_iter>
1574 Stub_table<size, big_endian>::find_erratum_stubs_for_input_section(
1575 The_aarch64_relobj* a64relobj, unsigned int shndx)
1576 {
1577 typedef std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> Result_pair;
1578 Erratum_stub_set_iter start, end;
1579 The_erratum_stub low_key(a64relobj, ST_NONE, shndx, 0);
1580 start = this->erratum_stubs_.lower_bound(&low_key);
1581 if (start == this->erratum_stubs_.end())
1582 return Result_pair(this->erratum_stubs_.end(),
1583 this->erratum_stubs_.end());
1584 end = start;
1585 while (end != this->erratum_stubs_.end() &&
1586 (*end)->relobj() == a64relobj && (*end)->shndx() == shndx)
1587 ++end;
1588 return Result_pair(start, end);
1589 }
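// Usage sketch (illustrative only): callers typically walk the returned
// half-open range, e.g.
//   std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter> range =
//       stub_table->find_erratum_stubs_for_input_section(relobj, shndx);
//   for (Erratum_stub_set_iter p = range.first; p != range.second; ++p)
//     ... handle one erratum stub belonging to this input section ...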
1590
1591
1592 // Add a STUB using KEY. The caller is responsible for avoiding addition
1593 // if a STUB with the same key has already been added.
1594
1595 template<int size, bool big_endian>
1596 void
1597 Stub_table<size, big_endian>::add_reloc_stub(
1598 The_reloc_stub* stub, const The_reloc_stub_key& key)
1599 {
1600 gold_assert(stub->type() == key.type());
1601 this->reloc_stubs_[key] = stub;
1602
1603 // Assign stub offset early. We can do this because we never remove
1604 // reloc stubs and they are at the beginning of the stub table.
1605 this->reloc_stubs_size_ = align_address(this->reloc_stubs_size_,
1606 The_reloc_stub::STUB_ADDR_ALIGN);
1607 stub->set_offset(this->reloc_stubs_size_);
1608 this->reloc_stubs_size_ += stub->stub_size();
1609 }
1610
1611
1612 // Relocate an erratum stub.
1613
1614 template<int size, bool big_endian>
1615 void
1616 Stub_table<size, big_endian>::
1617 relocate_erratum_stub(The_erratum_stub* estub,
1618 unsigned char* view)
1619 {
1620 // Just for convenience.
1621 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
1622
1623 gold_assert(!estub->is_invalidated_erratum_stub());
1624 AArch64_address stub_address = this->erratum_stub_address(estub);
1625 // The address of "b" in the stub that is to be "relocated".
1626 AArch64_address stub_b_insn_address;
1627 // Branch offset that is to be filled in "b" insn.
1628 int b_offset = 0;
1629 switch (estub->type())
1630 {
1631 case ST_E_843419:
1632 case ST_E_835769:
1633 // The 1st insn of the erratum could be a relocation spot,
1634 // in this case we need to fix it with
1635 // "(*i)->erratum_insn()".
1636 elfcpp::Swap<32, big_endian>::writeval(
1637 view + (stub_address - this->address()),
1638 estub->erratum_insn());
1639 // For the erratum, the 2nd insn is a b-insn to be patched
1640 // (relocated).
1641 stub_b_insn_address = stub_address + 1 * BPI;
1642 b_offset = estub->destination_address() - stub_b_insn_address;
1643 AArch64_relocate_functions<size, big_endian>::construct_b(
1644 view + (stub_b_insn_address - this->address()),
1645 ((unsigned int)(b_offset)) & 0xfffffff);
1646 break;
1647 default:
1648 gold_unreachable();
1649 break;
1650 }
1651 estub->invalidate_erratum_stub();
1652 }
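// After relocation, an erratum stub therefore looks like (illustrative; the
// destination is expected to be the insn following the erratum spot, as set
// up by the erratum-fixing code elsewhere in this file):
//   stub+0: the (possibly relocated) erratum insn
//   stub+4: b <back>, where the branch offset is
//           destination_address() - (stub + 4), masked to the low 28 bits
//           before being encoded into the b insn.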
1653
1654
1655 // Relocate only reloc stubs in this stub table. This does not relocate erratum
1656 // stubs.
1657
1658 template<int size, bool big_endian>
1659 void
1660 Stub_table<size, big_endian>::
1661 relocate_reloc_stubs(const The_relocate_info* relinfo,
1662 The_target_aarch64* target_aarch64,
1663 Output_section* output_section,
1664 unsigned char* view,
1665 AArch64_address address,
1666 section_size_type view_size)
1667 {
1668 // "view_size" is the total size of the stub_table.
1669 gold_assert(address == this->address() &&
1670 view_size == static_cast<section_size_type>(this->data_size()));
1671 for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
1672 p != this->reloc_stubs_.end(); ++p)
1673 relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
1674 view, address, view_size);
1675 }
1676
1677
1678 // Relocate one reloc stub. This is a helper for
1679 // Stub_table::relocate_reloc_stubs().
1680
1681 template<int size, bool big_endian>
1682 void
1683 Stub_table<size, big_endian>::
1684 relocate_reloc_stub(The_reloc_stub* stub,
1685 const The_relocate_info* relinfo,
1686 The_target_aarch64* target_aarch64,
1687 Output_section* output_section,
1688 unsigned char* view,
1689 AArch64_address address,
1690 section_size_type view_size)
1691 {
1692 // "offset" is the offset from the beginning of the stub_table.
1693 section_size_type offset = stub->offset();
1694 section_size_type stub_size = stub->stub_size();
1695 // "view_size" is the total size of the stub_table.
1696 gold_assert(offset + stub_size <= view_size);
1697
1698 target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
1699 view + offset, address + offset, view_size);
1700 }
1701
1702
1703 // Write out the stubs to file.
1704
1705 template<int size, bool big_endian>
1706 void
1707 Stub_table<size, big_endian>::do_write(Output_file* of)
1708 {
1709 off_t offset = this->offset();
1710 const section_size_type oview_size =
1711 convert_to_section_size_type(this->data_size());
1712 unsigned char* const oview = of->get_output_view(offset, oview_size);
1713
1714 // Write relocation stubs.
1715 for (typename Reloc_stub_map::const_iterator p = this->reloc_stubs_.begin();
1716 p != this->reloc_stubs_.end(); ++p)
1717 {
1718 The_reloc_stub* stub = p->second;
1719 AArch64_address address = this->address() + stub->offset();
1720 gold_assert(address ==
1721 align_address(address, The_reloc_stub::STUB_ADDR_ALIGN));
1722 stub->write(oview + stub->offset(), stub->stub_size());
1723 }
1724
1725 // Write erratum stubs.
1726 unsigned int erratum_stub_start_offset =
1727 align_address(this->reloc_stubs_size_, The_erratum_stub::STUB_ADDR_ALIGN);
1728 for (typename Erratum_stub_set::iterator p = this->erratum_stubs_.begin();
1729 p != this->erratum_stubs_.end(); ++p)
1730 {
1731 The_erratum_stub* stub(*p);
1732 stub->write(oview + erratum_stub_start_offset + stub->offset(),
1733 stub->stub_size());
1734 }
1735
1736 of->write_output_view(this->offset(), oview_size, oview);
1737 }
1738
1739
1740 // AArch64_relobj class.
1741
1742 template<int size, bool big_endian>
1743 class AArch64_relobj : public Sized_relobj_file<size, big_endian>
1744 {
1745 public:
1746 typedef AArch64_relobj<size, big_endian> This;
1747 typedef Target_aarch64<size, big_endian> The_target_aarch64;
1748 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
1749 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
1750 typedef Stub_table<size, big_endian> The_stub_table;
1751 typedef Erratum_stub<size, big_endian> The_erratum_stub;
1752 typedef typename The_stub_table::Erratum_stub_set_iter Erratum_stub_set_iter;
1753 typedef std::vector<The_stub_table*> Stub_table_list;
1754 static const AArch64_address invalid_address =
1755 static_cast<AArch64_address>(-1);
1756
1757 AArch64_relobj(const std::string& name, Input_file* input_file, off_t offset,
1758 const typename elfcpp::Ehdr<size, big_endian>& ehdr)
1759 : Sized_relobj_file<size, big_endian>(name, input_file, offset, ehdr),
1760 stub_tables_()
1761 { }
1762
1763 ~AArch64_relobj()
1764 { }
1765
1766 // Return the stub table of the SHNDX-th section if there is one.
1767 The_stub_table*
1768 stub_table(unsigned int shndx) const
1769 {
1770 gold_assert(shndx < this->stub_tables_.size());
1771 return this->stub_tables_[shndx];
1772 }
1773
1774 // Set STUB_TABLE to be the stub_table of the SHNDX-th section.
1775 void
1776 set_stub_table(unsigned int shndx, The_stub_table* stub_table)
1777 {
1778 gold_assert(shndx < this->stub_tables_.size());
1779 this->stub_tables_[shndx] = stub_table;
1780 }
1781
1782 // Entrance to errata scanning.
1783 void
1784 scan_errata(unsigned int shndx,
1785 const elfcpp::Shdr<size, big_endian>&,
1786 Output_section*, const Symbol_table*,
1787 The_target_aarch64*);
1788
1789 // Scan all relocation sections for stub generation.
1790 void
1791 scan_sections_for_stubs(The_target_aarch64*, const Symbol_table*,
1792 const Layout*);
1793
1794 // Whether a section is a scannable text section.
1795 bool
1796 text_section_is_scannable(const elfcpp::Shdr<size, big_endian>&, unsigned int,
1797 const Output_section*, const Symbol_table*);
1798
1799 // Convert regular input section with index SHNDX to a relaxed section.
1800 void
1801 convert_input_section_to_relaxed_section(unsigned shndx)
1802 {
1803 // The stubs have relocations and we need to process them after writing
1804 // out the stubs. So relocations now must follow section writes.
1805 this->set_section_offset(shndx, -1ULL);
1806 this->set_relocs_must_follow_section_writes();
1807 }
1808
1809 // Structure for mapping symbol position.
1810 struct Mapping_symbol_position
1811 {
1812 Mapping_symbol_position(unsigned int shndx, AArch64_address offset):
1813 shndx_(shndx), offset_(offset)
1814 {}
1815
1816 // "<" comparator used in the ordered map container.
1817 bool
1818 operator<(const Mapping_symbol_position& p) const
1819 {
1820 return (this->shndx_ < p.shndx_
1821 || (this->shndx_ == p.shndx_ && this->offset_ < p.offset_));
1822 }
1823
1824 // Section index.
1825 unsigned int shndx_;
1826
1827 // Section offset.
1828 AArch64_address offset_;
1829 };
1830
1831 typedef std::map<Mapping_symbol_position, char> Mapping_symbol_info;
1832
1833 protected:
1834 // Post constructor setup.
1835 void
1836 do_setup()
1837 {
1838 // Call parent's setup method.
1839 Sized_relobj_file<size, big_endian>::do_setup();
1840
1841 // Initialize look-up tables.
1842 this->stub_tables_.resize(this->shnum());
1843 }
1844
1845 virtual void
1846 do_relocate_sections(
1847 const Symbol_table* symtab, const Layout* layout,
1848 const unsigned char* pshdrs, Output_file* of,
1849 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1850
1851 // Count local symbols and (optionally) record mapping info.
1852 virtual void
1853 do_count_local_symbols(Stringpool_template<char>*,
1854 Stringpool_template<char>*);
1855
1856 private:
1857 // Fix all errata in the object, and for each erratum, relocate corresponding
1858 // erratum stub.
1859 void
1860 fix_errata_and_relocate_erratum_stubs(
1861 typename Sized_relobj_file<size, big_endian>::Views* pviews);
1862
1863 // Try to fix erratum 843419 in an optimized way. Return true if patch is
1864 // applied.
1865 bool
1866 try_fix_erratum_843419_optimized(
1867 The_erratum_stub*, AArch64_address,
1868 typename Sized_relobj_file<size, big_endian>::View_size&);
1869
1870 // Whether a section needs to be scanned for relocation stubs.
1871 bool
1872 section_needs_reloc_stub_scanning(const elfcpp::Shdr<size, big_endian>&,
1873 const Relobj::Output_sections&,
1874 const Symbol_table*, const unsigned char*);
1875
1876 // List of stub tables.
1877 Stub_table_list stub_tables_;
1878
1879 // Mapping symbol information sorted by (section index, section_offset).
1880 Mapping_symbol_info mapping_symbol_info_;
1881 }; // End of AArch64_relobj
1882
1883
1884 // Override to record mapping symbol information.
1885 template<int size, bool big_endian>
1886 void
1887 AArch64_relobj<size, big_endian>::do_count_local_symbols(
1888 Stringpool_template<char>* pool, Stringpool_template<char>* dynpool)
1889 {
1890 Sized_relobj_file<size, big_endian>::do_count_local_symbols(pool, dynpool);
1891
1892 // Only erratum-fixing work needs mapping symbols, so skip this time-consuming
1893 // processing when we are not fixing errata.
1894 if (!parameters->options().fix_cortex_a53_843419()
1895 && !parameters->options().fix_cortex_a53_835769())
1896 return;
1897
1898 const unsigned int loccount = this->local_symbol_count();
1899 if (loccount == 0)
1900 return;
1901
1902 // Read the symbol table section header.
1903 const unsigned int symtab_shndx = this->symtab_shndx();
1904 elfcpp::Shdr<size, big_endian>
1905 symtabshdr(this, this->elf_file()->section_header(symtab_shndx));
1906 gold_assert(symtabshdr.get_sh_type() == elfcpp::SHT_SYMTAB);
1907
1908 // Read the local symbols.
1909 const int sym_size = elfcpp::Elf_sizes<size>::sym_size;
1910 gold_assert(loccount == symtabshdr.get_sh_info());
1911 off_t locsize = loccount * sym_size;
1912 const unsigned char* psyms = this->get_view(symtabshdr.get_sh_offset(),
1913 locsize, true, true);
1914
1915 // For mapping symbol processing, we need to read the symbol names.
1916 unsigned int strtab_shndx = this->adjust_shndx(symtabshdr.get_sh_link());
1917 if (strtab_shndx >= this->shnum())
1918 {
1919 this->error(_("invalid symbol table name index: %u"), strtab_shndx);
1920 return;
1921 }
1922
1923 elfcpp::Shdr<size, big_endian>
1924 strtabshdr(this, this->elf_file()->section_header(strtab_shndx));
1925 if (strtabshdr.get_sh_type() != elfcpp::SHT_STRTAB)
1926 {
1927 this->error(_("symbol table name section has wrong type: %u"),
1928 static_cast<unsigned int>(strtabshdr.get_sh_type()));
1929 return;
1930 }
1931
1932 const char* pnames =
1933 reinterpret_cast<const char*>(this->get_view(strtabshdr.get_sh_offset(),
1934 strtabshdr.get_sh_size(),
1935 false, false));
1936
1937 // Skip the first dummy symbol.
1938 psyms += sym_size;
1939 typename Sized_relobj_file<size, big_endian>::Local_values*
1940 plocal_values = this->local_values();
1941 for (unsigned int i = 1; i < loccount; ++i, psyms += sym_size)
1942 {
1943 elfcpp::Sym<size, big_endian> sym(psyms);
1944 Symbol_value<size>& lv((*plocal_values)[i]);
1945 AArch64_address input_value = lv.input_value();
1946
1947 // Check to see if this is a mapping symbol. AArch64 mapping symbols are
1948 // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
1949 // symbols.
1950 // Mapping symbols could be one of the following 4 forms -
1951 // a) $x
1952 // b) $x.<any...>
1953 // c) $d
1954 // d) $d.<any...>
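// Per "ELF for the ARM 64-bit Architecture", "$x" marks the start of a
// region of A64 instructions and "$d" marks the start of a region of data
// (for example a literal pool embedded in a text section). Only the first
// character after '$' matters here; any ".<suffix>" is ignored.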
1955 const char* sym_name = pnames + sym.get_st_name();
1956 if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
1957 && (sym_name[2] == '\0' || sym_name[2] == '.'))
1958 {
1959 bool is_ordinary;
1960 unsigned int input_shndx =
1961 this->adjust_sym_shndx(i, sym.get_st_shndx(), &is_ordinary);
1962 gold_assert(is_ordinary);
1963
1964 Mapping_symbol_position msp(input_shndx, input_value);
1965 // Insert mapping_symbol_info into map whose ordering is defined by
1966 // (shndx, offset_within_section).
1967 this->mapping_symbol_info_[msp] = sym_name[1];
1968 }
1969 }
1970 }
1971
1972
1973 // Fix all errata in the object and, for each erratum, relocate the
1974 // corresponding erratum stub (by calling Stub_table::relocate_erratum_stub).
1975
1976 template<int size, bool big_endian>
1977 void
1978 AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs(
1979 typename Sized_relobj_file<size, big_endian>::Views* pviews)
1980 {
1981 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
1982 unsigned int shnum = this->shnum();
1983 const Relobj::Output_sections& out_sections(this->output_sections());
1984 for (unsigned int i = 1; i < shnum; ++i)
1985 {
1986 The_stub_table* stub_table = this->stub_table(i);
1987 if (!stub_table)
1988 continue;
1989 std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
1990 ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
1991 Erratum_stub_set_iter p = ipair.first, end = ipair.second;
1992 typename Sized_relobj_file<size, big_endian>::View_size&
1993 pview((*pviews)[i]);
1994 AArch64_address view_offset = 0;
1995 if (pview.is_input_output_view)
1996 {
1997 // In this case, write_sections has not added the output offset to
1998 // the view's address, so we must do so. Currently this only happens
1999 // for a relaxed section.
2000 unsigned int index = this->adjust_shndx(i);
2001 const Output_relaxed_input_section* poris =
2002 out_sections[index]->find_relaxed_input_section(this, index);
2003 gold_assert(poris != NULL);
2004 view_offset = poris->address() - pview.address;
2005 }
2006
2007 while (p != end)
2008 {
2009 The_erratum_stub* stub = *p;
2010
2011 // Double check data before fix.
2012 gold_assert(pview.address + view_offset + stub->sh_offset()
2013 == stub->erratum_address());
2014
2015 // Update previously recorded erratum insn with relocated
2016 // version.
2017 Insntype* ip =
2018 reinterpret_cast<Insntype*>(
2019 pview.view + view_offset + stub->sh_offset());
2020 Insntype insn_to_fix = ip[0];
2021 stub->update_erratum_insn(insn_to_fix);
2022
2023 // First try to see if erratum is 843419 and if it can be fixed
2024 // without using branch-to-stub.
2025 if (!try_fix_erratum_843419_optimized(stub, view_offset, pview))
2026 {
2027 // Replace the erratum insn with a branch-to-stub.
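// The AArch64 B instruction encodes a word-scaled 26-bit PC-relative
// immediate (a range of roughly +/-128MB); the 0xfffffff mask below keeps
// the low 28 bits of the byte offset passed to construct_b.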
2028 AArch64_address stub_address =
2029 stub_table->erratum_stub_address(stub);
2030 unsigned int b_offset = stub_address - stub->erratum_address();
2031 AArch64_relocate_functions<size, big_endian>::construct_b(
2032 pview.view + view_offset + stub->sh_offset(),
2033 b_offset & 0xfffffff);
2034 }
2035
2036 // The erratum fix is done (or skipped); continue to relocate the erratum
2037 // stub. Note that when the erratum fix is skipped (either because we
2038 // proactively change the code sequence or because the code sequence is
2039 // changed by relaxation, etc.), we can still safely relocate the
2040 // erratum stub, ignoring the fact that it will never be
2041 // executed.
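// The expression below turns the stub table's output address into a pointer
// within this section's memory view: view + (stub table address - view
// address) is where the stub table's bytes begin in the view.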
2042 stub_table->relocate_erratum_stub(
2043 stub,
2044 pview.view + (stub_table->address() - pview.address));
2045
2046 // Next erratum stub.
2047 ++p;
2048 }
2049 }
2050 }
2051
2052
2053 // This is an optimization for erratum 843419. The erratum sequence must begin
2054 // with 'adrp'; when the final value calculated by the adrp fits in an adr, we
2055 // can simply replace the 'adrp' with an 'adr', saving two jumps per occurrence.
2056 // (Note, however, that in this case we do not delete the erratum stub, since it
2057 // is too late to do so; it is merely generated without ever being called.)
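// Illustrative example (numbers are made up): if the adrp sits at PC
// 0x400000 and targets the page 0x480000, then adrp_dest_value - PC is
// 0x80000 (512KB), which fits the +/-1MB adr range, so the adrp can be
// rewritten in place and the branch-to-stub is not needed.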
2058
2059 template<int size, bool big_endian>
2060 bool
2061 AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
2062 The_erratum_stub* stub, AArch64_address view_offset,
2063 typename Sized_relobj_file<size, big_endian>::View_size& pview)
2064 {
2065 if (stub->type() != ST_E_843419)
2066 return false;
2067
2068 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
2069 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
2070 E843419_stub<size, big_endian>* e843419_stub =
2071 reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
2072 AArch64_address pc =
2073 pview.address + view_offset + e843419_stub->adrp_sh_offset();
2074 unsigned int adrp_offset = e843419_stub->adrp_sh_offset();
2075 Insntype* adrp_view =
2076 reinterpret_cast<Insntype*>(pview.view + view_offset + adrp_offset);
2077 Insntype adrp_insn = adrp_view[0];
2078
2079 // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come
2080 // from an IE -> LE relaxation etc. As a side effect of that TLS relaxation,
2081 // the ADRP has been turned into an MRS, so there is no erratum risk anymore.
2082 // Therefore, we return true to avoid emitting an unnecessary branch-to-stub.
2083 if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn))
2084 return true;
2085
2086 // If the instruction at adrp_sh_offset is not ADRP and the instruction before
2087 // it is "mrs R, tpidr_el0", it may come from an LD -> LE relaxation etc.
2088 // As in the case above, there is no erratum risk any more, so we can safely
2089 // return true.
2090 if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset)
2091 {
2092 Insntype* prev_view =
2093 reinterpret_cast<Insntype*>(
2094 pview.view + view_offset + adrp_offset - 4);
2095 Insntype prev_insn = prev_view[0];
2096
2097 if (Insn_utilities::is_mrs_tpidr_el0(prev_insn))
2098 return true;
2099 }
2100
2101 // If we reach here, the first instruction must be ADRP.
2102 gold_assert(Insn_utilities::is_adrp(adrp_insn));
2103 // Get adrp 33-bit signed imm value.
2104 int64_t adrp_imm = Insn_utilities::
2105 aarch64_adrp_decode_imm(adrp_insn);
2106 // adrp - final value transferred to target register is calculated as:
2107 // PC[11:0] = Zeros(12)
2108 // adrp_dest_value = PC + adrp_imm;
2109 int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
2110 // adr - final value transferred to target register is calculated as:
2111 // PC + adr_imm
2112 // So we have:
2113 // PC + adr_imm = adrp_dest_value
2114 // ==>
2115 // adr_imm = adrp_dest_value - PC
2116 int64_t adr_imm = adrp_dest_value - pc;
2117 // Check if imm fits in adr (21-bit signed).
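// A 21-bit signed immediate covers [-2^20, 2^20 - 1] bytes, i.e. roughly
// +/-1MB around PC.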
2118 if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
2119 {
2120 // Convert 'adrp' into 'adr'.
2121 Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
2122 adr_insn = Insn_utilities::
2123 aarch64_adr_encode_imm(adr_insn, adr_imm);
2124 elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
2125 return true;
2126 }
2127 return false;
2128 }
2129
2130
2131 // Relocate sections.
2132
2133 template<int size, bool big_endian>
2134 void
2135 AArch64_relobj<size, big_endian>::do_relocate_sections(
2136 const Symbol_table* symtab, const Layout* layout,
2137 const unsigned char* pshdrs, Output_file* of,
2138 typename Sized_relobj_file<size, big_endian>::Views* pviews)
2139 {
2140 // Relocate the section data.
2141 this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
2142 1, this->shnum() - 1);
2143
2144 // We do not generate stubs if doing a relocatable link.
2145 if (parameters->options().relocatable())
2146 return;
2147
2148 // This part only relocates erratum stubs that belong to input sections of this
2149 // object file.
2150 if (parameters->options().fix_cortex_a53_843419()
2151 || parameters->options().fix_cortex_a53_835769())
2152 this->fix_errata_and_relocate_erratum_stubs(pviews);
2153
2154 Relocate_info<size, big_endian> relinfo;
2155 relinfo.symtab = symtab;
2156 relinfo.layout = layout;
2157 relinfo.object = this;
2158
2159 // This part relocates all reloc stubs that are contained in stub_tables of
2160 // this object file.
2161 unsigned int shnum = this->shnum();
2162 The_target_aarch64* target = The_target_aarch64::current_target();
2163
2164 for (unsigned int i = 1; i < shnum; ++i)
2165 {
2166 The_aarch64_input_section* aarch64_input_section =
2167 target->find_aarch64_input_section(this, i);
2168 if (aarch64_input_section != NULL
2169 && aarch64_input_section->is_stub_table_owner()
2170 && !aarch64_input_section->stub_table()->empty())
2171 {
2172 Output_section* os = this->output_section(i);
2173 gold_assert(os != NULL);
2174
2175 relinfo.reloc_shndx = elfcpp::SHN_UNDEF;
2176 relinfo.reloc_shdr = NULL;
2177 relinfo.data_shndx = i;
2178 relinfo.data_shdr = pshdrs + i * elfcpp::Elf_sizes<size>::shdr_size;
2179
2180 typename Sized_relobj_file<size, big_endian>::View_size&
2181 view_struct = (*pviews)[i];
2182 gold_assert(view_struct.view != NULL);
2183
2184 The_stub_table* stub_table = aarch64_input_section->stub_table();
2185 off_t offset = stub_table->address() - view_struct.address;
2186 unsigned char* view = view_struct.view + offset;
2187 AArch64_address address = stub_table->address();
2188 section_size_type view_size = stub_table->data_size();
2189 stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
2190 view_size);
2191 }
2192 }
2193 }
2194
2195
2196 // Determine if an input section is scannable for stub processing. SHDR is
2197 // the header of the section and SHNDX is the section index. OS is the output
2198 // section for the input section and SYMTAB is the global symbol table used to
2199 // look up ICF information.
2200
2201 template<int size, bool big_endian>
2202 bool
2203 AArch64_relobj<size, big_endian>::text_section_is_scannable(
2204 const elfcpp::Shdr<size, big_endian>& text_shdr,
2205 unsigned int text_shndx,
2206 const Output_section* os,
2207 const Symbol_table* symtab)
2208 {
2209 // Skip any empty sections, unallocated sections, or sections whose
2210 // type is not SHT_PROGBITS.
2211 if (text_shdr.get_sh_size() == 0
2212 || (text_shdr.get_sh_flags() & elfcpp::SHF_ALLOC) == 0
2213 || text_shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2214 return false;
2215
2216 // Skip any discarded or ICF'ed sections.
2217 if (os == NULL || symtab->is_section_folded(this, text_shndx))
2218 return false;
2219
2220 // Skip exception frame.
2221 if (strcmp(os->name(), ".eh_frame") == 0)
2222 return false;
2223
2224 gold_assert(!this->is_output_section_offset_invalid(text_shndx) ||
2225 os->find_relaxed_input_section(this, text_shndx) != NULL);
2226
2227 return true;
2228 }
2229
2230
2231 // Determine if we want to scan the SHNDX-th section for relocation stubs.
2232 // This is a helper for AArch64_relobj::scan_sections_for_stubs().
2233
2234 template<int size, bool big_endian>
2235 bool
2236 AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
2237 const elfcpp::Shdr<size, big_endian>& shdr,
2238 const Relobj::Output_sections& out_sections,
2239 const Symbol_table* symtab,
2240 const unsigned char* pshdrs)
2241 {
2242 unsigned int sh_type = shdr.get_sh_type();
2243 if (sh_type != elfcpp::SHT_RELA)
2244 return false;
2245
2246 // Ignore empty section.
2247 off_t sh_size = shdr.get_sh_size();
2248 if (sh_size == 0)
2249 return false;
2250
2251 // Ignore reloc section with unexpected symbol table. The
2252 // error will be reported in the final link.
2253 if (this->adjust_shndx(shdr.get_sh_link()) != this->symtab_shndx())
2254 return false;
2255
2256 gold_assert(sh_type == elfcpp::SHT_RELA);
2257 unsigned int reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2258
2259 // Ignore reloc section with unexpected entsize or uneven size.
2260 // The error will be reported in the final link.
2261 if (reloc_size != shdr.get_sh_entsize() || sh_size % reloc_size != 0)
2262 return false;
2263
2264 // Ignore reloc section with bad info. This error will be
2265 // reported in the final link.
2266 unsigned int text_shndx = this->adjust_shndx(shdr.get_sh_info());
2267 if (text_shndx >= this->shnum())
2268 return false;
2269
2270 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2271 const elfcpp::Shdr<size, big_endian> text_shdr(pshdrs +
2272 text_shndx * shdr_size);
2273 return this->text_section_is_scannable(text_shdr, text_shndx,
2274 out_sections[text_shndx], symtab);
2275 }
2276
2277
2278 // Scan section SHNDX for errata 843419 and 835769.
2279
2280 template<int size, bool big_endian>
2281 void
2282 AArch64_relobj<size, big_endian>::scan_errata(
2283 unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
2284 Output_section* os, const Symbol_table* symtab,
2285 The_target_aarch64* target)
2286 {
2287 if (shdr.get_sh_size() == 0
2288 || (shdr.get_sh_flags() &
2289 (elfcpp::SHF_ALLOC | elfcpp::SHF_EXECINSTR)) == 0
2290 || shdr.get_sh_type() != elfcpp::SHT_PROGBITS)
2291 return;
2292
2293 if (!os || symtab->is_section_folded(this, shndx)) return;
2294
2295 AArch64_address output_offset = this->get_output_section_offset(shndx);
2296 AArch64_address output_address;
2297 if (output_offset != invalid_address)
2298 output_address = os->address() + output_offset;
2299 else
2300 {
2301 const Output_relaxed_input_section* poris =
2302 os->find_relaxed_input_section(this, shndx);
2303 if (!poris) return;
2304 output_address = poris->address();
2305 }
2306
2307 section_size_type input_view_size = 0;
2308 const unsigned char* input_view =
2309 this->section_contents(shndx, &input_view_size, false);
2310
2311 Mapping_symbol_position section_start(shndx, 0);
2312 // Find the first mapping symbol record within section shndx.
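// Each '$x' record opens a code span that runs until the next mapping
// symbol in the same section (or, failing that, the end of the section);
// only those code spans are handed to the erratum scanners below.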
2313 typename Mapping_symbol_info::const_iterator p =
2314 this->mapping_symbol_info_.lower_bound(section_start);
2315 while (p != this->mapping_symbol_info_.end() &&
2316 p->first.shndx_ == shndx)
2317 {
2318 typename Mapping_symbol_info::const_iterator prev = p;
2319 ++p;
2320 if (prev->second == 'x')
2321 {
2322 section_size_type span_start =
2323 convert_to_section_size_type(prev->first.offset_);
2324 section_size_type span_end;
2325 if (p != this->mapping_symbol_info_.end()
2326 && p->first.shndx_ == shndx)
2327 span_end = convert_to_section_size_type(p->first.offset_);
2328 else
2329 span_end = convert_to_section_size_type(shdr.get_sh_size());
2330
2331 // Here we do not share the scanning code between the two errata. For 843419,
2332 // only the last few insns of each page are examined, which is fast,
2333 // whereas for 835769, every insn pair needs to be checked.
2334
2335 if (parameters->options().fix_cortex_a53_843419())
2336 target->scan_erratum_843419_span(
2337 this, shndx, span_start, span_end,
2338 const_cast<unsigned char*>(input_view), output_address);
2339
2340 if (parameters->options().fix_cortex_a53_835769())
2341 target->scan_erratum_835769_span(
2342 this, shndx, span_start, span_end,
2343 const_cast<unsigned char*>(input_view), output_address);
2344 }
2345 }
2346 }
2347
2348
2349 // Scan relocations for stub generation.
2350
2351 template<int size, bool big_endian>
2352 void
2353 AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
2354 The_target_aarch64* target,
2355 const Symbol_table* symtab,
2356 const Layout* layout)
2357 {
2358 unsigned int shnum = this->shnum();
2359 const unsigned int shdr_size = elfcpp::Elf_sizes<size>::shdr_size;
2360
2361 // Read the section headers.
2362 const unsigned char* pshdrs = this->get_view(this->elf_file()->shoff(),
2363 shnum * shdr_size,
2364 true, true);
2365
2366 // To speed up processing, we set up hash tables for fast lookup of
2367 // input offsets to output addresses.
2368 this->initialize_input_to_output_maps();
2369
2370 const Relobj::Output_sections& out_sections(this->output_sections());
2371
2372 Relocate_info<size, big_endian> relinfo;
2373 relinfo.symtab = symtab;
2374 relinfo.layout = layout;
2375 relinfo.object = this;
2376
2377 // Do relocation stubs scanning.
2378 const unsigned char* p = pshdrs + shdr_size;
2379 for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
2380 {
2381 const elfcpp::Shdr<size, big_endian> shdr(p);
2382 if (parameters->options().fix_cortex_a53_843419()
2383 || parameters->options().fix_cortex_a53_835769())
2384 scan_errata(i, shdr, out_sections[i], symtab, target);
2385 if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
2386 pshdrs))
2387 {
2388 unsigned int index = this->adjust_shndx(shdr.get_sh_info());
2389 AArch64_address output_offset =
2390 this->get_output_section_offset(index);
2391 AArch64_address output_address;
2392 if (output_offset != invalid_address)
2393 {
2394 output_address = out_sections[index]->address() + output_offset;
2395 }
2396 else
2397 {
2398 // Currently this only happens for a relaxed section.
2399 const Output_relaxed_input_section* poris =
2400 out_sections[index]->find_relaxed_input_section(this, index);
2401 gold_assert(poris != NULL);
2402 output_address = poris->address();
2403 }
2404
2405 // Get the relocations.
2406 const unsigned char* prelocs = this->get_view(shdr.get_sh_offset(),
2407 shdr.get_sh_size(),
2408 true, false);
2409
2410 // Get the section contents.
2411 section_size_type input_view_size = 0;
2412 const unsigned char* input_view =
2413 this->section_contents(index, &input_view_size, false);
2414
2415 relinfo.reloc_shndx = i;
2416 relinfo.data_shndx = index;
2417 unsigned int sh_type = shdr.get_sh_type();
2418 unsigned int reloc_size;
2419 gold_assert(sh_type == elfcpp::SHT_RELA);
2420 reloc_size = elfcpp::Elf_sizes<size>::rela_size;
2421
2422 Output_section* os = out_sections[index];
2423 target->scan_section_for_stubs(&relinfo, sh_type, prelocs,
2424 shdr.get_sh_size() / reloc_size,
2425 os,
2426 output_offset == invalid_address,
2427 input_view, output_address,
2428 input_view_size);
2429 }
2430 }
2431 }
2432
2433
2434 // A class to wrap an ordinary input section containing executable code.
2435
2436 template<int size, bool big_endian>
2437 class AArch64_input_section : public Output_relaxed_input_section
2438 {
2439 public:
2440 typedef Stub_table<size, big_endian> The_stub_table;
2441
2442 AArch64_input_section(Relobj* relobj, unsigned int shndx)
2443 : Output_relaxed_input_section(relobj, shndx, 1),
2444 stub_table_(NULL),
2445 original_contents_(NULL), original_size_(0),
2446 original_addralign_(1)
2447 { }
2448
2449 ~AArch64_input_section()
2450 { delete[] this->original_contents_; }
2451
2452 // Initialize.
2453 void
2454 init();
2455
2456 // Set the stub_table.
2457 void
2458 set_stub_table(The_stub_table* st)
2459 { this->stub_table_ = st; }
2460
2461 // Whether this is a stub table owner.
2462 bool
2463 is_stub_table_owner() const
2464 { return this->stub_table_ != NULL && this->stub_table_->owner() == this; }
2465
2466 // Return the original size of the section.
2467 uint32_t
2468 original_size() const
2469 { return this->original_size_; }
2470
2471 // Return the stub table.
2472 The_stub_table*
2473 stub_table()
2474 { return stub_table_; }
2475
2476 protected:
2477 // Write out this input section.
2478 void
2479 do_write(Output_file*);
2480
2481 // Return required alignment of this.
2482 uint64_t
2483 do_addralign() const
2484 {
2485 if (this->is_stub_table_owner())
2486 return std::max(this->stub_table_->addralign(),
2487 static_cast<uint64_t>(this->original_addralign_));
2488 else
2489 return this->original_addralign_;
2490 }
2491
2492 // Finalize data size.
2493 void
2494 set_final_data_size();
2495
2496 // Reset address and file offset.
2497 void
2498 do_reset_address_and_file_offset();
2499
2500 // Output offset.
2501 bool
2502 do_output_offset(const Relobj* object, unsigned int shndx,
2503 section_offset_type offset,
2504 section_offset_type* poutput) const
2505 {
2506 if ((object == this->relobj())
2507 && (shndx == this->shndx())
2508 && (offset >= 0)
2509 && (offset <=
2510 convert_types<section_offset_type, uint32_t>(this->original_size_)))
2511 {
2512 *poutput = offset;
2513 return true;
2514 }
2515 else
2516 return false;
2517 }
2518
2519 private:
2520 // Copying is not allowed.
2521 AArch64_input_section(const AArch64_input_section&);
2522 AArch64_input_section& operator=(const AArch64_input_section&);
2523
2524 // The relocation stubs.
2525 The_stub_table* stub_table_;
2526 // Original section contents. We have to make a copy here since the file
2527 // containing the original section may not be locked when we need to access
2528 // the contents.
2529 unsigned char* original_contents_;
2530 // Section size of the original input section.
2531 uint32_t original_size_;
2532 // Address alignment of the original input section.
2533 uint32_t original_addralign_;
2534 }; // End of AArch64_input_section
2535
2536
2537 // Finalize data size.
2538
2539 template<int size, bool big_endian>
2540 void
2541 AArch64_input_section<size, big_endian>::set_final_data_size()
2542 {
2543 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2544
2545 if (this->is_stub_table_owner())
2546 {
2547 this->stub_table_->finalize_data_size();
2548 off = align_address(off, this->stub_table_->addralign());
2549 off += this->stub_table_->data_size();
2550 }
2551 this->set_data_size(off);
2552 }
2553
2554
2555 // Reset address and file offset.
2556
2557 template<int size, bool big_endian>
2558 void
2559 AArch64_input_section<size, big_endian>::do_reset_address_and_file_offset()
2560 {
2561 // Size of the original input section contents.
2562 off_t off = convert_types<off_t, uint64_t>(this->original_size_);
2563
2564 // If this is a stub table owner, account for the stub table size.
2565 if (this->is_stub_table_owner())
2566 {
2567 The_stub_table* stub_table = this->stub_table_;
2568
2569 // Reset the stub table's address and file offset. The
2570 // current data size for child will be updated after that.
2571 stub_table_->reset_address_and_file_offset();
2572 off = align_address(off, stub_table_->addralign());
2573 off += stub_table->current_data_size();
2574 }
2575
2576 this->set_current_data_size(off);
2577 }
2578
2579
2580 // Initialize an AArch64_input_section.
2581
2582 template<int size, bool big_endian>
2583 void
2584 AArch64_input_section<size, big_endian>::init()
2585 {
2586 Relobj* relobj = this->relobj();
2587 unsigned int shndx = this->shndx();
2588
2589 // We have to cache original size, alignment and contents to avoid locking
2590 // the original file.
2591 this->original_addralign_ =
2592 convert_types<uint32_t, uint64_t>(relobj->section_addralign(shndx));
2593
2594 // This is not efficient but we expect only a small number of relaxed
2595 // input sections for stubs.
2596 section_size_type section_size;
2597 const unsigned char* section_contents =
2598 relobj->section_contents(shndx, &section_size, false);
2599 this->original_size_ =
2600 convert_types<uint32_t, uint64_t>(relobj->section_size(shndx));
2601
2602 gold_assert(this->original_contents_ == NULL);
2603 this->original_contents_ = new unsigned char[section_size];
2604 memcpy(this->original_contents_, section_contents, section_size);
2605
2606 // We want to make this look like the original input section after
2607 // output sections are finalized.
2608 Output_section* os = relobj->output_section(shndx);
2609 off_t offset = relobj->output_section_offset(shndx);
2610 gold_assert(os != NULL && !relobj->is_output_section_offset_invalid(shndx));
2611 this->set_address(os->address() + offset);
2612 this->set_file_offset(os->offset() + offset);
2613 this->set_current_data_size(this->original_size_);
2614 this->finalize_data_size();
2615 }
2616
2617
2618 // Write data to output file.
2619
2620 template<int size, bool big_endian>
2621 void
2622 AArch64_input_section<size, big_endian>::do_write(Output_file* of)
2623 {
2624 // We have to write out the original section content.
2625 gold_assert(this->original_contents_ != NULL);
2626 of->write(this->offset(), this->original_contents_,
2627 this->original_size_);
2628
2629 // If this owns a stub table and it is not empty, write it.
2630 if (this->is_stub_table_owner() && !this->stub_table_->empty())
2631 this->stub_table_->write(of);
2632 }
2633
2634
2635 // AArch64 output section class. This is defined mainly to add a number of stub
2636 // generation methods.
2637
2638 template<int size, bool big_endian>
2639 class AArch64_output_section : public Output_section
2640 {
2641 public:
2642 typedef Target_aarch64<size, big_endian> The_target_aarch64;
2643 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2644 typedef Stub_table<size, big_endian> The_stub_table;
2645 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2646
2647 public:
2648 AArch64_output_section(const char* name, elfcpp::Elf_Word type,
2649 elfcpp::Elf_Xword flags)
2650 : Output_section(name, type, flags)
2651 { }
2652
2653 ~AArch64_output_section() {}
2654
2655 // Group input sections for stub generation.
2656 void
2657 group_sections(section_size_type, bool, Target_aarch64<size, big_endian>*,
2658 const Task*);
2659
2660 private:
2661 typedef Output_section::Input_section Input_section;
2662 typedef Output_section::Input_section_list Input_section_list;
2663
2664 // Create a stub group.
2665 void
2666 create_stub_group(Input_section_list::const_iterator,
2667 Input_section_list::const_iterator,
2668 Input_section_list::const_iterator,
2669 The_target_aarch64*,
2670 std::vector<Output_relaxed_input_section*>&,
2671 const Task*);
2672 }; // End of AArch64_output_section
2673
2674
2675 // Create a stub group for input sections from FIRST to LAST. OWNER points to
2676 // the input section that will be the owner of the stub table.
2677
2678 template<int size, bool big_endian> void
2679 AArch64_output_section<size, big_endian>::create_stub_group(
2680 Input_section_list::const_iterator first,
2681 Input_section_list::const_iterator last,
2682 Input_section_list::const_iterator owner,
2683 The_target_aarch64* target,
2684 std::vector<Output_relaxed_input_section*>& new_relaxed_sections,
2685 const Task* task)
2686 {
2687 // Currently we convert ordinary input sections into relaxed sections only
2688 // at this point.
2689 The_aarch64_input_section* input_section;
2690 if (owner->is_relaxed_input_section())
2691 gold_unreachable();
2692 else
2693 {
2694 gold_assert(owner->is_input_section());
2695 // Create a new relaxed input section. We need to lock the original
2696 // file.
2697 Task_lock_obj<Object> tl(task, owner->relobj());
2698 input_section =
2699 target->new_aarch64_input_section(owner->relobj(), owner->shndx());
2700 new_relaxed_sections.push_back(input_section);
2701 }
2702
2703 // Create a stub table.
2704 The_stub_table* stub_table =
2705 target->new_stub_table(input_section);
2706
2707 input_section->set_stub_table(stub_table);
2708
2709 Input_section_list::const_iterator p = first;
2710 // Look for input sections or relaxed input sections in [first ... last].
2711 do
2712 {
2713 if (p->is_input_section() || p->is_relaxed_input_section())
2714 {
2715 // The stub table information for input sections lives
2716 // in their objects.
2717 The_aarch64_relobj* aarch64_relobj =
2718 static_cast<The_aarch64_relobj*>(p->relobj());
2719 aarch64_relobj->set_stub_table(p->shndx(), stub_table);
2720 }
2721 }
2722 while (p++ != last);
2723 }
2724
2725
2726 // Group input sections for stub generation. GROUP_SIZE is roughly the limit of
2727 // a stub group's size. We grow a stub group by adding input sections until the
2728 // size is just below GROUP_SIZE. The last input section will be converted into
2729 // a stub table owner. If STUBS_ALWAYS_AFTER_BRANCH is false, we also add input
2730 // sections after the stub table, effectively doubling the group size.
2731 //
2732 // This is similar to the group_sections() function in elf32-arm.c but is
2733 // implemented differently.
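// The grouping logic below is a small state machine: NO_GROUP (no group is
// open), FINDING_STUB_SECTION (a group is open and still growing toward
// GROUP_SIZE), and HAS_STUB_SECTION (a stub table owner has been chosen and
// trailing input sections may still be absorbed; the oversized case for this
// state is marked as not supported yet).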
2734
2735 template<int size, bool big_endian>
2736 void AArch64_output_section<size, big_endian>::group_sections(
2737 section_size_type group_size,
2738 bool stubs_always_after_branch,
2739 Target_aarch64<size, big_endian>* target,
2740 const Task* task)
2741 {
2742 typedef enum
2743 {
2744 NO_GROUP,
2745 FINDING_STUB_SECTION,
2746 HAS_STUB_SECTION
2747 } State;
2748
2749 std::vector<Output_relaxed_input_section*> new_relaxed_sections;
2750
2751 State state = NO_GROUP;
2752 section_size_type off = 0;
2753 section_size_type group_begin_offset = 0;
2754 section_size_type group_end_offset = 0;
2755 section_size_type stub_table_end_offset = 0;
2756 Input_section_list::const_iterator group_begin =
2757 this->input_sections().end();
2758 Input_section_list::const_iterator stub_table =
2759 this->input_sections().end();
2760 Input_section_list::const_iterator group_end = this->input_sections().end();
2761 for (Input_section_list::const_iterator p = this->input_sections().begin();
2762 p != this->input_sections().end();
2763 ++p)
2764 {
2765 section_size_type section_begin_offset =
2766 align_address(off, p->addralign());
2767 section_size_type section_end_offset =
2768 section_begin_offset + p->data_size();
2769
2770 // Check to see if we should group the previously seen sections.
2771 switch (state)
2772 {
2773 case NO_GROUP:
2774 break;
2775
2776 case FINDING_STUB_SECTION:
2777 // Adding this section makes the group larger than GROUP_SIZE.
2778 if (section_end_offset - group_begin_offset >= group_size)
2779 {
2780 if (stubs_always_after_branch)
2781 {
2782 gold_assert(group_end != this->input_sections().end());
2783 this->create_stub_group(group_begin, group_end, group_end,
2784 target, new_relaxed_sections,
2785 task);
2786 state = NO_GROUP;
2787 }
2788 else
2789 {
2790 // Input sections up to stub_group_size bytes after the stub
2791 // table can be handled by it too.
2792 state = HAS_STUB_SECTION;
2793 stub_table = group_end;
2794 stub_table_end_offset = group_end_offset;
2795 }
2796 }
2797 break;
2798
2799 case HAS_STUB_SECTION:
2800 // Adding this section makes the post stub-section group larger
2801 // than GROUP_SIZE.
2802 gold_unreachable();
2803 // NOT SUPPORTED YET. For completeness only.
2804 if (section_end_offset - stub_table_end_offset >= group_size)
2805 {
2806 gold_assert(group_end != this->input_sections().end());
2807 this->create_stub_group(group_begin, group_end, stub_table,
2808 target, new_relaxed_sections, task);
2809 state = NO_GROUP;
2810 }
2811 break;
2812
2813 default:
2814 gold_unreachable();
2815 }
2816
2817 // If we see an input section and currently there is no group, start
2818 // a new one. Skip any empty sections. We look at the data size
2819 // instead of calling p->relobj()->section_size() to avoid locking.
2820 if ((p->is_input_section() || p->is_relaxed_input_section())
2821 && (p->data_size() != 0))
2822 {
2823 if (state == NO_GROUP)
2824 {
2825 state = FINDING_STUB_SECTION;
2826 group_begin = p;
2827 group_begin_offset = section_begin_offset;
2828 }
2829
2830 // Keep track of the last input section seen.
2831 group_end = p;
2832 group_end_offset = section_end_offset;
2833 }
2834
2835 off = section_end_offset;
2836 }
2837
2838 // Create a stub group for any ungrouped sections.
2839 if (state == FINDING_STUB_SECTION || state == HAS_STUB_SECTION)
2840 {
2841 gold_assert(group_end != this->input_sections().end());
2842 this->create_stub_group(group_begin, group_end,
2843 (state == FINDING_STUB_SECTION
2844 ? group_end
2845 : stub_table),
2846 target, new_relaxed_sections, task);
2847 }
2848
2849 if (!new_relaxed_sections.empty())
2850 this->convert_input_sections_to_relaxed_sections(new_relaxed_sections);
2851
2852 // Update the section offsets.
2853 for (size_t i = 0; i < new_relaxed_sections.size(); ++i)
2854 {
2855 The_aarch64_relobj* relobj = static_cast<The_aarch64_relobj*>(
2856 new_relaxed_sections[i]->relobj());
2857 unsigned int shndx = new_relaxed_sections[i]->shndx();
2858 // Tell AArch64_relobj that this input section is converted.
2859 relobj->convert_input_section_to_relaxed_section(shndx);
2860 }
2861 } // End of AArch64_output_section::group_sections
2862
2863
2864 AArch64_reloc_property_table* aarch64_reloc_property_table = NULL;
2865
2866
2867 // The aarch64 target class.
2868 // See the ABI at
2869 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056b/IHI0056B_aaelf64.pdf
2870 template<int size, bool big_endian>
2871 class Target_aarch64 : public Sized_target<size, big_endian>
2872 {
2873 public:
2874 typedef Target_aarch64<size, big_endian> This;
2875 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
2876 Reloc_section;
2877 typedef Relocate_info<size, big_endian> The_relocate_info;
2878 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
2879 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
2880 typedef Reloc_stub<size, big_endian> The_reloc_stub;
2881 typedef Erratum_stub<size, big_endian> The_erratum_stub;
2882 typedef typename Reloc_stub<size, big_endian>::Key The_reloc_stub_key;
2883 typedef Stub_table<size, big_endian> The_stub_table;
2884 typedef std::vector<The_stub_table*> Stub_table_list;
2885 typedef typename Stub_table_list::iterator Stub_table_iterator;
2886 typedef AArch64_input_section<size, big_endian> The_aarch64_input_section;
2887 typedef AArch64_output_section<size, big_endian> The_aarch64_output_section;
2888 typedef Unordered_map<Section_id,
2889 AArch64_input_section<size, big_endian>*,
2890 Section_id_hash> AArch64_input_section_map;
2891 typedef AArch64_insn_utilities<big_endian> Insn_utilities;
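// Size of the TLS thread control block: two pointers, i.e. 16 bytes for
// the 64-bit target and 8 bytes for the 32-bit target.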
2892 const static int TCB_SIZE = size / 8 * 2;
2893
2894 Target_aarch64(const Target::Target_info* info = &aarch64_info)
2895 : Sized_target<size, big_endian>(info),
2896 got_(NULL), plt_(NULL), got_plt_(NULL), got_irelative_(NULL),
2897 got_tlsdesc_(NULL), global_offset_table_(NULL), rela_dyn_(NULL),
2898 rela_irelative_(NULL), copy_relocs_(elfcpp::R_AARCH64_COPY),
2899 got_mod_index_offset_(-1U),
2900 tlsdesc_reloc_info_(), tls_base_symbol_defined_(false),
2901 stub_tables_(), stub_group_size_(0), aarch64_input_section_map_()
2902 { }
2903
2904 // Scan the relocations to determine unreferenced sections for
2905 // garbage collection.
2906 void
2907 gc_process_relocs(Symbol_table* symtab,
2908 Layout* layout,
2909 Sized_relobj_file<size, big_endian>* object,
2910 unsigned int data_shndx,
2911 unsigned int sh_type,
2912 const unsigned char* prelocs,
2913 size_t reloc_count,
2914 Output_section* output_section,
2915 bool needs_special_offset_handling,
2916 size_t local_symbol_count,
2917 const unsigned char* plocal_symbols);
2918
2919 // Scan the relocations to look for symbol adjustments.
2920 void
2921 scan_relocs(Symbol_table* symtab,
2922 Layout* layout,
2923 Sized_relobj_file<size, big_endian>* object,
2924 unsigned int data_shndx,
2925 unsigned int sh_type,
2926 const unsigned char* prelocs,
2927 size_t reloc_count,
2928 Output_section* output_section,
2929 bool needs_special_offset_handling,
2930 size_t local_symbol_count,
2931 const unsigned char* plocal_symbols);
2932
2933 // Finalize the sections.
2934 void
2935 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
2936
2937 // Return the value to use for a dynamic symbol which requires special
2938 // treatment.
2939 uint64_t
2940 do_dynsym_value(const Symbol*) const;
2941
2942 // Relocate a section.
2943 void
2944 relocate_section(const Relocate_info<size, big_endian>*,
2945 unsigned int sh_type,
2946 const unsigned char* prelocs,
2947 size_t reloc_count,
2948 Output_section* output_section,
2949 bool needs_special_offset_handling,
2950 unsigned char* view,
2951 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2952 section_size_type view_size,
2953 const Reloc_symbol_changes*);
2954
2955 // Scan the relocs during a relocatable link.
2956 void
2957 scan_relocatable_relocs(Symbol_table* symtab,
2958 Layout* layout,
2959 Sized_relobj_file<size, big_endian>* object,
2960 unsigned int data_shndx,
2961 unsigned int sh_type,
2962 const unsigned char* prelocs,
2963 size_t reloc_count,
2964 Output_section* output_section,
2965 bool needs_special_offset_handling,
2966 size_t local_symbol_count,
2967 const unsigned char* plocal_symbols,
2968 Relocatable_relocs*);
2969
2970 // Scan the relocs for --emit-relocs.
2971 void
2972 emit_relocs_scan(Symbol_table* symtab,
2973 Layout* layout,
2974 Sized_relobj_file<size, big_endian>* object,
2975 unsigned int data_shndx,
2976 unsigned int sh_type,
2977 const unsigned char* prelocs,
2978 size_t reloc_count,
2979 Output_section* output_section,
2980 bool needs_special_offset_handling,
2981 size_t local_symbol_count,
2982 const unsigned char* plocal_syms,
2983 Relocatable_relocs* rr);
2984
2985 // Relocate a section during a relocatable link.
2986 void
2987 relocate_relocs(
2988 const Relocate_info<size, big_endian>*,
2989 unsigned int sh_type,
2990 const unsigned char* prelocs,
2991 size_t reloc_count,
2992 Output_section* output_section,
2993 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
2994 unsigned char* view,
2995 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
2996 section_size_type view_size,
2997 unsigned char* reloc_view,
2998 section_size_type reloc_view_size);
2999
3000 // Return the symbol index to use for a target specific relocation.
3001 // The only target specific relocation is R_AARCH64_TLSDESC for a
3002 // local symbol, which is an absolute reloc.
3003 unsigned int
3004 do_reloc_symbol_index(void*, unsigned int r_type) const
3005 {
3006 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
3007 return 0;
3008 }
3009
3010 // Return the addend to use for a target specific relocation.
3011 uint64_t
3012 do_reloc_addend(void* arg, unsigned int r_type, uint64_t addend) const;
3013
3014 // Return the PLT section.
3015 uint64_t
3016 do_plt_address_for_global(const Symbol* gsym) const
3017 { return this->plt_section()->address_for_global(gsym); }
3018
3019 uint64_t
3020 do_plt_address_for_local(const Relobj* relobj, unsigned int symndx) const
3021 { return this->plt_section()->address_for_local(relobj, symndx); }
3022
3023 // This function should be defined in targets that can use relocation
3024 // types to determine (via local_reloc_may_be_function_pointer and
3025 // global_reloc_may_be_function_pointer)
3026 // whether a function's pointer is taken. ICF uses this in safe mode to only
3027 // fold those functions whose pointer is definitely not taken.
3028 bool
3029 do_can_check_for_function_pointers() const
3030 { return true; }
3031
3032 // Return the number of entries in the PLT.
3033 unsigned int
3034 plt_entry_count() const;
3035
3036 // Return the offset of the first non-reserved PLT entry.
3037 unsigned int
3038 first_plt_entry_offset() const;
3039
3040 // Return the size of each PLT entry.
3041 unsigned int
3042 plt_entry_size() const;
3043
3044 // Create a stub table.
3045 The_stub_table*
3046 new_stub_table(The_aarch64_input_section*);
3047
3048 // Create an aarch64 input section.
3049 The_aarch64_input_section*
3050 new_aarch64_input_section(Relobj*, unsigned int);
3051
3052 // Find an aarch64 input section instance for a given OBJ and SHNDX.
3053 The_aarch64_input_section*
3054 find_aarch64_input_section(Relobj*, unsigned int) const;
3055
3056 // Return the thread control block size.
3057 unsigned int
3058 tcb_size() const { return This::TCB_SIZE; }
3059
3060 // Scan a section for stub generation.
3061 void
3062 scan_section_for_stubs(const Relocate_info<size, big_endian>*, unsigned int,
3063 const unsigned char*, size_t, Output_section*,
3064 bool, const unsigned char*,
3065 Address,
3066 section_size_type);
3067
3068 // Scan a relocation section for stub.
3069 template<int sh_type>
3070 void
3071 scan_reloc_section_for_stubs(
3072 const The_relocate_info* relinfo,
3073 const unsigned char* prelocs,
3074 size_t reloc_count,
3075 Output_section* output_section,
3076 bool needs_special_offset_handling,
3077 const unsigned char* view,
3078 Address view_address,
3079 section_size_type);
3080
3081 // Relocate a single reloc stub.
3082 void
3083 relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
3084 Output_section*, unsigned char*, Address,
3085 section_size_type);
3086
3087 // Get the default AArch64 target.
3088 static This*
3089 current_target()
3090 {
3091 gold_assert(parameters->target().machine_code() == elfcpp::EM_AARCH64
3092 && parameters->target().get_size() == size
3093 && parameters->target().is_big_endian() == big_endian);
3094 return static_cast<This*>(parameters->sized_target<size, big_endian>());
3095 }
3096
3097
3098 // Scan erratum 843419 for a part of a section.
3099 void
3100 scan_erratum_843419_span(
3101 AArch64_relobj<size, big_endian>*,
3102 unsigned int,
3103 const section_size_type,
3104 const section_size_type,
3105 unsigned char*,
3106 Address);
3107
3108 // Scan erratum 835769 for a part of a section.
3109 void
3110 scan_erratum_835769_span(
3111 AArch64_relobj<size, big_endian>*,
3112 unsigned int,
3113 const section_size_type,
3114 const section_size_type,
3115 unsigned char*,
3116 Address);
3117
3118 protected:
3119 void
3120 do_select_as_default_target()
3121 {
3122 gold_assert(aarch64_reloc_property_table == NULL);
3123 aarch64_reloc_property_table = new AArch64_reloc_property_table();
3124 }
3125
3126 // Add a new reloc argument, returning the index in the vector.
3127 size_t
3128 add_tlsdesc_info(Sized_relobj_file<size, big_endian>* object,
3129 unsigned int r_sym)
3130 {
3131 this->tlsdesc_reloc_info_.push_back(Tlsdesc_info(object, r_sym));
3132 return this->tlsdesc_reloc_info_.size() - 1;
3133 }
3134
3135 virtual Output_data_plt_aarch64<size, big_endian>*
3136 do_make_data_plt(Layout* layout,
3137 Output_data_got_aarch64<size, big_endian>* got,
3138 Output_data_space* got_plt,
3139 Output_data_space* got_irelative)
3140 {
3141 return new Output_data_plt_aarch64_standard<size, big_endian>(
3142 layout, got, got_plt, got_irelative);
3143 }
3144
3145
3146 // do_make_elf_object to override the same function in the base class.
3147 Object*
3148 do_make_elf_object(const std::string&, Input_file*, off_t,
3149 const elfcpp::Ehdr<size, big_endian>&);
3150
3151 Output_data_plt_aarch64<size, big_endian>*
3152 make_data_plt(Layout* layout,
3153 Output_data_got_aarch64<size, big_endian>* got,
3154 Output_data_space* got_plt,
3155 Output_data_space* got_irelative)
3156 {
3157 return this->do_make_data_plt(layout, got, got_plt, got_irelative);
3158 }
3159
3160 // We only need to generate stubs, and hence perform relaxation, if we are
3161 // not doing a relocatable link.
3162 virtual bool
3163 do_may_relax() const
3164 { return !parameters->options().relocatable(); }
3165
3166 // Relaxation hook. This is where we do stub generation.
3167 virtual bool
3168 do_relax(int, const Input_objects*, Symbol_table*, Layout*, const Task*);
3169
3170 void
3171 group_sections(Layout* layout,
3172 section_size_type group_size,
3173 bool stubs_always_after_branch,
3174 const Task* task);
3175
3176 void
3177 scan_reloc_for_stub(const The_relocate_info*, unsigned int,
3178 const Sized_symbol<size>*, unsigned int,
3179 const Symbol_value<size>*,
3180 typename elfcpp::Elf_types<size>::Elf_Swxword,
3181 Address Elf_Addr);
3182
3183 // Make an output section.
3184 Output_section*
3185 do_make_output_section(const char* name, elfcpp::Elf_Word type,
3186 elfcpp::Elf_Xword flags)
3187 { return new The_aarch64_output_section(name, type, flags); }
3188
3189 private:
3190 // The class which scans relocations.
3191 class Scan
3192 {
3193 public:
3194 Scan()
3195 : issued_non_pic_error_(false)
3196 { }
3197
3198 inline void
3199 local(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3200 Sized_relobj_file<size, big_endian>* object,
3201 unsigned int data_shndx,
3202 Output_section* output_section,
3203 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3204 const elfcpp::Sym<size, big_endian>& lsym,
3205 bool is_discarded);
3206
3207 inline void
3208 global(Symbol_table* symtab, Layout* layout, Target_aarch64* target,
3209 Sized_relobj_file<size, big_endian>* object,
3210 unsigned int data_shndx,
3211 Output_section* output_section,
3212 const elfcpp::Rela<size, big_endian>& reloc, unsigned int r_type,
3213 Symbol* gsym);
3214
3215 inline bool
3216 local_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3217 Target_aarch64<size, big_endian>* ,
3218 Sized_relobj_file<size, big_endian>* ,
3219 unsigned int ,
3220 Output_section* ,
3221 const elfcpp::Rela<size, big_endian>& ,
3222 unsigned int r_type,
3223 const elfcpp::Sym<size, big_endian>&);
3224
3225 inline bool
3226 global_reloc_may_be_function_pointer(Symbol_table* , Layout* ,
3227 Target_aarch64<size, big_endian>* ,
3228 Sized_relobj_file<size, big_endian>* ,
3229 unsigned int ,
3230 Output_section* ,
3231 const elfcpp::Rela<size, big_endian>& ,
3232 unsigned int r_type,
3233 Symbol* gsym);
3234
3235 private:
3236 static void
3237 unsupported_reloc_local(Sized_relobj_file<size, big_endian>*,
3238 unsigned int r_type);
3239
3240 static void
3241 unsupported_reloc_global(Sized_relobj_file<size, big_endian>*,
3242 unsigned int r_type, Symbol*);
3243
3244 inline bool
3245 possible_function_pointer_reloc(unsigned int r_type);
3246
3247 void
3248 check_non_pic(Relobj*, unsigned int r_type);
3249
3250 bool
3251 reloc_needs_plt_for_ifunc(Sized_relobj_file<size, big_endian>*,
3252 unsigned int r_type);
3253
3254 // Whether we have issued an error about a non-PIC compilation.
3255 bool issued_non_pic_error_;
3256 };
3257
3258 // The class which implements relocation.
3259 class Relocate
3260 {
3261 public:
3262 Relocate()
3263 : skip_call_tls_get_addr_(false)
3264 { }
3265
3266 ~Relocate()
3267 { }
3268
3269 // Do a relocation. Return false if the caller should not issue
3270 // any warnings about this relocation.
3271 inline bool
3272 relocate(const Relocate_info<size, big_endian>*, unsigned int,
3273 Target_aarch64*, Output_section*, size_t, const unsigned char*,
3274 const Sized_symbol<size>*, const Symbol_value<size>*,
3275 unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
3276 section_size_type);
3277
3278 private:
3279 inline typename AArch64_relocate_functions<size, big_endian>::Status
3280 relocate_tls(const Relocate_info<size, big_endian>*,
3281 Target_aarch64<size, big_endian>*,
3282 size_t,
3283 const elfcpp::Rela<size, big_endian>&,
3284 unsigned int r_type, const Sized_symbol<size>*,
3285 const Symbol_value<size>*,
3286 unsigned char*,
3287 typename elfcpp::Elf_types<size>::Elf_Addr);
3288
3289 inline typename AArch64_relocate_functions<size, big_endian>::Status
3290 tls_gd_to_le(
3291 const Relocate_info<size, big_endian>*,
3292 Target_aarch64<size, big_endian>*,
3293 const elfcpp::Rela<size, big_endian>&,
3294 unsigned int,
3295 unsigned char*,
3296 const Symbol_value<size>*);
3297
3298 inline typename AArch64_relocate_functions<size, big_endian>::Status
3299 tls_ld_to_le(
3300 const Relocate_info<size, big_endian>*,
3301 Target_aarch64<size, big_endian>*,
3302 const elfcpp::Rela<size, big_endian>&,
3303 unsigned int,
3304 unsigned char*,
3305 const Symbol_value<size>*);
3306
3307 inline typename AArch64_relocate_functions<size, big_endian>::Status
3308 tls_ie_to_le(
3309 const Relocate_info<size, big_endian>*,
3310 Target_aarch64<size, big_endian>*,
3311 const elfcpp::Rela<size, big_endian>&,
3312 unsigned int,
3313 unsigned char*,
3314 const Symbol_value<size>*);
3315
3316 inline typename AArch64_relocate_functions<size, big_endian>::Status
3317 tls_desc_gd_to_le(
3318 const Relocate_info<size, big_endian>*,
3319 Target_aarch64<size, big_endian>*,
3320 const elfcpp::Rela<size, big_endian>&,
3321 unsigned int,
3322 unsigned char*,
3323 const Symbol_value<size>*);
3324
3325 inline typename AArch64_relocate_functions<size, big_endian>::Status
3326 tls_desc_gd_to_ie(
3327 const Relocate_info<size, big_endian>*,
3328 Target_aarch64<size, big_endian>*,
3329 const elfcpp::Rela<size, big_endian>&,
3330 unsigned int,
3331 unsigned char*,
3332 const Symbol_value<size>*,
3333 typename elfcpp::Elf_types<size>::Elf_Addr,
3334 typename elfcpp::Elf_types<size>::Elf_Addr);
3335
3336 bool skip_call_tls_get_addr_;
3337
3338 }; // End of class Relocate
3339
3340 // Adjust TLS relocation type based on the options and whether this
3341 // is a local symbol.
3342 static tls::Tls_optimization
3343 optimize_tls_reloc(bool is_final, int r_type);
3344
3345 // Get the GOT section, creating it if necessary.
3346 Output_data_got_aarch64<size, big_endian>*
3347 got_section(Symbol_table*, Layout*);
3348
3349 // Get the GOT PLT section.
3350 Output_data_space*
3351 got_plt_section() const
3352 {
3353 gold_assert(this->got_plt_ != NULL);
3354 return this->got_plt_;
3355 }
3356
3357 // Get the GOT section for TLSDESC entries.
3358 Output_data_got<size, big_endian>*
3359 got_tlsdesc_section() const
3360 {
3361 gold_assert(this->got_tlsdesc_ != NULL);
3362 return this->got_tlsdesc_;
3363 }
3364
3365 // Create the PLT section.
3366 void
3367 make_plt_section(Symbol_table* symtab, Layout* layout);
3368
3369 // Create a PLT entry for a global symbol.
3370 void
3371 make_plt_entry(Symbol_table*, Layout*, Symbol*);
3372
3373 // Create a PLT entry for a local STT_GNU_IFUNC symbol.
3374 void
3375 make_local_ifunc_plt_entry(Symbol_table*, Layout*,
3376 Sized_relobj_file<size, big_endian>* relobj,
3377 unsigned int local_sym_index);
3378
3379 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
3380 void
3381 define_tls_base_symbol(Symbol_table*, Layout*);
3382
3383 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
3384 void
3385 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
3386
3387 // Create a GOT entry for the TLS module index.
3388 unsigned int
3389 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
3390 Sized_relobj_file<size, big_endian>* object);
3391
3392 // Get the PLT section.
3393 Output_data_plt_aarch64<size, big_endian>*
3394 plt_section() const
3395 {
3396 gold_assert(this->plt_ != NULL);
3397 return this->plt_;
3398 }
3399
3400 // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
3401 // ST_E_843419, we need an additional field for adrp offset.
3402 void create_erratum_stub(
3403 AArch64_relobj<size, big_endian>* relobj,
3404 unsigned int shndx,
3405 section_size_type erratum_insn_offset,
3406 Address erratum_address,
3407 typename Insn_utilities::Insntype erratum_insn,
3408 int erratum_type,
3409 unsigned int e843419_adrp_offset=0);
3410
3411 // Return whether this is a 3-insn erratum sequence.
3412 bool is_erratum_843419_sequence(
3413 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
3414 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
3415 typename elfcpp::Swap<32,big_endian>::Valtype insn3);
3416
3417 // Return whether this is a 835769 sequence.
3418 // (Similarly implemented as in elfnn-aarch64.c.)
3419 bool is_erratum_835769_sequence(
3420 typename elfcpp::Swap<32,big_endian>::Valtype,
3421 typename elfcpp::Swap<32,big_endian>::Valtype);
3422
3423 // Get the dynamic reloc section, creating it if necessary.
3424 Reloc_section*
3425 rela_dyn_section(Layout*);
3426
3427 // Get the section to use for TLSDESC relocations.
3428 Reloc_section*
3429 rela_tlsdesc_section(Layout*) const;
3430
3431 // Get the section to use for IRELATIVE relocations.
3432 Reloc_section*
3433 rela_irelative_section(Layout*);
3434
3435 // Add a potential copy relocation.
3436 void
3437 copy_reloc(Symbol_table* symtab, Layout* layout,
3438 Sized_relobj_file<size, big_endian>* object,
3439 unsigned int shndx, Output_section* output_section,
3440 Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
3441 {
3442 unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
3443 this->copy_relocs_.copy_reloc(symtab, layout,
3444 symtab->get_sized_symbol<size>(sym),
3445 object, shndx, output_section,
3446 r_type, reloc.get_r_offset(),
3447 reloc.get_r_addend(),
3448 this->rela_dyn_section(layout));
3449 }
3450
3451 // Information about this specific target which we pass to the
3452 // general Target structure.
3453 static const Target::Target_info aarch64_info;
3454
3455 // The types of GOT entries needed for this platform.
3456 // These values are exposed to the ABI in an incremental link.
3457 // Do not renumber existing values without changing the version
3458 // number of the .gnu_incremental_inputs section.
3459 enum Got_type
3460 {
3461 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
3462 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
3463 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
3464 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
3465 };
3466
3467 // This type is used as the argument to the target specific
3468 // relocation routines. The only target specific reloc is
3469 // R_AARCH64_TLSDESC against a local symbol.
3470 struct Tlsdesc_info
3471 {
3472 Tlsdesc_info(Sized_relobj_file<size, big_endian>* a_object,
3473 unsigned int a_r_sym)
3474 : object(a_object), r_sym(a_r_sym)
3475 { }
3476
3477 // The object in which the local symbol is defined.
3478 Sized_relobj_file<size, big_endian>* object;
3479 // The local symbol index in the object.
3480 unsigned int r_sym;
3481 };
3482
3483 // The GOT section.
3484 Output_data_got_aarch64<size, big_endian>* got_;
3485 // The PLT section.
3486 Output_data_plt_aarch64<size, big_endian>* plt_;
3487 // The GOT PLT section.
3488 Output_data_space* got_plt_;
3489 // The GOT section for IRELATIVE relocations.
3490 Output_data_space* got_irelative_;
3491 // The GOT section for TLSDESC relocations.
3492 Output_data_got<size, big_endian>* got_tlsdesc_;
3493 // The _GLOBAL_OFFSET_TABLE_ symbol.
3494 Symbol* global_offset_table_;
3495 // The dynamic reloc section.
3496 Reloc_section* rela_dyn_;
3497 // The section to use for IRELATIVE relocs.
3498 Reloc_section* rela_irelative_;
3499 // Relocs saved to avoid a COPY reloc.
3500 Copy_relocs<elfcpp::SHT_RELA, size, big_endian> copy_relocs_;
3501 // Offset of the GOT entry for the TLS module index.
3502 unsigned int got_mod_index_offset_;
3503 // We handle R_AARCH64_TLSDESC against a local symbol as a target
3504 // specific relocation. Here we store the object and local symbol
3505 // index for the relocation.
3506 std::vector<Tlsdesc_info> tlsdesc_reloc_info_;
3507 // True if the _TLS_MODULE_BASE_ symbol has been defined.
3508 bool tls_base_symbol_defined_;
3509 // List of stub_tables
3510 Stub_table_list stub_tables_;
3511 // Actual stub group size
3512 section_size_type stub_group_size_;
3513 AArch64_input_section_map aarch64_input_section_map_;
3514 }; // End of Target_aarch64
3515
3516
3517 template<>
3518 const Target::Target_info Target_aarch64<64, false>::aarch64_info =
3519 {
3520 64, // size
3521 false, // is_big_endian
3522 elfcpp::EM_AARCH64, // machine_code
3523 false, // has_make_symbol
3524 false, // has_resolve
3525 false, // has_code_fill
3526 false, // is_default_stack_executable
3527 true, // can_icf_inline_merge_sections
3528 '\0', // wrap_char
3529 "/lib/ld.so.1", // program interpreter
3530 0x400000, // default_text_segment_address
3531 0x10000, // abi_pagesize (overridable by -z max-page-size)
3532 0x1000, // common_pagesize (overridable by -z common-page-size)
3533 false, // isolate_execinstr
3534 0, // rosegment_gap
3535 elfcpp::SHN_UNDEF, // small_common_shndx
3536 elfcpp::SHN_UNDEF, // large_common_shndx
3537 0, // small_common_section_flags
3538 0, // large_common_section_flags
3539 NULL, // attributes_section
3540 NULL, // attributes_vendor
3541 "_start", // entry_symbol_name
3542 32, // hash_entry_size
3543 };
3544
3545 template<>
3546 const Target::Target_info Target_aarch64<32, false>::aarch64_info =
3547 {
3548 32, // size
3549 false, // is_big_endian
3550 elfcpp::EM_AARCH64, // machine_code
3551 false, // has_make_symbol
3552 false, // has_resolve
3553 false, // has_code_fill
3554 false, // is_default_stack_executable
3555 false, // can_icf_inline_merge_sections
3556 '\0', // wrap_char
3557 "/lib/ld.so.1", // program interpreter
3558 0x400000, // default_text_segment_address
3559 0x10000, // abi_pagesize (overridable by -z max-page-size)
3560 0x1000, // common_pagesize (overridable by -z common-page-size)
3561 false, // isolate_execinstr
3562 0, // rosegment_gap
3563 elfcpp::SHN_UNDEF, // small_common_shndx
3564 elfcpp::SHN_UNDEF, // large_common_shndx
3565 0, // small_common_section_flags
3566 0, // large_common_section_flags
3567 NULL, // attributes_section
3568 NULL, // attributes_vendor
3569 "_start", // entry_symbol_name
3570 32, // hash_entry_size
3571 };
3572
3573 template<>
3574 const Target::Target_info Target_aarch64<64, true>::aarch64_info =
3575 {
3576 64, // size
3577 true, // is_big_endian
3578 elfcpp::EM_AARCH64, // machine_code
3579 false, // has_make_symbol
3580 false, // has_resolve
3581 false, // has_code_fill
3582 false, // is_default_stack_executable
3583 true, // can_icf_inline_merge_sections
3584 '\0', // wrap_char
3585 "/lib/ld.so.1", // program interpreter
3586 0x400000, // default_text_segment_address
3587 0x10000, // abi_pagesize (overridable by -z max-page-size)
3588 0x1000, // common_pagesize (overridable by -z common-page-size)
3589 false, // isolate_execinstr
3590 0, // rosegment_gap
3591 elfcpp::SHN_UNDEF, // small_common_shndx
3592 elfcpp::SHN_UNDEF, // large_common_shndx
3593 0, // small_common_section_flags
3594 0, // large_common_section_flags
3595 NULL, // attributes_section
3596 NULL, // attributes_vendor
3597 "_start", // entry_symbol_name
3598 32, // hash_entry_size
3599 };
3600
3601 template<>
3602 const Target::Target_info Target_aarch64<32, true>::aarch64_info =
3603 {
3604 32, // size
3605 true, // is_big_endian
3606 elfcpp::EM_AARCH64, // machine_code
3607 false, // has_make_symbol
3608 false, // has_resolve
3609 false, // has_code_fill
3610 false, // is_default_stack_executable
3611 false, // can_icf_inline_merge_sections
3612 '\0', // wrap_char
3613 "/lib/ld.so.1", // program interpreter
3614 0x400000, // default_text_segment_address
3615 0x10000, // abi_pagesize (overridable by -z max-page-size)
3616 0x1000, // common_pagesize (overridable by -z common-page-size)
3617 false, // isolate_execinstr
3618 0, // rosegment_gap
3619 elfcpp::SHN_UNDEF, // small_common_shndx
3620 elfcpp::SHN_UNDEF, // large_common_shndx
3621 0, // small_common_section_flags
3622 0, // large_common_section_flags
3623 NULL, // attributes_section
3624 NULL, // attributes_vendor
3625 "_start", // entry_symbol_name
3626 32, // hash_entry_size
3627 };
3628
3629 // Get the GOT section, creating it if necessary.
3630
3631 template<int size, bool big_endian>
3632 Output_data_got_aarch64<size, big_endian>*
3633 Target_aarch64<size, big_endian>::got_section(Symbol_table* symtab,
3634 Layout* layout)
3635 {
3636 if (this->got_ == NULL)
3637 {
3638 gold_assert(symtab != NULL && layout != NULL);
3639
3640 // When using -z now, we can treat .got.plt as a relro section.
3641 // Without -z now, it is modified after program startup by lazy
3642 // PLT relocations.
3643 bool is_got_plt_relro = parameters->options().now();
3644 Output_section_order got_order = (is_got_plt_relro
3645 ? ORDER_RELRO
3646 : ORDER_RELRO_LAST);
3647 Output_section_order got_plt_order = (is_got_plt_relro
3648 ? ORDER_RELRO
3649 : ORDER_NON_RELRO_FIRST);
3650
3651 // Layout of .got and .got.plt sections.
3652 // .got[0] &_DYNAMIC <-_GLOBAL_OFFSET_TABLE_
3653 // ...
3654 // .gotplt[0] reserved for ld.so (&linkmap) <--DT_PLTGOT
3655 // .gotplt[1] reserved for ld.so (resolver)
3656 // .gotplt[2] reserved
3657
3658 // Generate .got section.
3659 this->got_ = new Output_data_got_aarch64<size, big_endian>(symtab,
3660 layout);
3661 layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
3662 (elfcpp::SHF_ALLOC | elfcpp::SHF_WRITE),
3663 this->got_, got_order, true);
3664 // The first word of GOT is reserved for the address of .dynamic.
3665 // We put 0 here now. The value will be replaced later in
3666 // Output_data_got_aarch64::do_write.
3667 this->got_->add_constant(0);
3668
3669 // Define _GLOBAL_OFFSET_TABLE_ at the start of the .got section.
3670 // _GLOBAL_OFFSET_TABLE_ value points to the start of the .got section,
3671 // even if there is a .got.plt section.
3672 this->global_offset_table_ =
3673 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
3674 Symbol_table::PREDEFINED,
3675 this->got_,
3676 0, 0, elfcpp::STT_OBJECT,
3677 elfcpp::STB_LOCAL,
3678 elfcpp::STV_HIDDEN, 0,
3679 false, false);
3680
3681 // Generate .got.plt section.
3682 this->got_plt_ = new Output_data_space(size / 8, "** GOT PLT");
3683 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3684 (elfcpp::SHF_ALLOC
3685 | elfcpp::SHF_WRITE),
3686 this->got_plt_, got_plt_order,
3687 is_got_plt_relro);
3688
3689 // The first three entries are reserved.
3690 this->got_plt_->set_current_data_size(
3691 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
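// Illustration (editorial note, not from the original source): with
// size == 64 this reserves AARCH64_GOTPLT_RESERVE_COUNT * 8 == 24 bytes,
// i.e. the three slots .gotplt[0..2] shown in the layout comment above;
// with size == 32 it reserves 12 bytes.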
3692
3693 // If there are any IRELATIVE relocations, they get GOT entries
3694 // in .got.plt after the jump slot entries.
3695 this->got_irelative_ = new Output_data_space(size / 8,
3696 "** GOT IRELATIVE PLT");
3697 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3698 (elfcpp::SHF_ALLOC
3699 | elfcpp::SHF_WRITE),
3700 this->got_irelative_,
3701 got_plt_order,
3702 is_got_plt_relro);
3703
3704 // If there are any TLSDESC relocations, they get GOT entries in
3705 // .got.plt after the jump slot and IRELATIVE entries.
3706 this->got_tlsdesc_ = new Output_data_got<size, big_endian>();
3707 layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
3708 (elfcpp::SHF_ALLOC
3709 | elfcpp::SHF_WRITE),
3710 this->got_tlsdesc_,
3711 got_plt_order,
3712 is_got_plt_relro);
3713
3714 if (!is_got_plt_relro)
3715 {
3716 // Those bytes can go into the relro segment.
3717 layout->increase_relro(
3718 AARCH64_GOTPLT_RESERVE_COUNT * (size / 8));
3719 }
3720
3721 }
3722 return this->got_;
3723 }
3724
3725 // Get the dynamic reloc section, creating it if necessary.
3726
3727 template<int size, bool big_endian>
3728 typename Target_aarch64<size, big_endian>::Reloc_section*
3729 Target_aarch64<size, big_endian>::rela_dyn_section(Layout* layout)
3730 {
3731 if (this->rela_dyn_ == NULL)
3732 {
3733 gold_assert(layout != NULL);
3734 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
3735 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3736 elfcpp::SHF_ALLOC, this->rela_dyn_,
3737 ORDER_DYNAMIC_RELOCS, false);
3738 }
3739 return this->rela_dyn_;
3740 }
3741
3742 // Get the section to use for IRELATIVE relocs, creating it if
3743 // necessary. These go in .rela.dyn, but only after all other dynamic
3744 // relocations. They need to follow the other dynamic relocations so
3745 // that they can refer to global variables initialized by those
3746 // relocs.
3747
3748 template<int size, bool big_endian>
3749 typename Target_aarch64<size, big_endian>::Reloc_section*
3750 Target_aarch64<size, big_endian>::rela_irelative_section(Layout* layout)
3751 {
3752 if (this->rela_irelative_ == NULL)
3753 {
3754 // Make sure we have already created the dynamic reloc section.
3755 this->rela_dyn_section(layout);
3756 this->rela_irelative_ = new Reloc_section(false);
3757 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
3758 elfcpp::SHF_ALLOC, this->rela_irelative_,
3759 ORDER_DYNAMIC_RELOCS, false);
3760 gold_assert(this->rela_dyn_->output_section()
3761 == this->rela_irelative_->output_section());
3762 }
3763 return this->rela_irelative_;
3764 }
3765
3766
3767 // do_make_elf_object to override the same function in the base class. We need
3768 // to use a target-specific sub-class of Sized_relobj_file<size, big_endian> to
3769 // store backend specific information. Hence we need to have our own ELF object
3770 // creation.
3771
3772 template<int size, bool big_endian>
3773 Object*
3774 Target_aarch64<size, big_endian>::do_make_elf_object(
3775 const std::string& name,
3776 Input_file* input_file,
3777 off_t offset, const elfcpp::Ehdr<size, big_endian>& ehdr)
3778 {
3779 int et = ehdr.get_e_type();
3780 // ET_EXEC files are valid input for --just-symbols/-R,
3781 // and we treat them as relocatable objects.
3782 if (et == elfcpp::ET_EXEC && input_file->just_symbols())
3783 return Sized_target<size, big_endian>::do_make_elf_object(
3784 name, input_file, offset, ehdr);
3785 else if (et == elfcpp::ET_REL)
3786 {
3787 AArch64_relobj<size, big_endian>* obj =
3788 new AArch64_relobj<size, big_endian>(name, input_file, offset, ehdr);
3789 obj->setup();
3790 return obj;
3791 }
3792 else if (et == elfcpp::ET_DYN)
3793 {
3794 // Keep base implementation.
3795 Sized_dynobj<size, big_endian>* obj =
3796 new Sized_dynobj<size, big_endian>(name, input_file, offset, ehdr);
3797 obj->setup();
3798 return obj;
3799 }
3800 else
3801 {
3802 gold_error(_("%s: unsupported ELF file type %d"),
3803 name.c_str(), et);
3804 return NULL;
3805 }
3806 }
3807
3808
3809 // Scan a relocation for stub generation.
3810
3811 template<int size, bool big_endian>
3812 void
3813 Target_aarch64<size, big_endian>::scan_reloc_for_stub(
3814 const Relocate_info<size, big_endian>* relinfo,
3815 unsigned int r_type,
3816 const Sized_symbol<size>* gsym,
3817 unsigned int r_sym,
3818 const Symbol_value<size>* psymval,
3819 typename elfcpp::Elf_types<size>::Elf_Swxword addend,
3820 Address address)
3821 {
3822 const AArch64_relobj<size, big_endian>* aarch64_relobj =
3823 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3824
3825 Symbol_value<size> symval;
3826 if (gsym != NULL)
3827 {
3828 const AArch64_reloc_property* arp = aarch64_reloc_property_table->
3829 get_reloc_property(r_type);
3830 if (gsym->use_plt_offset(arp->reference_flags()))
3831 {
3832 // This uses a PLT, change the symbol value.
3833 symval.set_output_value(this->plt_address_for_global(gsym));
3834 psymval = &symval;
3835 }
3836 else if (gsym->is_undefined())
3837 {
3838 // There is no need to generate a stub symbol if the original symbol
3839 // is undefined.
3840 gold_debug(DEBUG_TARGET,
3841 "stub: not creating a stub for undefined symbol %s in file %s",
3842 gsym->name(), aarch64_relobj->name().c_str());
3843 return;
3844 }
3845 }
3846
3847 // Get the symbol value.
3848 typename Symbol_value<size>::Value value = psymval->value(aarch64_relobj, 0);
3849
3850 // Owing to pipelining, the PC relative branches below actually skip
3851 // two instructions when the branch offset is 0.
3852 Address destination = static_cast<Address>(-1);
3853 switch (r_type)
3854 {
3855 case elfcpp::R_AARCH64_CALL26:
3856 case elfcpp::R_AARCH64_JUMP26:
3857 destination = value + addend;
3858 break;
3859 default:
3860 gold_unreachable();
3861 }
3862
3863 int stub_type = The_reloc_stub::
3864 stub_type_for_reloc(r_type, address, destination);
3865 if (stub_type == ST_NONE)
3866 return;
3867
3868 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
3869 gold_assert(stub_table != NULL);
3870
3871 The_reloc_stub_key key(stub_type, gsym, aarch64_relobj, r_sym, addend);
3872 The_reloc_stub* stub = stub_table->find_reloc_stub(key);
3873 if (stub == NULL)
3874 {
3875 stub = new The_reloc_stub(stub_type);
3876 stub_table->add_reloc_stub(stub, key);
3877 }
3878 stub->set_destination_address(destination);
3879 } // End of Target_aarch64::scan_reloc_for_stub
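// Editorial note: stubs are keyed by (stub type, symbol, object, r_sym,
// addend), so multiple branches to the same out-of-range destination reuse
// a single stub in the section's stub table.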
3880
3881
3882 // This function scans a relocation section for stub generation.
3883 // It looks only at R_AARCH64_CALL26 and R_AARCH64_JUMP26 relocations,
3884 // computes the would-be destination of each branch, and records any
3885 // reloc stub needed to reach it (see scan_reloc_for_stub above).
3886
3887 // BIG_ENDIAN is the endianness of the data. SH_TYPE is the section type:
3888 // SHT_REL or SHT_RELA.
3889
3890 // PRELOCS points to the relocation data. RELOC_COUNT is the number
3891 // of relocs. OUTPUT_SECTION is the output section.
3892 // NEEDS_SPECIAL_OFFSET_HANDLING is true if input offsets need to be
3893 // mapped to output offsets.
3894
3895 // VIEW is the section data, VIEW_ADDRESS is its memory address, and
3896 // VIEW_SIZE is the size. These refer to the input section, unless
3897 // NEEDS_SPECIAL_OFFSET_HANDLING is true, in which case they refer to
3898 // the output section.
3899
3900 template<int size, bool big_endian>
3901 template<int sh_type>
3902 void inline
3903 Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
3904 const Relocate_info<size, big_endian>* relinfo,
3905 const unsigned char* prelocs,
3906 size_t reloc_count,
3907 Output_section* /*output_section*/,
3908 bool /*needs_special_offset_handling*/,
3909 const unsigned char* /*view*/,
3910 Address view_address,
3911 section_size_type)
3912 {
3913 typedef typename Reloc_types<sh_type,size,big_endian>::Reloc Reltype;
3914
3915 const int reloc_size =
3916 Reloc_types<sh_type,size,big_endian>::reloc_size;
3917 AArch64_relobj<size, big_endian>* object =
3918 static_cast<AArch64_relobj<size, big_endian>*>(relinfo->object);
3919 unsigned int local_count = object->local_symbol_count();
3920
3921 gold::Default_comdat_behavior default_comdat_behavior;
3922 Comdat_behavior comdat_behavior = CB_UNDETERMINED;
3923
3924 for (size_t i = 0; i < reloc_count; ++i, prelocs += reloc_size)
3925 {
3926 Reltype reloc(prelocs);
3927 typename elfcpp::Elf_types<size>::Elf_WXword r_info = reloc.get_r_info();
3928 unsigned int r_sym = elfcpp::elf_r_sym<size>(r_info);
3929 unsigned int r_type = elfcpp::elf_r_type<size>(r_info);
3930 if (r_type != elfcpp::R_AARCH64_CALL26
3931 && r_type != elfcpp::R_AARCH64_JUMP26)
3932 continue;
3933
3934 section_offset_type offset =
3935 convert_to_section_size_type(reloc.get_r_offset());
3936
3937 // Get the addend.
3938 typename elfcpp::Elf_types<size>::Elf_Swxword addend =
3939 reloc.get_r_addend();
3940
3941 const Sized_symbol<size>* sym;
3942 Symbol_value<size> symval;
3943 const Symbol_value<size> *psymval;
3944 bool is_defined_in_discarded_section;
3945 unsigned int shndx;
3946 if (r_sym < local_count)
3947 {
3948 sym = NULL;
3949 psymval = object->local_symbol(r_sym);
3950
3951 // If the local symbol belongs to a section we are discarding,
3952 // and that section is a debug section, try to find the
3953 // corresponding kept section and map this symbol to its
3954 // counterpart in the kept section. The symbol must not
3955 // correspond to a section we are folding.
3956 bool is_ordinary;
3957 shndx = psymval->input_shndx(&is_ordinary);
3958 is_defined_in_discarded_section =
3959 (is_ordinary
3960 && shndx != elfcpp::SHN_UNDEF
3961 && !object->is_section_included(shndx)
3962 && !relinfo->symtab->is_section_folded(object, shndx));
3963
3964 // We need to compute the would-be final value of this local
3965 // symbol.
3966 if (!is_defined_in_discarded_section)
3967 {
3968 typedef Sized_relobj_file<size, big_endian> ObjType;
3969 if (psymval->is_section_symbol())
3970 symval.set_is_section_symbol();
3971 typename ObjType::Compute_final_local_value_status status =
3972 object->compute_final_local_value(r_sym, psymval, &symval,
3973 relinfo->symtab);
3974 if (status == ObjType::CFLV_OK)
3975 {
3976 // Currently we cannot handle a branch to a target in
3977 // a merged section. If this is the case, issue an error
3978 // and also free the merge symbol value.
3979 if (!symval.has_output_value())
3980 {
3981 const std::string& section_name =
3982 object->section_name(shndx);
3983 object->error(_("cannot handle branch to local %u "
3984 "in a merged section %s"),
3985 r_sym, section_name.c_str());
3986 }
3987 psymval = &symval;
3988 }
3989 else
3990 {
3991 // We cannot determine the final value.
3992 continue;
3993 }
3994 }
3995 }
3996 else
3997 {
3998 const Symbol* gsym;
3999 gsym = object->global_symbol(r_sym);
4000 gold_assert(gsym != NULL);
4001 if (gsym->is_forwarder())
4002 gsym = relinfo->symtab->resolve_forwards(gsym);
4003
4004 sym = static_cast<const Sized_symbol<size>*>(gsym);
4005 if (sym->has_symtab_index() && sym->symtab_index() != -1U)
4006 symval.set_output_symtab_index(sym->symtab_index());
4007 else
4008 symval.set_no_output_symtab_entry();
4009
4010 // We need to compute the would-be final value of this global
4011 // symbol.
4012 const Symbol_table* symtab = relinfo->symtab;
4013 const Sized_symbol<size>* sized_symbol =
4014 symtab->get_sized_symbol<size>(gsym);
4015 Symbol_table::Compute_final_value_status status;
4016 typename elfcpp::Elf_types<size>::Elf_Addr value =
4017 symtab->compute_final_value<size>(sized_symbol, &status);
4018
4019 // Skip this reloc if the symbol has no output section.
4020 if (status == Symbol_table::CFVS_NO_OUTPUT_SECTION)
4021 continue;
4022 symval.set_output_value(value);
4023
4024 if (gsym->type() == elfcpp::STT_TLS)
4025 symval.set_is_tls_symbol();
4026 else if (gsym->type() == elfcpp::STT_GNU_IFUNC)
4027 symval.set_is_ifunc_symbol();
4028 psymval = &symval;
4029
4030 is_defined_in_discarded_section =
4031 (gsym->is_defined_in_discarded_section()
4032 && gsym->is_undefined());
4033 shndx = 0;
4034 }
4035
4036 Symbol_value<size> symval2;
4037 if (is_defined_in_discarded_section)
4038 {
4039 if (comdat_behavior == CB_UNDETERMINED)
4040 {
4041 std::string name = object->section_name(relinfo->data_shndx);
4042 comdat_behavior = default_comdat_behavior.get(name.c_str());
4043 }
4044 if (comdat_behavior == CB_PRETEND)
4045 {
4046 bool found;
4047 typename elfcpp::Elf_types<size>::Elf_Addr value =
4048 object->map_to_kept_section(shndx, &found);
4049 if (found)
4050 symval2.set_output_value(value + psymval->input_value());
4051 else
4052 symval2.set_output_value(0);
4053 }
4054 else
4055 {
4056 if (comdat_behavior == CB_WARNING)
4057 gold_warning_at_location(relinfo, i, offset,
4058 _("relocation refers to discarded "
4059 "section"));
4060 symval2.set_output_value(0);
4061 }
4062 symval2.set_no_output_symtab_entry();
4063 psymval = &symval2;
4064 }
4065
4066 this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
4067 addend, view_address + offset);
4068 } // End of iterating relocs in a section
4069 } // End of Target_aarch64::scan_reloc_section_for_stubs
4070
4071
4072 // Scan an input section for stub generation.
4073
4074 template<int size, bool big_endian>
4075 void
4076 Target_aarch64<size, big_endian>::scan_section_for_stubs(
4077 const Relocate_info<size, big_endian>* relinfo,
4078 unsigned int sh_type,
4079 const unsigned char* prelocs,
4080 size_t reloc_count,
4081 Output_section* output_section,
4082 bool needs_special_offset_handling,
4083 const unsigned char* view,
4084 Address view_address,
4085 section_size_type view_size)
4086 {
4087 gold_assert(sh_type == elfcpp::SHT_RELA);
4088 this->scan_reloc_section_for_stubs<elfcpp::SHT_RELA>(
4089 relinfo,
4090 prelocs,
4091 reloc_count,
4092 output_section,
4093 needs_special_offset_handling,
4094 view,
4095 view_address,
4096 view_size);
4097 }
4098
4099
4100 // Relocate a single reloc stub.
4101
4102 template<int size, bool big_endian>
4103 void Target_aarch64<size, big_endian>::
4104 relocate_reloc_stub(The_reloc_stub* stub,
4105 const The_relocate_info*,
4106 Output_section*,
4107 unsigned char* view,
4108 Address address,
4109 section_size_type)
4110 {
4111 typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
4112 typedef typename The_reloc_functions::Status The_reloc_functions_status;
4113 typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
4114
4115 Insntype* ip = reinterpret_cast<Insntype*>(view);
4116 int insn_number = stub->insn_num();
4117 const uint32_t* insns = stub->insns();
4118 // Check the insns are really those stub insns.
4119 for (int i = 0; i < insn_number; ++i)
4120 {
4121 Insntype insn = elfcpp::Swap<32,big_endian>::readval(ip + i);
4122 gold_assert(((uint32_t)insn == insns[i]));
4123 }
4124
4125 Address dest = stub->destination_address();
4126
4127 switch(stub->type())
4128 {
4129 case ST_ADRP_BRANCH:
4130 {
4131 // 1st reloc is ADR_PREL_PG_HI21
4132 The_reloc_functions_status status =
4133 The_reloc_functions::adrp(view, dest, address);
4134 // An error should never arise in the above step; if one does, check
4135 // 'aarch64_valid_for_adrp_p'.
4136 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4137
4138 // 2nd reloc is ADD_ABS_LO12_NC
4139 const AArch64_reloc_property* arp =
4140 aarch64_reloc_property_table->get_reloc_property(
4141 elfcpp::R_AARCH64_ADD_ABS_LO12_NC);
4142 gold_assert(arp != NULL);
4143 status = The_reloc_functions::template
4144 rela_general<32>(view + 4, dest, 0, arp);
4145 // An error should never arise here, since this is an "_NC" relocation.
4146 gold_assert(status == The_reloc_functions::STATUS_OKAY);
4147 }
4148 break;
4149
4150 case ST_LONG_BRANCH_ABS:
4151 // 1st reloc is R_AARCH64_PREL64, at offset 8
4152 elfcpp::Swap<64,big_endian>::writeval(view + 8, dest);
4153 break;
4154
4155 case ST_LONG_BRANCH_PCREL:
4156 {
4157 // "PC" calculation is the 2nd insn in the stub.
4158 uint64_t offset = dest - (address + 4);
4159 // The 64-bit offset occupies insn slots 4 and 5 (bytes 16-23) of the stub.
4160 elfcpp::Swap<64,big_endian>::writeval(view + 16, offset);
4161 }
4162 break;
4163
4164 default:
4165 gold_unreachable();
4166 }
4167 }
4168
4169
4170 // A class to handle the PLT data.
4171 // This is an abstract base class that handles most of the linker details
4172 // but does not know the actual contents of PLT entries. The derived
4173 // classes below fill in those details.
4174
4175 template<int size, bool big_endian>
4176 class Output_data_plt_aarch64 : public Output_section_data
4177 {
4178 public:
4179 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
4180 Reloc_section;
4181 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4182
4183 Output_data_plt_aarch64(Layout* layout,
4184 uint64_t addralign,
4185 Output_data_got_aarch64<size, big_endian>* got,
4186 Output_data_space* got_plt,
4187 Output_data_space* got_irelative)
4188 : Output_section_data(addralign), tlsdesc_rel_(NULL), irelative_rel_(NULL),
4189 got_(got), got_plt_(got_plt), got_irelative_(got_irelative),
4190 count_(0), irelative_count_(0), tlsdesc_got_offset_(-1U)
4191 { this->init(layout); }
4192
4193 // Initialize the PLT section.
4194 void
4195 init(Layout* layout);
4196
4197 // Add an entry to the PLT.
4198 void
4199 add_entry(Symbol_table*, Layout*, Symbol* gsym);
4200
4201 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol.
4202 unsigned int
4203 add_local_ifunc_entry(Symbol_table* symtab, Layout*,
4204 Sized_relobj_file<size, big_endian>* relobj,
4205 unsigned int local_sym_index);
4206
4207 // Add the relocation for a PLT entry.
4208 void
4209 add_relocation(Symbol_table*, Layout*, Symbol* gsym,
4210 unsigned int got_offset);
4211
4212 // Add the reserved TLSDESC_PLT entry to the PLT.
4213 void
4214 reserve_tlsdesc_entry(unsigned int got_offset)
4215 { this->tlsdesc_got_offset_ = got_offset; }
4216
4217 // Return true if a TLSDESC_PLT entry has been reserved.
4218 bool
4219 has_tlsdesc_entry() const
4220 { return this->tlsdesc_got_offset_ != -1U; }
4221
4222 // Return the GOT offset for the reserved TLSDESC_PLT entry.
4223 unsigned int
4224 get_tlsdesc_got_offset() const
4225 { return this->tlsdesc_got_offset_; }
4226
4227 // Return the PLT offset of the reserved TLSDESC_PLT entry.
4228 unsigned int
4229 get_tlsdesc_plt_offset() const
4230 {
4231 return (this->first_plt_entry_offset() +
4232 (this->count_ + this->irelative_count_)
4233 * this->get_plt_entry_size());
4234 }
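// Worked example (illustrative, assuming the standard sizes used by
// Output_data_plt_aarch64_standard below: 32-byte PLT0, 16-byte entries):
// with 3 regular and 1 IRELATIVE entry, the reserved TLSDESC entry starts
// at 32 + (3 + 1) * 16 == 96 bytes into the PLT.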
4235
4236 // Return the .rela.plt section data.
4237 Reloc_section*
4238 rela_plt()
4239 { return this->rel_; }
4240
4241 // Return where the TLSDESC relocations should go.
4242 Reloc_section*
4243 rela_tlsdesc(Layout*);
4244
4245 // Return where the IRELATIVE relocations should go in the PLT
4246 // relocations.
4247 Reloc_section*
4248 rela_irelative(Symbol_table*, Layout*);
4249
4250 // Return whether we created a section for IRELATIVE relocations.
4251 bool
4252 has_irelative_section() const
4253 { return this->irelative_rel_ != NULL; }
4254
4255 // Return the number of PLT entries.
4256 unsigned int
4257 entry_count() const
4258 { return this->count_ + this->irelative_count_; }
4259
4260 // Return the offset of the first non-reserved PLT entry.
4261 unsigned int
4262 first_plt_entry_offset() const
4263 { return this->do_first_plt_entry_offset(); }
4264
4265 // Return the size of a PLT entry.
4266 unsigned int
4267 get_plt_entry_size() const
4268 { return this->do_get_plt_entry_size(); }
4269
4270 // Return the reserved tlsdesc entry size.
4271 unsigned int
4272 get_plt_tlsdesc_entry_size() const
4273 { return this->do_get_plt_tlsdesc_entry_size(); }
4274
4275 // Return the PLT address to use for a global symbol.
4276 uint64_t
4277 address_for_global(const Symbol*);
4278
4279 // Return the PLT address to use for a local symbol.
4280 uint64_t
4281 address_for_local(const Relobj*, unsigned int symndx);
4282
4283 protected:
4284 // Fill in the first PLT entry.
4285 void
4286 fill_first_plt_entry(unsigned char* pov,
4287 Address got_address,
4288 Address plt_address)
4289 { this->do_fill_first_plt_entry(pov, got_address, plt_address); }
4290
4291 // Fill in a normal PLT entry.
4292 void
4293 fill_plt_entry(unsigned char* pov,
4294 Address got_address,
4295 Address plt_address,
4296 unsigned int got_offset,
4297 unsigned int plt_offset)
4298 {
4299 this->do_fill_plt_entry(pov, got_address, plt_address,
4300 got_offset, plt_offset);
4301 }
4302
4303 // Fill in the reserved TLSDESC PLT entry.
4304 void
4305 fill_tlsdesc_entry(unsigned char* pov,
4306 Address gotplt_address,
4307 Address plt_address,
4308 Address got_base,
4309 unsigned int tlsdesc_got_offset,
4310 unsigned int plt_offset)
4311 {
4312 this->do_fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
4313 tlsdesc_got_offset, plt_offset);
4314 }
4315
4316 virtual unsigned int
4317 do_first_plt_entry_offset() const = 0;
4318
4319 virtual unsigned int
4320 do_get_plt_entry_size() const = 0;
4321
4322 virtual unsigned int
4323 do_get_plt_tlsdesc_entry_size() const = 0;
4324
4325 virtual void
4326 do_fill_first_plt_entry(unsigned char* pov,
4327 Address got_addr,
4328 Address plt_addr) = 0;
4329
4330 virtual void
4331 do_fill_plt_entry(unsigned char* pov,
4332 Address got_address,
4333 Address plt_address,
4334 unsigned int got_offset,
4335 unsigned int plt_offset) = 0;
4336
4337 virtual void
4338 do_fill_tlsdesc_entry(unsigned char* pov,
4339 Address gotplt_address,
4340 Address plt_address,
4341 Address got_base,
4342 unsigned int tlsdesc_got_offset,
4343 unsigned int plt_offset) = 0;
4344
4345 void
4346 do_adjust_output_section(Output_section* os);
4347
4348 // Write to a map file.
4349 void
4350 do_print_to_mapfile(Mapfile* mapfile) const
4351 { mapfile->print_output_data(this, _("** PLT")); }
4352
4353 private:
4354 // Set the final size.
4355 void
4356 set_final_data_size();
4357
4358 // Write out the PLT data.
4359 void
4360 do_write(Output_file*);
4361
4362 // The reloc section.
4363 Reloc_section* rel_;
4364
4365 // The TLSDESC relocs, if necessary. These must follow the regular
4366 // PLT relocs.
4367 Reloc_section* tlsdesc_rel_;
4368
4369 // The IRELATIVE relocs, if necessary. These must follow the
4370 // regular PLT relocations.
4371 Reloc_section* irelative_rel_;
4372
4373 // The .got section.
4374 Output_data_got_aarch64<size, big_endian>* got_;
4375
4376 // The .got.plt section.
4377 Output_data_space* got_plt_;
4378
4379 // The part of the .got.plt section used for IRELATIVE relocs.
4380 Output_data_space* got_irelative_;
4381
4382 // The number of PLT entries.
4383 unsigned int count_;
4384
4385 // Number of PLT entries with R_AARCH64_IRELATIVE relocs. These
4386 // follow the regular PLT entries.
4387 unsigned int irelative_count_;
4388
4389 // GOT offset of the reserved TLSDESC_GOT entry for the lazy trampoline.
4390 // Communicated to the loader via DT_TLSDESC_GOT. The magic value -1
4391 // indicates an offset is not allocated.
4392 unsigned int tlsdesc_got_offset_;
4393 };
4394
4395 // Initialize the PLT section.
4396
4397 template<int size, bool big_endian>
4398 void
4399 Output_data_plt_aarch64<size, big_endian>::init(Layout* layout)
4400 {
4401 this->rel_ = new Reloc_section(false);
4402 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4403 elfcpp::SHF_ALLOC, this->rel_,
4404 ORDER_DYNAMIC_PLT_RELOCS, false);
4405 }
4406
4407 template<int size, bool big_endian>
4408 void
4409 Output_data_plt_aarch64<size, big_endian>::do_adjust_output_section(
4410 Output_section* os)
4411 {
4412 os->set_entsize(this->get_plt_entry_size());
4413 }
4414
4415 // Add an entry to the PLT.
4416
4417 template<int size, bool big_endian>
4418 void
4419 Output_data_plt_aarch64<size, big_endian>::add_entry(Symbol_table* symtab,
4420 Layout* layout, Symbol* gsym)
4421 {
4422 gold_assert(!gsym->has_plt_offset());
4423
4424 unsigned int* pcount;
4425 unsigned int plt_reserved;
4426 Output_section_data_build* got;
4427
4428 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4429 && gsym->can_use_relative_reloc(false))
4430 {
4431 pcount = &this->irelative_count_;
4432 plt_reserved = 0;
4433 got = this->got_irelative_;
4434 }
4435 else
4436 {
4437 pcount = &this->count_;
4438 plt_reserved = this->first_plt_entry_offset();
4439 got = this->got_plt_;
4440 }
4441
4442 gsym->set_plt_offset((*pcount) * this->get_plt_entry_size()
4443 + plt_reserved);
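// Illustration (assuming the standard 32-byte PLT0 and 16-byte entries):
// the first regular entry gets plt_offset 32, the second 48, and so on.
// IRELATIVE entries use plt_reserved == 0, so their offsets (0, 16, ...)
// are relative to the start of the IRELATIVE block; address_for_global
// below adds that block's displacement back in.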
4444
4445 ++*pcount;
4446
4447 section_offset_type got_offset = got->current_data_size();
4448
4449 // Every PLT entry needs a GOT entry which points back to the PLT
4450 // entry (this will be changed by the dynamic linker, normally
4451 // lazily when the function is called).
4452 got->set_current_data_size(got_offset + size / 8);
4453
4454 // Every PLT entry needs a reloc.
4455 this->add_relocation(symtab, layout, gsym, got_offset);
4456
4457 // Note that we don't need to save the symbol. The contents of the
4458 // PLT are independent of which symbols are used. The symbols only
4459 // appear in the relocations.
4460 }
4461
4462 // Add an entry to the PLT for a local STT_GNU_IFUNC symbol. Return
4463 // the PLT offset.
4464
4465 template<int size, bool big_endian>
4466 unsigned int
4467 Output_data_plt_aarch64<size, big_endian>::add_local_ifunc_entry(
4468 Symbol_table* symtab,
4469 Layout* layout,
4470 Sized_relobj_file<size, big_endian>* relobj,
4471 unsigned int local_sym_index)
4472 {
4473 unsigned int plt_offset = this->irelative_count_ * this->get_plt_entry_size();
4474 ++this->irelative_count_;
4475
4476 section_offset_type got_offset = this->got_irelative_->current_data_size();
4477
4478 // Every PLT entry needs a GOT entry which points back to the PLT
4479 // entry.
4480 this->got_irelative_->set_current_data_size(got_offset + size / 8);
4481
4482 // Every PLT entry needs a reloc.
4483 Reloc_section* rela = this->rela_irelative(symtab, layout);
4484 rela->add_symbolless_local_addend(relobj, local_sym_index,
4485 elfcpp::R_AARCH64_IRELATIVE,
4486 this->got_irelative_, got_offset, 0);
4487
4488 return plt_offset;
4489 }
4490
4491 // Add the relocation for a PLT entry.
4492
4493 template<int size, bool big_endian>
4494 void
4495 Output_data_plt_aarch64<size, big_endian>::add_relocation(
4496 Symbol_table* symtab, Layout* layout, Symbol* gsym, unsigned int got_offset)
4497 {
4498 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4499 && gsym->can_use_relative_reloc(false))
4500 {
4501 Reloc_section* rela = this->rela_irelative(symtab, layout);
4502 rela->add_symbolless_global_addend(gsym, elfcpp::R_AARCH64_IRELATIVE,
4503 this->got_irelative_, got_offset, 0);
4504 }
4505 else
4506 {
4507 gsym->set_needs_dynsym_entry();
4508 this->rel_->add_global(gsym, elfcpp::R_AARCH64_JUMP_SLOT, this->got_plt_,
4509 got_offset, 0);
4510 }
4511 }
4512
4513 // Return where the TLSDESC relocations should go, creating it if
4514 // necessary. These follow the JUMP_SLOT relocations.
4515
4516 template<int size, bool big_endian>
4517 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4518 Output_data_plt_aarch64<size, big_endian>::rela_tlsdesc(Layout* layout)
4519 {
4520 if (this->tlsdesc_rel_ == NULL)
4521 {
4522 this->tlsdesc_rel_ = new Reloc_section(false);
4523 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4524 elfcpp::SHF_ALLOC, this->tlsdesc_rel_,
4525 ORDER_DYNAMIC_PLT_RELOCS, false);
4526 gold_assert(this->tlsdesc_rel_->output_section()
4527 == this->rel_->output_section());
4528 }
4529 return this->tlsdesc_rel_;
4530 }
4531
4532 // Return where the IRELATIVE relocations should go in the PLT. These
4533 // follow the JUMP_SLOT and the TLSDESC relocations.
4534
4535 template<int size, bool big_endian>
4536 typename Output_data_plt_aarch64<size, big_endian>::Reloc_section*
4537 Output_data_plt_aarch64<size, big_endian>::rela_irelative(Symbol_table* symtab,
4538 Layout* layout)
4539 {
4540 if (this->irelative_rel_ == NULL)
4541 {
4542 // Make sure we have a place for the TLSDESC relocations, in
4543 // case we see any later on.
4544 this->rela_tlsdesc(layout);
4545 this->irelative_rel_ = new Reloc_section(false);
4546 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
4547 elfcpp::SHF_ALLOC, this->irelative_rel_,
4548 ORDER_DYNAMIC_PLT_RELOCS, false);
4549 gold_assert(this->irelative_rel_->output_section()
4550 == this->rel_->output_section());
4551
4552 if (parameters->doing_static_link())
4553 {
4554 // A statically linked executable will only have a .rela.plt
4555 // section to hold R_AARCH64_IRELATIVE relocs for
4556 // STT_GNU_IFUNC symbols. The library will use these
4557 // symbols to locate the IRELATIVE relocs at program startup
4558 // time.
4559 symtab->define_in_output_data("__rela_iplt_start", NULL,
4560 Symbol_table::PREDEFINED,
4561 this->irelative_rel_, 0, 0,
4562 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4563 elfcpp::STV_HIDDEN, 0, false, true);
4564 symtab->define_in_output_data("__rela_iplt_end", NULL,
4565 Symbol_table::PREDEFINED,
4566 this->irelative_rel_, 0, 0,
4567 elfcpp::STT_NOTYPE, elfcpp::STB_GLOBAL,
4568 elfcpp::STV_HIDDEN, 0, true, true);
4569 }
4570 }
4571 return this->irelative_rel_;
4572 }
4573
4574 // Return the PLT address to use for a global symbol.
4575
4576 template<int size, bool big_endian>
4577 uint64_t
4578 Output_data_plt_aarch64<size, big_endian>::address_for_global(
4579 const Symbol* gsym)
4580 {
4581 uint64_t offset = 0;
4582 if (gsym->type() == elfcpp::STT_GNU_IFUNC
4583 && gsym->can_use_relative_reloc(false))
4584 offset = (this->first_plt_entry_offset() +
4585 this->count_ * this->get_plt_entry_size());
4586 return this->address() + offset + gsym->plt_offset();
4587 }
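// Illustration (standard sizes assumed): for an IFUNC symbol at offset 0
// of the IRELATIVE block, with count_ == 3 regular entries, this returns
// plt_base + (32 + 3 * 16) + 0 == plt_base + 80.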
4588
4589 // Return the PLT address to use for a local symbol. These are always
4590 // IRELATIVE relocs.
4591
4592 template<int size, bool big_endian>
4593 uint64_t
4594 Output_data_plt_aarch64<size, big_endian>::address_for_local(
4595 const Relobj* object,
4596 unsigned int r_sym)
4597 {
4598 return (this->address()
4599 + this->first_plt_entry_offset()
4600 + this->count_ * this->get_plt_entry_size()
4601 + object->local_plt_offset(r_sym));
4602 }
4603
4604 // Set the final size.
4605
4606 template<int size, bool big_endian>
4607 void
4608 Output_data_plt_aarch64<size, big_endian>::set_final_data_size()
4609 {
4610 unsigned int count = this->count_ + this->irelative_count_;
4611 unsigned int extra_size = 0;
4612 if (this->has_tlsdesc_entry())
4613 extra_size += this->get_plt_tlsdesc_entry_size();
4614 this->set_data_size(this->first_plt_entry_offset()
4615 + count * this->get_plt_entry_size()
4616 + extra_size);
4617 }
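// Worked example (illustrative, standard sizes): 3 regular entries,
// 1 IRELATIVE entry and a reserved TLSDESC entry give a data size of
// 32 + (3 + 1) * 16 + 32 == 128 bytes.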
4618
4619 template<int size, bool big_endian>
4620 class Output_data_plt_aarch64_standard :
4621 public Output_data_plt_aarch64<size, big_endian>
4622 {
4623 public:
4624 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
4625 Output_data_plt_aarch64_standard(
4626 Layout* layout,
4627 Output_data_got_aarch64<size, big_endian>* got,
4628 Output_data_space* got_plt,
4629 Output_data_space* got_irelative)
4630 : Output_data_plt_aarch64<size, big_endian>(layout,
4631 size == 32 ? 4 : 8,
4632 got, got_plt,
4633 got_irelative)
4634 { }
4635
4636 protected:
4637 // Return the offset of the first non-reserved PLT entry.
4638 virtual unsigned int
4639 do_first_plt_entry_offset() const
4640 { return this->first_plt_entry_size; }
4641
4642 // Return the size of a PLT entry
4643 virtual unsigned int
4644 do_get_plt_entry_size() const
4645 { return this->plt_entry_size; }
4646
4647 // Return the size of a tlsdesc entry
4648 virtual unsigned int
4649 do_get_plt_tlsdesc_entry_size() const
4650 { return this->plt_tlsdesc_entry_size; }
4651
4652 virtual void
4653 do_fill_first_plt_entry(unsigned char* pov,
4654 Address got_address,
4655 Address plt_address);
4656
4657 virtual void
4658 do_fill_plt_entry(unsigned char* pov,
4659 Address got_address,
4660 Address plt_address,
4661 unsigned int got_offset,
4662 unsigned int plt_offset);
4663
4664 virtual void
4665 do_fill_tlsdesc_entry(unsigned char* pov,
4666 Address gotplt_address,
4667 Address plt_address,
4668 Address got_base,
4669 unsigned int tlsdesc_got_offset,
4670 unsigned int plt_offset);
4671
4672 private:
4673 // The size of the first PLT entry.
4674 static const int first_plt_entry_size = 32;
4675 // The size of a regular PLT entry.
4676 static const int plt_entry_size = 16;
4677 // The size of the reserved TLSDESC PLT entry.
4678 static const int plt_tlsdesc_entry_size = 32;
4679 // Template for the first PLT entry.
4680 static const uint32_t first_plt_entry[first_plt_entry_size / 4];
4681 // Template for subsequent PLT entries.
4682 static const uint32_t plt_entry[plt_entry_size / 4];
4683 // The reserved TLSDESC entry in the PLT for an executable.
4684 static const uint32_t tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4];
4685 };
4686
4687 // The first entry in the PLT for an executable.
4688
4689 template<>
4690 const uint32_t
4691 Output_data_plt_aarch64_standard<32, false>::
4692 first_plt_entry[first_plt_entry_size / 4] =
4693 {
4694 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4695 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4696 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4697 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4698 0xd61f0220, /* br x17 */
4699 0xd503201f, /* nop */
4700 0xd503201f, /* nop */
4701 0xd503201f, /* nop */
4702 };
4703
4704
4705 template<>
4706 const uint32_t
4707 Output_data_plt_aarch64_standard<32, true>::
4708 first_plt_entry[first_plt_entry_size / 4] =
4709 {
4710 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4711 0x90000010, /* adrp x16, PLT_GOT+0x8 */
4712 0xb9400A11, /* ldr w17, [x16, #PLT_GOT+0x8] */
4713 0x11002210, /* add w16, w16,#PLT_GOT+0x8 */
4714 0xd61f0220, /* br x17 */
4715 0xd503201f, /* nop */
4716 0xd503201f, /* nop */
4717 0xd503201f, /* nop */
4718 };
4719
4720
4721 template<>
4722 const uint32_t
4723 Output_data_plt_aarch64_standard<64, false>::
4724 first_plt_entry[first_plt_entry_size / 4] =
4725 {
4726 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4727 0x90000010, /* adrp x16, PLT_GOT+16 */
4728 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4729 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4730 0xd61f0220, /* br x17 */
4731 0xd503201f, /* nop */
4732 0xd503201f, /* nop */
4733 0xd503201f, /* nop */
4734 };
4735
4736
4737 template<>
4738 const uint32_t
4739 Output_data_plt_aarch64_standard<64, true>::
4740 first_plt_entry[first_plt_entry_size / 4] =
4741 {
4742 0xa9bf7bf0, /* stp x16, x30, [sp, #-16]! */
4743 0x90000010, /* adrp x16, PLT_GOT+16 */
4744 0xf9400A11, /* ldr x17, [x16, #PLT_GOT+0x10] */
4745 0x91004210, /* add x16, x16,#PLT_GOT+0x10 */
4746 0xd61f0220, /* br x17 */
4747 0xd503201f, /* nop */
4748 0xd503201f, /* nop */
4749 0xd503201f, /* nop */
4750 };
4751
4752
4753 template<>
4754 const uint32_t
4755 Output_data_plt_aarch64_standard<32, false>::
4756 plt_entry[plt_entry_size / 4] =
4757 {
4758 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4759 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4760 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4761 0xd61f0220, /* br x17. */
4762 };
4763
4764
4765 template<>
4766 const uint32_t
4767 Output_data_plt_aarch64_standard<32, true>::
4768 plt_entry[plt_entry_size / 4] =
4769 {
4770 0x90000010, /* adrp x16, PLTGOT + n * 4 */
4771 0xb9400211, /* ldr w17, [w16, PLTGOT + n * 4] */
4772 0x11000210, /* add w16, w16, :lo12:PLTGOT + n * 4 */
4773 0xd61f0220, /* br x17. */
4774 };
4775
4776
4777 template<>
4778 const uint32_t
4779 Output_data_plt_aarch64_standard<64, false>::
4780 plt_entry[plt_entry_size / 4] =
4781 {
4782 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4783 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4784 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4785 0xd61f0220, /* br x17. */
4786 };
4787
4788
4789 template<>
4790 const uint32_t
4791 Output_data_plt_aarch64_standard<64, true>::
4792 plt_entry[plt_entry_size / 4] =
4793 {
4794 0x90000010, /* adrp x16, PLTGOT + n * 8 */
4795 0xf9400211, /* ldr x17, [x16, PLTGOT + n * 8] */
4796 0x91000210, /* add x16, x16, :lo12:PLTGOT + n * 8 */
4797 0xd61f0220, /* br x17. */
4798 };
4799
4800
4801 template<int size, bool big_endian>
4802 void
4803 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_first_plt_entry(
4804 unsigned char* pov,
4805 Address got_address,
4806 Address plt_address)
4807 {
4808 // PLT0 of the small PLT looks like this in ELF64 -
4809 // stp x16, x30, [sp, #-16]! Save the reloc and lr on stack.
4810 // adrp x16, PLT_GOT + 16 Get the page base of the GOTPLT
4811 // ldr x17, [x16, #:lo12:PLT_GOT+16] Load the address of the
4812 // symbol resolver
4813 // add x16, x16, #:lo12:PLT_GOT+16 Load the lo12 bits of the
4814 // GOTPLT entry for this.
4815 // br x17
4816 // PLT0 will be slightly different in ELF32 due to different got entry
4817 // size.
4818 memcpy(pov, this->first_plt_entry, this->first_plt_entry_size);
4819 Address gotplt_2nd_ent = got_address + (size / 8) * 2;
4820
4821 // Fill in the top 21 bits for this: ADRP x16, PLT_GOT + 8 * 2.
4822 // ADRP: (PG(S+A)-PG(P)) >> 12) & 0x1fffff.
4823 // FIXME: This only works for 64bit
4824 AArch64_relocate_functions<size, big_endian>::adrp(pov + 4,
4825 gotplt_2nd_ent, plt_address + 4);
4826
4827 // Fill in R_AARCH64_LDST64_ABS_LO12
4828 elfcpp::Swap<32, big_endian>::writeval(
4829 pov + 8,
4830 ((this->first_plt_entry[2] & 0xffc003ff)
4831 | ((gotplt_2nd_ent & 0xff8) << 7)));
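// Encoding note (editorial): the LDR imm12 field sits in insn bits [21:10]
// and is scaled by 8 for a 64-bit load, so masking the low address bits
// with 0xff8 and shifting left by 7 is equivalent to ((offset >> 3) << 10);
// e.g. a page offset of 0x010 yields imm12 == 2, which the CPU scales back
// to 16 bytes.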
4832
4833 // Fill in R_AARCH64_ADD_ABS_LO12
4834 elfcpp::Swap<32, big_endian>::writeval(
4835 pov + 12,
4836 ((this->first_plt_entry[3] & 0xffc003ff)
4837 | ((gotplt_2nd_ent & 0xfff) << 10)));
4838 }
4839
4840
4841 // Subsequent entries in the PLT for an executable.
4842 // FIXME: This only works for 64bit
4843
4844 template<int size, bool big_endian>
4845 void
4846 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_plt_entry(
4847 unsigned char* pov,
4848 Address got_address,
4849 Address plt_address,
4850 unsigned int got_offset,
4851 unsigned int plt_offset)
4852 {
4853 memcpy(pov, this->plt_entry, this->plt_entry_size);
4854
4855 Address gotplt_entry_address = got_address + got_offset;
4856 Address plt_entry_address = plt_address + plt_offset;
4857
4858 // Fill in R_AARCH64_ADR_PREL_PG_HI21
4859 AArch64_relocate_functions<size, big_endian>::adrp(
4860 pov,
4861 gotplt_entry_address,
4862 plt_entry_address);
4863
4864 // Fill in R_AARCH64_LDST64_ABS_LO12
4865 elfcpp::Swap<32, big_endian>::writeval(
4866 pov + 4,
4867 ((this->plt_entry[1] & 0xffc003ff)
4868 | ((gotplt_entry_address & 0xff8) << 7)));
4869
4870 // Fill in R_AARCH64_ADD_ABS_LO12
4871 elfcpp::Swap<32, big_endian>::writeval(
4872 pov + 8,
4873 ((this->plt_entry[2] & 0xffc003ff)
4874 | ((gotplt_entry_address & 0xfff) <<10)));
4875
4876 }
4877
4878
4879 template<>
4880 const uint32_t
4881 Output_data_plt_aarch64_standard<32, false>::
4882 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4883 {
4884 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4885 0x90000002, /* adrp x2, 0 */
4886 0x90000003, /* adrp x3, 0 */
4887 0xb9400042, /* ldr w2, [w2, #0] */
4888 0x11000063, /* add w3, w3, 0 */
4889 0xd61f0040, /* br x2 */
4890 0xd503201f, /* nop */
4891 0xd503201f, /* nop */
4892 };
4893
4894 template<>
4895 const uint32_t
4896 Output_data_plt_aarch64_standard<32, true>::
4897 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4898 {
4899 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4900 0x90000002, /* adrp x2, 0 */
4901 0x90000003, /* adrp x3, 0 */
4902 0xb9400042, /* ldr w2, [w2, #0] */
4903 0x11000063, /* add w3, w3, 0 */
4904 0xd61f0040, /* br x2 */
4905 0xd503201f, /* nop */
4906 0xd503201f, /* nop */
4907 };
4908
4909 template<>
4910 const uint32_t
4911 Output_data_plt_aarch64_standard<64, false>::
4912 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4913 {
4914 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4915 0x90000002, /* adrp x2, 0 */
4916 0x90000003, /* adrp x3, 0 */
4917 0xf9400042, /* ldr x2, [x2, #0] */
4918 0x91000063, /* add x3, x3, 0 */
4919 0xd61f0040, /* br x2 */
4920 0xd503201f, /* nop */
4921 0xd503201f, /* nop */
4922 };
4923
4924 template<>
4925 const uint32_t
4926 Output_data_plt_aarch64_standard<64, true>::
4927 tlsdesc_plt_entry[plt_tlsdesc_entry_size / 4] =
4928 {
4929 0xa9bf0fe2, /* stp x2, x3, [sp, #-16]! */
4930 0x90000002, /* adrp x2, 0 */
4931 0x90000003, /* adrp x3, 0 */
4932 0xf9400042, /* ldr x2, [x2, #0] */
4933 0x91000063, /* add x3, x3, 0 */
4934 0xd61f0040, /* br x2 */
4935 0xd503201f, /* nop */
4936 0xd503201f, /* nop */
4937 };
4938
4939 template<int size, bool big_endian>
4940 void
4941 Output_data_plt_aarch64_standard<size, big_endian>::do_fill_tlsdesc_entry(
4942 unsigned char* pov,
4943 Address gotplt_address,
4944 Address plt_address,
4945 Address got_base,
4946 unsigned int tlsdesc_got_offset,
4947 unsigned int plt_offset)
4948 {
4949 memcpy(pov, tlsdesc_plt_entry, plt_tlsdesc_entry_size);
4950
4951 // move DT_TLSDESC_GOT address into x2
4952 // move .got.plt address into x3
4953 Address tlsdesc_got_entry = got_base + tlsdesc_got_offset;
4954 Address plt_entry_address = plt_address + plt_offset;
4955
4956 // R_AARCH64_ADR_PREL_PG_HI21
4957 AArch64_relocate_functions<size, big_endian>::adrp(
4958 pov + 4,
4959 tlsdesc_got_entry,
4960 plt_entry_address + 4);
4961
4962 // R_AARCH64_ADR_PREL_PG_HI21
4963 AArch64_relocate_functions<size, big_endian>::adrp(
4964 pov + 8,
4965 gotplt_address,
4966 plt_entry_address + 8);
4967
4968 // R_AARCH64_LDST64_ABS_LO12
4969 elfcpp::Swap<32, big_endian>::writeval(
4970 pov + 12,
4971 ((this->tlsdesc_plt_entry[3] & 0xffc003ff)
4972 | ((tlsdesc_got_entry & 0xff8) << 7)));
4973
4974 // R_AARCH64_ADD_ABS_LO12
4975 elfcpp::Swap<32, big_endian>::writeval(
4976 pov + 16,
4977 ((this->tlsdesc_plt_entry[4] & 0xffc003ff)
4978 | ((gotplt_address & 0xfff) << 10)));
4979 }
4980
4981 // Write out the PLT. This uses the hand-coded instructions above,
4982 // and adjusts them as needed.
4983
4984 template<int size, bool big_endian>
4985 void
4986 Output_data_plt_aarch64<size, big_endian>::do_write(Output_file* of)
4987 {
4988 const off_t offset = this->offset();
4989 const section_size_type oview_size =
4990 convert_to_section_size_type(this->data_size());
4991 unsigned char* const oview = of->get_output_view(offset, oview_size);
4992
4993 const off_t got_file_offset = this->got_plt_->offset();
4994 gold_assert(got_file_offset + this->got_plt_->data_size()
4995 == this->got_irelative_->offset());
4996
4997 const section_size_type got_size =
4998 convert_to_section_size_type(this->got_plt_->data_size()
4999 + this->got_irelative_->data_size());
5000 unsigned char* const got_view = of->get_output_view(got_file_offset,
5001 got_size);
5002
5003 unsigned char* pov = oview;
5004
5005 // The base address of the .plt section.
5006 typename elfcpp::Elf_types<size>::Elf_Addr plt_address = this->address();
5007 // The base address of the PLT portion of the .got section.
5008 typename elfcpp::Elf_types<size>::Elf_Addr gotplt_address
5009 = this->got_plt_->address();
5010
5011 this->fill_first_plt_entry(pov, gotplt_address, plt_address);
5012 pov += this->first_plt_entry_offset();
5013
5014 // The first three entries in .got.plt are reserved.
5015 unsigned char* got_pov = got_view;
5016 memset(got_pov, 0, size / 8 * AARCH64_GOTPLT_RESERVE_COUNT);
5017 got_pov += (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5018
5019 unsigned int plt_offset = this->first_plt_entry_offset();
5020 unsigned int got_offset = (size / 8) * AARCH64_GOTPLT_RESERVE_COUNT;
5021 const unsigned int count = this->count_ + this->irelative_count_;
5022 for (unsigned int plt_index = 0;
5023 plt_index < count;
5024 ++plt_index,
5025 pov += this->get_plt_entry_size(),
5026 got_pov += size / 8,
5027 plt_offset += this->get_plt_entry_size(),
5028 got_offset += size / 8)
5029 {
5030 // Set and adjust the PLT entry itself.
5031 this->fill_plt_entry(pov, gotplt_address, plt_address,
5032 got_offset, plt_offset);
5033
5034 // Set the entry in the GOT, which points to plt0.
5035 elfcpp::Swap<size, big_endian>::writeval(got_pov, plt_address);
5036 }
5037
5038 if (this->has_tlsdesc_entry())
5039 {
5040 // Set and adjust the reserved TLSDESC PLT entry.
5041 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
5042 // The base address of the .got section.
5043 typename elfcpp::Elf_types<size>::Elf_Addr got_base =
5044 this->got_->address();
5045 this->fill_tlsdesc_entry(pov, gotplt_address, plt_address, got_base,
5046 tlsdesc_got_offset, plt_offset);
5047 pov += this->get_plt_tlsdesc_entry_size();
5048 }
5049
5050 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
5051 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
5052
5053 of->write_output_view(offset, oview_size, oview);
5054 of->write_output_view(got_file_offset, got_size, got_view);
5055 }
5056
5057 // Describes how to update the immediate field of an instruction.
5058 struct AArch64_howto
5059 {
5060 // The immediate field mask.
5061 elfcpp::Elf_Xword dst_mask;
5062
5063 // The bit offset at which the relocation immediate is applied.
5064 int doffset;
5065
5066 // The second part offset, if the immediate field has two parts.
5067 // -1 if the immediate field has only one part.
5068 int doffset2;
5069 };
5070
5071 static const AArch64_howto aarch64_howto[AArch64_reloc_property::INST_NUM] =
5072 {
5073 {0, -1, -1}, // DATA
5074 {0x1fffe0, 5, -1}, // MOVW [20:5]-imm16
5075 {0xffffe0, 5, -1}, // LD [23:5]-imm19
5076 {0x60ffffe0, 29, 5}, // ADR [30:29]-immlo [23:5]-immhi
5077 {0x60ffffe0, 29, 5}, // ADRP [30:29]-immlo [23:5]-immhi
5078 {0x3ffc00, 10, -1}, // ADD [21:10]-imm12
5079 {0x3ffc00, 10, -1}, // LDST [21:10]-imm12
5080 {0x7ffe0, 5, -1}, // TBZNZ [18:5]-imm14
5081 {0xffffe0, 5, -1}, // CONDB [23:5]-imm19
5082 {0x3ffffff, 0, -1}, // B [25:0]-imm26
5083 {0x3ffffff, 0, -1}, // CALL [25:0]-imm26
5084 };
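// Illustrative use of the table (a sketch, not part of the original
// source): applying the ADD row above (dst_mask 0x3ffc00, doffset 10) to
// an instruction amounts to
//   insn = (insn & ~0x3ffc00) | (imm12 << 10);
// update_view() below performs exactly this masking and shifting
// generically for single-part immediates.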
5085
5086 // AArch64 relocate function class
5087
5088 template<int size, bool big_endian>
5089 class AArch64_relocate_functions
5090 {
5091 public:
5092 typedef enum
5093 {
5094 STATUS_OKAY, // No error during relocation.
5095 STATUS_OVERFLOW, // Relocation overflow.
5096 STATUS_BAD_RELOC, // Relocation cannot be applied.
5097 } Status;
5098
5099 typedef AArch64_relocate_functions<size, big_endian> This;
5100 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
5101 typedef Relocate_info<size, big_endian> The_relocate_info;
5102 typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
5103 typedef Reloc_stub<size, big_endian> The_reloc_stub;
5104 typedef Stub_table<size, big_endian> The_stub_table;
5105 typedef elfcpp::Rela<size, big_endian> The_rela;
5106 typedef typename elfcpp::Swap<size, big_endian>::Valtype AArch64_valtype;
5107
5108 // Return the page address of the address.
5109 // Page(address) = address & ~0xFFF
5110
5111 static inline AArch64_valtype
5112 Page(Address address)
5113 {
5114 return (address & (~static_cast<Address>(0xFFF)));
5115 }
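// Worked example (illustrative): Page(0x412345) == 0x412000, so for an
// ADRP at P targeting S + A the immediate is (Page(S+A) - Page(P)) >> 12;
// e.g. Page(0x414123) - Page(0x412345) == 0x2000, giving an immediate of 2.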
5116
5117 private:
5118 // Update the instruction (pointed to by view) with selected bits (immed).
5119 // val = (val & ~dst_mask) | (immed << doffset)
5120
5121 template<int valsize>
5122 static inline void
5123 update_view(unsigned char* view,
5124 AArch64_valtype immed,
5125 elfcpp::Elf_Xword doffset,
5126 elfcpp::Elf_Xword dst_mask)
5127 {
5128 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5129 Valtype* wv = reinterpret_cast<Valtype*>(view);
5130 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5131
5132 // Clear immediate fields.
5133 val &= ~dst_mask;
5134 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5135 static_cast<Valtype>(val | (immed << doffset)));
5136 }
5137
5138 // Update two parts of an instruction (pointed to by view) with selected
5139 // bits (immed1 and immed2).
5140 // val = (val & ~dst_mask) | (immed1 << doffset1) | (immed2 << doffset2)
5141
5142 template<int valsize>
5143 static inline void
5144 update_view_two_parts(
5145 unsigned char* view,
5146 AArch64_valtype immed1,
5147 AArch64_valtype immed2,
5148 elfcpp::Elf_Xword doffset1,
5149 elfcpp::Elf_Xword doffset2,
5150 elfcpp::Elf_Xword dst_mask)
5151 {
5152 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5153 Valtype* wv = reinterpret_cast<Valtype*>(view);
5154 Valtype val = elfcpp::Swap<valsize, big_endian>::readval(wv);
5155 val &= ~dst_mask;
5156 elfcpp::Swap<valsize, big_endian>::writeval(wv,
5157 static_cast<Valtype>(val | (immed1 << doffset1) |
5158 (immed2 << doffset2)));
5159 }
5160
5161 // Update adr or adrp instruction with immed.
5162 // In adr and adrp: [30:29] immlo [23:5] immhi
5163
5164 static inline void
5165 update_adr(unsigned char* view, AArch64_valtype immed)
5166 {
5167 elfcpp::Elf_Xword dst_mask = (0x3 << 29) | (0x7ffff << 5);
5168 This::template update_view_two_parts<32>(
5169 view,
5170 immed & 0x3,
5171 (immed & 0x1ffffc) >> 2,
5172 29,
5173 5,
5174 dst_mask);
5175 }
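// Worked example: with immed = 0x12345 the call above places
//   immlo = immed & 0x3            = 0x1    into bits [30:29], and
//   immhi = (immed & 0x1ffffc) >> 2 = 0x48d1 into bits [23:5],
// which is how ADR/ADRP encode their 21-bit immediate (immhi:immlo).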
5176
5177 // Update movz/movn instruction with bits immed.
5178 // Set instruction to movz if is_movz is true, otherwise set instruction
5179 // to movn.
5180
5181 static inline void
5182 update_movnz(unsigned char* view,
5183 AArch64_valtype immed,
5184 bool is_movz)
5185 {
5186 typedef typename elfcpp::Swap<32, big_endian>::Valtype Valtype;
5187 Valtype* wv = reinterpret_cast<Valtype*>(view);
5188 Valtype val = elfcpp::Swap<32, big_endian>::readval(wv);
5189
5190 const elfcpp::Elf_Xword doffset =
5191 aarch64_howto[AArch64_reloc_property::INST_MOVW].doffset;
5192 const elfcpp::Elf_Xword dst_mask =
5193 aarch64_howto[AArch64_reloc_property::INST_MOVW].dst_mask;
5194
5195 // Clear immediate fields and opc code.
5196 val &= ~(dst_mask | (0x3 << 29));
5197
5198 // Set instruction to movz or movn.
5199 // movz: [30:29] is 10 movn: [30:29] is 00
5200 if (is_movz)
5201 val |= (0x2 << 29);
5202
5203 elfcpp::Swap<32, big_endian>::writeval(wv,
5204 static_cast<Valtype>(val | (immed << doffset)));
5205 }
5206
5207 public:
5208
5209 // Update selected bits in text.
5210
5211 template<int valsize>
5212 static inline typename This::Status
5213 reloc_common(unsigned char* view, Address x,
5214 const AArch64_reloc_property* reloc_property)
5215 {
5216 // Select bits from X.
5217 Address immed = reloc_property->select_x_value(x);
5218
5219 // Update view.
5220 const AArch64_reloc_property::Reloc_inst inst =
5221 reloc_property->reloc_inst();
5222 // If it is a data relocation, or the instruction has a two-part immediate
5223 // field, pcrela_general should not be called.
5224 gold_assert(aarch64_howto[inst].doffset2 == -1 &&
5225 aarch64_howto[inst].doffset != -1);
5226 This::template update_view<valsize>(view, immed,
5227 aarch64_howto[inst].doffset,
5228 aarch64_howto[inst].dst_mask);
5229
5230 // Do check overflow or alignment if needed.
5231 return (reloc_property->checkup_x_value(x)
5232 ? This::STATUS_OKAY
5233 : This::STATUS_OVERFLOW);
5234 }
5235
5236 // Construct a B insn. Note that although we group it here with the other
5237 // relocation operations, there is actually no 'relocation' involved.
5238 static inline void
5239 construct_b(unsigned char* view, unsigned int branch_offset)
5240 {
5241 update_view_two_parts<32>(view, 0x05, (branch_offset >> 2),
5242 26, 0, 0xffffffff);
5243 }
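// A rough sanity check of the encoding built above: the opcode field 0x05
// lands in bits [31:26] and imm26 = branch_offset >> 2 in bits [25:0], so
// construct_b(view, 8) writes 0x14000002, i.e. an unconditional branch to
// PC + 8.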
5244
5245 // Do a simple rela relocation at unaligned addresses.
5246
5247 template<int valsize>
5248 static inline typename This::Status
5249 rela_ua(unsigned char* view,
5250 const Sized_relobj_file<size, big_endian>* object,
5251 const Symbol_value<size>* psymval,
5252 AArch64_valtype addend,
5253 const AArch64_reloc_property* reloc_property)
5254 {
5255 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5256 Valtype;
5257 typename elfcpp::Elf_types<size>::Elf_Addr x =
5258 psymval->value(object, addend);
5259 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5260 static_cast<Valtype>(x));
5261 return (reloc_property->checkup_x_value(x)
5262 ? This::STATUS_OKAY
5263 : This::STATUS_OVERFLOW);
5264 }
5265
5266 // Do a simple pc-relative relocation at unaligned addresses.
5267
5268 template<int valsize>
5269 static inline typename This::Status
5270 pcrela_ua(unsigned char* view,
5271 const Sized_relobj_file<size, big_endian>* object,
5272 const Symbol_value<size>* psymval,
5273 AArch64_valtype addend,
5274 Address address,
5275 const AArch64_reloc_property* reloc_property)
5276 {
5277 typedef typename elfcpp::Swap_unaligned<valsize, big_endian>::Valtype
5278 Valtype;
5279 Address x = psymval->value(object, addend) - address;
5280 elfcpp::Swap_unaligned<valsize, big_endian>::writeval(view,
5281 static_cast<Valtype>(x));
5282 return (reloc_property->checkup_x_value(x)
5283 ? This::STATUS_OKAY
5284 : This::STATUS_OVERFLOW);
5285 }
5286
5287 // Do a simple rela relocation at aligned addresses.
5288
5289 template<int valsize>
5290 static inline typename This::Status
5291 rela(
5292 unsigned char* view,
5293 const Sized_relobj_file<size, big_endian>* object,
5294 const Symbol_value<size>* psymval,
5295 AArch64_valtype addend,
5296 const AArch64_reloc_property* reloc_property)
5297 {
5298 typedef typename elfcpp::Swap<valsize, big_endian>::Valtype Valtype;
5299 Valtype* wv = reinterpret_cast<Valtype*>(view);
5300 Address x = psymval->value(object, addend);
5301 elfcpp::Swap<valsize, big_endian>::writeval(wv, static_cast<Valtype>(x));
5302 return (reloc_property->checkup_x_value(x)
5303 ? This::STATUS_OKAY
5304 : This::STATUS_OVERFLOW);
5305 }
5306
5307 // Do relocate. Update selected bits in text.
5308 // new_val = (val & ~dst_mask) | (immed << doffset)
5309
5310 template<int valsize>
5311 static inline typename This::Status
5312 rela_general(unsigned char* view,
5313 const Sized_relobj_file<size, big_endian>* object,
5314 const Symbol_value<size>* psymval,
5315 AArch64_valtype addend,
5316 const AArch64_reloc_property* reloc_property)
5317 {
5318 // Calculate relocation.
5319 Address x = psymval->value(object, addend);
5320 return This::template reloc_common<valsize>(view, x, reloc_property);
5321 }
5322
5323 // Do relocate. Update selected bits in text.
5324 // new val = (val & ~dst_mask) | (immed << doffset)
5325
5326 template<int valsize>
5327 static inline typename This::Status
5328 rela_general(
5329 unsigned char* view,
5330 AArch64_valtype s,
5331 AArch64_valtype addend,
5332 const AArch64_reloc_property* reloc_property)
5333 {
5334 // Calculate relocation.
5335 Address x = s + addend;
5336 return This::template reloc_common<valsize>(view, x, reloc_property);
5337 }
5338
5339 // Do address relative relocate. Update selected bits in text.
5340 // new val = (val & ~dst_mask) | (immed << doffset)
5341
5342 template<int valsize>
5343 static inline typename This::Status
5344 pcrela_general(
5345 unsigned char* view,
5346 const Sized_relobj_file<size, big_endian>* object,
5347 const Symbol_value<size>* psymval,
5348 AArch64_valtype addend,
5349 Address address,
5350 const AArch64_reloc_property* reloc_property)
5351 {
5352 // Calculate relocation.
5353 Address x = psymval->value(object, addend) - address;
5354 return This::template reloc_common<valsize>(view, x, reloc_property);
5355 }
5356
5357
5358 // Calculate (S + A) - address, update adr instruction.
5359
5360 static inline typename This::Status
5361 adr(unsigned char* view,
5362 const Sized_relobj_file<size, big_endian>* object,
5363 const Symbol_value<size>* psymval,
5364 Address addend,
5365 Address address,
5366 const AArch64_reloc_property* /* reloc_property */)
5367 {
5368 AArch64_valtype x = psymval->value(object, addend) - address;
5369 // Pick bits [20:0] of X.
5370 AArch64_valtype immed = x & 0x1fffff;
5371 update_adr(view, immed);
5372 // Check -2^20 <= X < 2^20
5373 return (size == 64 && Bits<21>::has_overflow((x))
5374 ? This::STATUS_OVERFLOW
5375 : This::STATUS_OKAY);
5376 }
5377
5378 // Calculate PG(S+A) - PG(address), update adrp instruction.
5379 // R_AARCH64_ADR_PREL_PG_HI21
5380
5381 static inline typename This::Status
5382 adrp(
5383 unsigned char* view,
5384 Address sa,
5385 Address address)
5386 {
5387 AArch64_valtype x = This::Page(sa) - This::Page(address);
5388 // Pick [32:12] of X.
5389 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5390 update_adr(view, immed);
5391 // Check -2^32 <= X < 2^32
5392 return (size == 64 && Bits<33>::has_overflow((x))
5393 ? This::STATUS_OVERFLOW
5394 : This::STATUS_OKAY);
5395 }
5396
5397 // Calculate PG(S+A) - PG(address), update adrp instruction.
5398 // R_AARCH64_ADR_PREL_PG_HI21
5399
5400 static inline typename This::Status
5401 adrp(unsigned char* view,
5402 const Sized_relobj_file<size, big_endian>* object,
5403 const Symbol_value<size>* psymval,
5404 Address addend,
5405 Address address,
5406 const AArch64_reloc_property* reloc_property)
5407 {
5408 Address sa = psymval->value(object, addend);
5409 AArch64_valtype x = This::Page(sa) - This::Page(address);
5410 // Pick [32:12] of X.
5411 AArch64_valtype immed = (x >> 12) & 0x1fffff;
5412 update_adr(view, immed);
5413 return (reloc_property->checkup_x_value(x)
5414 ? This::STATUS_OKAY
5415 : This::STATUS_OVERFLOW);
5416 }
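// Worked example (illustrative): for S + A = 0x412345 relocating an ADRP at
// address 0x400123,
//   X     = Page(0x412345) - Page(0x400123) = 0x412000 - 0x400000 = 0x12000
//   immed = (X >> 12) & 0x1fffff            = 0x12
// The companion *_LO12 relocation on the following ADD/LDR supplies the
// remaining low 12 bits (0x345).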
5417
5418 // Update mov[n/z] instruction. Check overflow if needed.
5419 // If X >= 0, set the instruction to movz and its immediate value to the
5420 // selected bits of X.
5421 // If X < 0, set the instruction to movn and its immediate value to
5422 // NOT (the selected bits of X).
5423
5424 static inline typename This::Status
5425 movnz(unsigned char* view,
5426 AArch64_valtype x,
5427 const AArch64_reloc_property* reloc_property)
5428 {
5429 // Select bits from X.
5430 Address immed;
5431 bool is_movz;
5432 typedef typename elfcpp::Elf_types<size>::Elf_Swxword SignedW;
5433 if (static_cast<SignedW>(x) >= 0)
5434 {
5435 immed = reloc_property->select_x_value(x);
5436 is_movz = true;
5437 }
5438 else
5439 {
5440 immed = reloc_property->select_x_value(~x);
5441 is_movz = false;
5442 }
5443
5444 // Update movnz instruction.
5445 update_movnz(view, immed, is_movz);
5446
5447 // Do check overflow or alignment if needed.
5448 return (reloc_property->checkup_x_value(x)
5449 ? This::STATUS_OKAY
5450 : This::STATUS_OVERFLOW);
5451 }
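// Illustrative example, assuming a reloc property that selects bits [15:0]
// (a MOVW_*_G0-style relocation): for x = -8 the code above selects the bits
// of ~x (= 0x7) and rewrites the insn to MOVN, so at run time the register
// gets NOT(0x7) = -8.  For x = 8 the insn would instead become MOVZ with
// immediate 0x8.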
5452
5453 static inline bool
5454 maybe_apply_stub(unsigned int,
5455 const The_relocate_info*,
5456 const The_rela&,
5457 unsigned char*,
5458 Address,
5459 const Sized_symbol<size>*,
5460 const Symbol_value<size>*,
5461 const Sized_relobj_file<size, big_endian>*,
5462 section_size_type);
5463
5464 }; // End of AArch64_relocate_functions
5465
5466
5467 // For a certain relocation type (usually jump/branch), test to see if the
5468 // destination is too far away and needs a stub to be reached. If so,
5469 // re-route the destination of the original instruction to the stub. Note
5470 // that at this point the stub has already been generated.
5471
5472 template<int size, bool big_endian>
5473 bool
5474 AArch64_relocate_functions<size, big_endian>::
5475 maybe_apply_stub(unsigned int r_type,
5476 const The_relocate_info* relinfo,
5477 const The_rela& rela,
5478 unsigned char* view,
5479 Address address,
5480 const Sized_symbol<size>* gsym,
5481 const Symbol_value<size>* psymval,
5482 const Sized_relobj_file<size, big_endian>* object,
5483 section_size_type current_group_size)
5484 {
5485 if (parameters->options().relocatable())
5486 return false;
5487
5488 typename elfcpp::Elf_types<size>::Elf_Swxword addend = rela.get_r_addend();
5489 Address branch_target = psymval->value(object, 0) + addend;
5490 int stub_type =
5491 The_reloc_stub::stub_type_for_reloc(r_type, address, branch_target);
5492 if (stub_type == ST_NONE)
5493 return false;
5494
5495 const The_aarch64_relobj* aarch64_relobj =
5496 static_cast<const The_aarch64_relobj*>(object);
5497 const AArch64_reloc_property* arp =
5498 aarch64_reloc_property_table->get_reloc_property(r_type);
5499 gold_assert(arp != NULL);
5500
5501 // We don't create stubs for undefined symbols, but do for weak.
5502 if (gsym
5503 && !gsym->use_plt_offset(arp->reference_flags())
5504 && gsym->is_undefined())
5505 {
5506 gold_debug(DEBUG_TARGET,
5507 "stub: looking for a stub for undefined symbol %s in file %s",
5508 gsym->name(), aarch64_relobj->name().c_str());
5509 return false;
5510 }
5511
5512 The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
5513 gold_assert(stub_table != NULL);
5514
5515 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
5516 typename The_reloc_stub::Key stub_key(stub_type, gsym, object, r_sym, addend);
5517 The_reloc_stub* stub = stub_table->find_reloc_stub(stub_key);
5518 gold_assert(stub != NULL);
5519
5520 Address new_branch_target = stub_table->address() + stub->offset();
5521 typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
5522 new_branch_target - address;
5523 typename This::Status status = This::template
5524 rela_general<32>(view, branch_offset, 0, arp);
5525 if (status != This::STATUS_OKAY)
5526 gold_error(_("Stub is too far away, try a smaller value "
5527 "for '--stub-group-size'. The current value is 0x%lx."),
5528 static_cast<unsigned long>(current_group_size));
5529 return true;
5530 }
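// In short: if, for example, a CALL26/JUMP26 target lies outside the direct
// branch range (roughly +/-128MB for B/BL), the branch is re-pointed at a
// previously created long-branch stub in the owning section's stub table;
// otherwise the relocation is left to the normal relocation path.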
5531
5532
5533 // Group input sections for stub generation.
5534 //
5535 // We group input sections in an output section so that the total size,
5536 // including any padding space due to alignment, is smaller than GROUP_SIZE,
5537 // unless the only input section in a group is already bigger than GROUP_SIZE.
5538 // For each group a stub table is created and placed after the last input
5539 // section in that group, so branches in the group can reach their stubs.
5540 // If STUBS_ALWAYS_AFTER_BRANCH is false, we further extend the group after
5541 // the stub table.
5542
5543 template<int size, bool big_endian>
5544 void
5545 Target_aarch64<size, big_endian>::group_sections(
5546 Layout* layout,
5547 section_size_type group_size,
5548 bool stubs_always_after_branch,
5549 const Task* task)
5550 {
5551 // Group input sections and insert stub table
5552 Layout::Section_list section_list;
5553 layout->get_executable_sections(&section_list);
5554 for (Layout::Section_list::const_iterator p = section_list.begin();
5555 p != section_list.end();
5556 ++p)
5557 {
5558 AArch64_output_section<size, big_endian>* output_section =
5559 static_cast<AArch64_output_section<size, big_endian>*>(*p);
5560 output_section->group_sections(group_size, stubs_always_after_branch,
5561 this, task);
5562 }
5563 }
5564
5565
5566 // Find the AArch64_input_section object corresponding to the SHNDX-th input
5567 // section of RELOBJ.
5568
5569 template<int size, bool big_endian>
5570 AArch64_input_section<size, big_endian>*
5571 Target_aarch64<size, big_endian>::find_aarch64_input_section(
5572 Relobj* relobj, unsigned int shndx) const
5573 {
5574 Section_id sid(relobj, shndx);
5575 typename AArch64_input_section_map::const_iterator p =
5576 this->aarch64_input_section_map_.find(sid);
5577 return (p != this->aarch64_input_section_map_.end()) ? p->second : NULL;
5578 }
5579
5580
5581 // Make a new AArch64_input_section object.
5582
5583 template<int size, bool big_endian>
5584 AArch64_input_section<size, big_endian>*
5585 Target_aarch64<size, big_endian>::new_aarch64_input_section(
5586 Relobj* relobj, unsigned int shndx)
5587 {
5588 Section_id sid(relobj, shndx);
5589
5590 AArch64_input_section<size, big_endian>* input_section =
5591 new AArch64_input_section<size, big_endian>(relobj, shndx);
5592 input_section->init();
5593
5594 // Register new AArch64_input_section in map for look-up.
5595 std::pair<typename AArch64_input_section_map::iterator,bool> ins =
5596 this->aarch64_input_section_map_.insert(
5597 std::make_pair(sid, input_section));
5598
5599 // Make sure that we have not created another AArch64_input_section
5600 // for this input section already.
5601 gold_assert(ins.second);
5602
5603 return input_section;
5604 }
5605
5606
5607 // Relaxation hook. This is where we do stub generation.
5608
5609 template<int size, bool big_endian>
5610 bool
5611 Target_aarch64<size, big_endian>::do_relax(
5612 int pass,
5613 const Input_objects* input_objects,
5614 Symbol_table* symtab,
5615 Layout* layout ,
5616 const Task* task)
5617 {
5618 gold_assert(!parameters->options().relocatable());
5619 if (pass == 1)
5620 {
5621 // We don't handle negative stub_group_size right now.
5622 this->stub_group_size_ = abs(parameters->options().stub_group_size());
5623 if (this->stub_group_size_ == 1)
5624 {
5625 // Leave room for 4096 4-byte stub entries. If we exceed that, then we
5626 // will fail to link. The user will have to relink with an explicit
5627 // group size option.
5628 this->stub_group_size_ = The_reloc_stub::MAX_BRANCH_OFFSET -
5629 4096 * 4;
5630 }
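// E.g. with this default, the effective group size is roughly the 128MB
// reach of B/BL minus 4096 * 4 = 16KB reserved for stub entries, so a
// branch from anywhere in the group can still reach a stub placed at the
// end of it.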
5631 group_sections(layout, this->stub_group_size_, true, task);
5632 }
5633 else
5634 {
5635 // If this is not the first pass, addresses and file offsets have
5636 // been reset at this point; set them here.
5637 for (Stub_table_iterator sp = this->stub_tables_.begin();
5638 sp != this->stub_tables_.end(); ++sp)
5639 {
5640 The_stub_table* stt = *sp;
5641 The_aarch64_input_section* owner = stt->owner();
5642 off_t off = align_address(owner->original_size(),
5643 stt->addralign());
5644 stt->set_address_and_file_offset(owner->address() + off,
5645 owner->offset() + off);
5646 }
5647 }
5648
5649 // Scan relocs for relocation stubs
5650 for (Input_objects::Relobj_iterator op = input_objects->relobj_begin();
5651 op != input_objects->relobj_end();
5652 ++op)
5653 {
5654 The_aarch64_relobj* aarch64_relobj =
5655 static_cast<The_aarch64_relobj*>(*op);
5656 // Lock the object so we can read from it. This is only called
5657 // single-threaded from Layout::finalize, so it is OK to lock.
5658 Task_lock_obj<Object> tl(task, aarch64_relobj);
5659 aarch64_relobj->scan_sections_for_stubs(this, symtab, layout);
5660 }
5661
5662 bool any_stub_table_changed = false;
5663 for (Stub_table_iterator siter = this->stub_tables_.begin();
5664 siter != this->stub_tables_.end() && !any_stub_table_changed; ++siter)
5665 {
5666 The_stub_table* stub_table = *siter;
5667 if (stub_table->update_data_size_changed_p())
5668 {
5669 The_aarch64_input_section* owner = stub_table->owner();
5670 uint64_t address = owner->address();
5671 off_t offset = owner->offset();
5672 owner->reset_address_and_file_offset();
5673 owner->set_address_and_file_offset(address, offset);
5674
5675 any_stub_table_changed = true;
5676 }
5677 }
5678
5679 // Continue relaxation only if some stub table changed size.
5680 bool continue_relaxation = any_stub_table_changed;
5681 if (!continue_relaxation)
5682 for (Stub_table_iterator sp = this->stub_tables_.begin();
5683 (sp != this->stub_tables_.end());
5684 ++sp)
5685 (*sp)->finalize_stubs();
5686
5687 return continue_relaxation;
5688 }
5689
5690
5691 // Make a new Stub_table.
5692
5693 template<int size, bool big_endian>
5694 Stub_table<size, big_endian>*
5695 Target_aarch64<size, big_endian>::new_stub_table(
5696 AArch64_input_section<size, big_endian>* owner)
5697 {
5698 Stub_table<size, big_endian>* stub_table =
5699 new Stub_table<size, big_endian>(owner);
5700 stub_table->set_address(align_address(
5701 owner->address() + owner->data_size(), 8));
5702 stub_table->set_file_offset(owner->offset() + owner->data_size());
5703 stub_table->finalize_data_size();
5704
5705 this->stub_tables_.push_back(stub_table);
5706
5707 return stub_table;
5708 }
5709
5710
5711 template<int size, bool big_endian>
5712 uint64_t
5713 Target_aarch64<size, big_endian>::do_reloc_addend(
5714 void* arg, unsigned int r_type, uint64_t) const
5715 {
5716 gold_assert(r_type == elfcpp::R_AARCH64_TLSDESC);
5717 uintptr_t intarg = reinterpret_cast<uintptr_t>(arg);
5718 gold_assert(intarg < this->tlsdesc_reloc_info_.size());
5719 const Tlsdesc_info& ti(this->tlsdesc_reloc_info_[intarg]);
5720 const Symbol_value<size>* psymval = ti.object->local_symbol(ti.r_sym);
5721 gold_assert(psymval->is_tls_symbol());
5722 // The value of a TLS symbol is the offset in the TLS segment.
5723 return psymval->value(ti.object, 0);
5724 }
5725
5726 // Return the number of entries in the PLT.
5727
5728 template<int size, bool big_endian>
5729 unsigned int
5730 Target_aarch64<size, big_endian>::plt_entry_count() const
5731 {
5732 if (this->plt_ == NULL)
5733 return 0;
5734 return this->plt_->entry_count();
5735 }
5736
5737 // Return the offset of the first non-reserved PLT entry.
5738
5739 template<int size, bool big_endian>
5740 unsigned int
5741 Target_aarch64<size, big_endian>::first_plt_entry_offset() const
5742 {
5743 return this->plt_->first_plt_entry_offset();
5744 }
5745
5746 // Return the size of each PLT entry.
5747
5748 template<int size, bool big_endian>
5749 unsigned int
5750 Target_aarch64<size, big_endian>::plt_entry_size() const
5751 {
5752 return this->plt_->get_plt_entry_size();
5753 }
5754
5755 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
5756
5757 template<int size, bool big_endian>
5758 void
5759 Target_aarch64<size, big_endian>::define_tls_base_symbol(
5760 Symbol_table* symtab, Layout* layout)
5761 {
5762 if (this->tls_base_symbol_defined_)
5763 return;
5764
5765 Output_segment* tls_segment = layout->tls_segment();
5766 if (tls_segment != NULL)
5767 {
5768 // _TLS_MODULE_BASE_ always points to the beginning of tls segment.
5769 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
5770 Symbol_table::PREDEFINED,
5771 tls_segment, 0, 0,
5772 elfcpp::STT_TLS,
5773 elfcpp::STB_LOCAL,
5774 elfcpp::STV_HIDDEN, 0,
5775 Symbol::SEGMENT_START,
5776 true);
5777 }
5778 this->tls_base_symbol_defined_ = true;
5779 }
5780
5781 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
5782
5783 template<int size, bool big_endian>
5784 void
5785 Target_aarch64<size, big_endian>::reserve_tlsdesc_entries(
5786 Symbol_table* symtab, Layout* layout)
5787 {
5788 if (this->plt_ == NULL)
5789 this->make_plt_section(symtab, layout);
5790
5791 if (!this->plt_->has_tlsdesc_entry())
5792 {
5793 // Allocate the TLSDESC_GOT entry.
5794 Output_data_got_aarch64<size, big_endian>* got =
5795 this->got_section(symtab, layout);
5796 unsigned int got_offset = got->add_constant(0);
5797
5798 // Allocate the TLSDESC_PLT entry.
5799 this->plt_->reserve_tlsdesc_entry(got_offset);
5800 }
5801 }
5802
5803 // Create a GOT entry for the TLS module index.
5804
5805 template<int size, bool big_endian>
5806 unsigned int
5807 Target_aarch64<size, big_endian>::got_mod_index_entry(
5808 Symbol_table* symtab, Layout* layout,
5809 Sized_relobj_file<size, big_endian>* object)
5810 {
5811 if (this->got_mod_index_offset_ == -1U)
5812 {
5813 gold_assert(symtab != NULL && layout != NULL && object != NULL);
5814 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
5815 Output_data_got_aarch64<size, big_endian>* got =
5816 this->got_section(symtab, layout);
5817 unsigned int got_offset = got->add_constant(0);
5818 rela_dyn->add_local(object, 0, elfcpp::R_AARCH64_TLS_DTPMOD64, got,
5819 got_offset, 0);
5820 got->add_constant(0);
5821 this->got_mod_index_offset_ = got_offset;
5822 }
5823 return this->got_mod_index_offset_;
5824 }
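// Note: the module-index entry is really a pair of GOT slots.  The first
// gets an R_AARCH64_TLS_DTPMOD64 dynamic relocation (filled in by the
// dynamic linker); the second is left as 0, since for Local-Dynamic the
// per-symbol offsets are applied separately via DTPREL relocations.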
5825
5826 // Optimize the TLS relocation type based on what we know about the
5827 // symbol. IS_FINAL is true if the final address of this symbol is
5828 // known at link time.
5829
5830 template<int size, bool big_endian>
5831 tls::Tls_optimization
5832 Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
5833 int r_type)
5834 {
5835 // If we are generating a shared library, then we can't do anything
5836 // in the linker.
5837 if (parameters->options().shared())
5838 return tls::TLSOPT_NONE;
5839
5840 switch (r_type)
5841 {
5842 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
5843 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
5844 case elfcpp::R_AARCH64_TLSDESC_LD_PREL19:
5845 case elfcpp::R_AARCH64_TLSDESC_ADR_PREL21:
5846 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
5847 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
5848 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
5849 case elfcpp::R_AARCH64_TLSDESC_OFF_G1:
5850 case elfcpp::R_AARCH64_TLSDESC_OFF_G0_NC:
5851 case elfcpp::R_AARCH64_TLSDESC_LDR:
5852 case elfcpp::R_AARCH64_TLSDESC_ADD:
5853 case elfcpp::R_AARCH64_TLSDESC_CALL:
5854 // These are General-Dynamic relocs, which permit fully general TLS
5855 // access. Since we know that we are generating an executable,
5856 // we can convert this to Initial-Exec. If we also know that
5857 // this is a local symbol, we can further switch to Local-Exec.
5858 if (is_final)
5859 return tls::TLSOPT_TO_LE;
5860 return tls::TLSOPT_TO_IE;
5861
5862 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
5863 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
5864 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
5865 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5866 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
5867 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
5868 // These are Local-Dynamic, which refer to local symbols in the
5869 // dynamic TLS block. Since we know that we are generating an
5870 // executable, we can switch to Local-Exec.
5871 return tls::TLSOPT_TO_LE;
5872
5873 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5874 case elfcpp::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5875 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
5876 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
5877 case elfcpp::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
5878 // These are Initial-Exec relocs which get the thread offset
5879 // from the GOT. If we know that we are linking against the
5880 // local symbol, we can switch to Local-Exec, which links the
5881 // thread offset into the instruction.
5882 if (is_final)
5883 return tls::TLSOPT_TO_LE;
5884 return tls::TLSOPT_NONE;
5885
5886 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
5887 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
5888 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5889 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
5890 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5891 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
5892 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
5893 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
5894 // When we already have Local-Exec, there is nothing further we
5895 // can do.
5896 return tls::TLSOPT_NONE;
5897
5898 default:
5899 gold_unreachable();
5900 }
5901 }
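// Summary of the decisions above (illustrative):
//   shared library                  -> TLSOPT_NONE for everything
//   executable, GD/TLSDESC relocs   -> TLSOPT_TO_LE if is_final, else TO_IE
//   executable, LD relocs           -> TLSOPT_TO_LE
//   executable, IE relocs           -> TLSOPT_TO_LE if is_final, else NONE
//   LE relocs                       -> TLSOPT_NONE (already optimal)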
5902
5903 // Returns true if this relocation type could be that of a function pointer.
5904
5905 template<int size, bool big_endian>
5906 inline bool
5907 Target_aarch64<size, big_endian>::Scan::possible_function_pointer_reloc(
5908 unsigned int r_type)
5909 {
5910 switch (r_type)
5911 {
5912 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
5913 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
5914 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
5915 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
5916 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
5917 {
5918 return true;
5919 }
5920 }
5921 return false;
5922 }
5923
5924 // For safe ICF, scan a relocation for a local symbol to check if it
5925 // corresponds to a function pointer being taken. In that case mark
5926 // the function whose pointer was taken as not foldable.
5927
5928 template<int size, bool big_endian>
5929 inline bool
5930 Target_aarch64<size, big_endian>::Scan::local_reloc_may_be_function_pointer(
5931 Symbol_table* ,
5932 Layout* ,
5933 Target_aarch64<size, big_endian>* ,
5934 Sized_relobj_file<size, big_endian>* ,
5935 unsigned int ,
5936 Output_section* ,
5937 const elfcpp::Rela<size, big_endian>& ,
5938 unsigned int r_type,
5939 const elfcpp::Sym<size, big_endian>&)
5940 {
5941 // When building a shared library, do not fold any local symbols.
5942 return (parameters->options().shared()
5943 || possible_function_pointer_reloc(r_type));
5944 }
5945
5946 // For safe ICF, scan a relocation for a global symbol to check if it
5947 // corresponds to a function pointer being taken. In that case mark
5948 // the function whose pointer was taken as not foldable.
5949
5950 template<int size, bool big_endian>
5951 inline bool
5952 Target_aarch64<size, big_endian>::Scan::global_reloc_may_be_function_pointer(
5953 Symbol_table* ,
5954 Layout* ,
5955 Target_aarch64<size, big_endian>* ,
5956 Sized_relobj_file<size, big_endian>* ,
5957 unsigned int ,
5958 Output_section* ,
5959 const elfcpp::Rela<size, big_endian>& ,
5960 unsigned int r_type,
5961 Symbol* gsym)
5962 {
5963 // When building a shared library, do not fold symbols whose visibility
5964 // is hidden, internal or protected.
5965 return ((parameters->options().shared()
5966 && (gsym->visibility() == elfcpp::STV_INTERNAL
5967 || gsym->visibility() == elfcpp::STV_PROTECTED
5968 || gsym->visibility() == elfcpp::STV_HIDDEN))
5969 || possible_function_pointer_reloc(r_type));
5970 }
5971
5972 // Report an unsupported relocation against a local symbol.
5973
5974 template<int size, bool big_endian>
5975 void
5976 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_local(
5977 Sized_relobj_file<size, big_endian>* object,
5978 unsigned int r_type)
5979 {
5980 gold_error(_("%s: unsupported reloc %u against local symbol"),
5981 object->name().c_str(), r_type);
5982 }
5983
5984 // We are about to emit a dynamic relocation of type R_TYPE. If the
5985 // dynamic linker does not support it, issue an error.
5986
5987 template<int size, bool big_endian>
5988 void
5989 Target_aarch64<size, big_endian>::Scan::check_non_pic(Relobj* object,
5990 unsigned int r_type)
5991 {
5992 gold_assert(r_type != elfcpp::R_AARCH64_NONE);
5993
5994 switch (r_type)
5995 {
5996 // These are the relocation types supported by glibc for AARCH64.
5997 case elfcpp::R_AARCH64_NONE:
5998 case elfcpp::R_AARCH64_COPY:
5999 case elfcpp::R_AARCH64_GLOB_DAT:
6000 case elfcpp::R_AARCH64_JUMP_SLOT:
6001 case elfcpp::R_AARCH64_RELATIVE:
6002 case elfcpp::R_AARCH64_TLS_DTPREL64:
6003 case elfcpp::R_AARCH64_TLS_DTPMOD64:
6004 case elfcpp::R_AARCH64_TLS_TPREL64:
6005 case elfcpp::R_AARCH64_TLSDESC:
6006 case elfcpp::R_AARCH64_IRELATIVE:
6007 case elfcpp::R_AARCH64_ABS32:
6008 case elfcpp::R_AARCH64_ABS64:
6009 return;
6010
6011 default:
6012 break;
6013 }
6014
6015 // This prevents us from issuing more than one error per reloc
6016 // section. But we can still wind up issuing more than one
6017 // error per object file.
6018 if (this->issued_non_pic_error_)
6019 return;
6020 gold_assert(parameters->options().output_is_position_independent());
6021 object->error(_("requires unsupported dynamic reloc; "
6022 "recompile with -fPIC"));
6023 this->issued_non_pic_error_ = true;
6024 return;
6025 }
6026
6027 // Return whether we need to make a PLT entry for a relocation of the
6028 // given type against a STT_GNU_IFUNC symbol.
6029
6030 template<int size, bool big_endian>
6031 bool
6032 Target_aarch64<size, big_endian>::Scan::reloc_needs_plt_for_ifunc(
6033 Sized_relobj_file<size, big_endian>* object,
6034 unsigned int r_type)
6035 {
6036 const AArch64_reloc_property* arp =
6037 aarch64_reloc_property_table->get_reloc_property(r_type);
6038 gold_assert(arp != NULL);
6039
6040 int flags = arp->reference_flags();
6041 if (flags & Symbol::TLS_REF)
6042 {
6043 gold_error(_("%s: unsupported TLS reloc %s for IFUNC symbol"),
6044 object->name().c_str(), arp->name().c_str());
6045 return false;
6046 }
6047 return flags != 0;
6048 }
6049
6050 // Scan a relocation for a local symbol.
6051
6052 template<int size, bool big_endian>
6053 inline void
6054 Target_aarch64<size, big_endian>::Scan::local(
6055 Symbol_table* symtab,
6056 Layout* layout,
6057 Target_aarch64<size, big_endian>* target,
6058 Sized_relobj_file<size, big_endian>* object,
6059 unsigned int data_shndx,
6060 Output_section* output_section,
6061 const elfcpp::Rela<size, big_endian>& rela,
6062 unsigned int r_type,
6063 const elfcpp::Sym<size, big_endian>& lsym,
6064 bool is_discarded)
6065 {
6066 if (is_discarded)
6067 return;
6068
6069 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6070 Reloc_section;
6071 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6072
6073 // A local STT_GNU_IFUNC symbol may require a PLT entry.
6074 bool is_ifunc = lsym.get_st_type() == elfcpp::STT_GNU_IFUNC;
6075 if (is_ifunc && this->reloc_needs_plt_for_ifunc(object, r_type))
6076 target->make_local_ifunc_plt_entry(symtab, layout, object, r_sym);
6077
6078 switch (r_type)
6079 {
6080 case elfcpp::R_AARCH64_NONE:
6081 break;
6082
6083 case elfcpp::R_AARCH64_ABS32:
6084 case elfcpp::R_AARCH64_ABS16:
6085 if (parameters->options().output_is_position_independent())
6086 {
6087 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6088 object->name().c_str(), r_type);
6089 }
6090 break;
6091
6092 case elfcpp::R_AARCH64_ABS64:
6093 // If building a shared library or PIE, we need to mark this as a dynamic
6094 // relocation, so that the dynamic loader can relocate it.
6095 if (parameters->options().output_is_position_independent())
6096 {
6097 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6098 rela_dyn->add_local_relative(object, r_sym,
6099 elfcpp::R_AARCH64_RELATIVE,
6100 output_section,
6101 data_shndx,
6102 rela.get_r_offset(),
6103 rela.get_r_addend(),
6104 is_ifunc);
6105 }
6106 break;
6107
6108 case elfcpp::R_AARCH64_PREL64:
6109 case elfcpp::R_AARCH64_PREL32:
6110 case elfcpp::R_AARCH64_PREL16:
6111 break;
6112
6113 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6114 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6115 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6116 // The above relocations are used to access GOT entries.
6117 {
6118 Output_data_got_aarch64<size, big_endian>* got =
6119 target->got_section(symtab, layout);
6120 bool is_new = false;
6121 // This symbol requires a GOT entry.
6122 if (is_ifunc)
6123 is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
6124 else
6125 is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
6126 if (is_new && parameters->options().output_is_position_independent())
6127 target->rela_dyn_section(layout)->
6128 add_local_relative(object,
6129 r_sym,
6130 elfcpp::R_AARCH64_RELATIVE,
6131 got,
6132 object->local_got_offset(r_sym,
6133 GOT_TYPE_STANDARD),
6134 0,
6135 false);
6136 }
6137 break;
6138
6139 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6140 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6141 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6142 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6143 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6144 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6145 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6146 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6147 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6148 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6149 if (parameters->options().output_is_position_independent())
6150 {
6151 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6152 object->name().c_str(), r_type);
6153 }
6154 break;
6155
6156 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6157 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6158 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6159 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6160 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6161 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6162 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6163 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6164 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6165 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6166 break;
6167
6168 // Control flow, pc-relative. We don't need to do anything for a relative
6169 // addressing relocation against a local symbol if it does not reference
6170 // the GOT.
6171 case elfcpp::R_AARCH64_TSTBR14:
6172 case elfcpp::R_AARCH64_CONDBR19:
6173 case elfcpp::R_AARCH64_JUMP26:
6174 case elfcpp::R_AARCH64_CALL26:
6175 break;
6176
6177 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6178 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
6179 {
6180 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6181 optimize_tls_reloc(!parameters->options().shared(), r_type);
6182 if (tlsopt == tls::TLSOPT_TO_LE)
6183 break;
6184
6185 layout->set_has_static_tls();
6186 // Create a GOT entry for the tp-relative offset.
6187 if (!parameters->doing_static_link())
6188 {
6189 Output_data_got_aarch64<size, big_endian>* got =
6190 target->got_section(symtab, layout);
6191 got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
6192 target->rela_dyn_section(layout),
6193 elfcpp::R_AARCH64_TLS_TPREL64);
6194 }
6195 else if (!object->local_has_got_offset(r_sym,
6196 GOT_TYPE_TLS_OFFSET))
6197 {
6198 Output_data_got_aarch64<size, big_endian>* got =
6199 target->got_section(symtab, layout);
6200 got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
6201 unsigned int got_offset =
6202 object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
6203 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6204 gold_assert(addend == 0);
6205 got->add_static_reloc(got_offset, elfcpp::R_AARCH64_TLS_TPREL64,
6206 object, r_sym);
6207 }
6208 }
6209 break;
6210
6211 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6212 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
6213 {
6214 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6215 optimize_tls_reloc(!parameters->options().shared(), r_type);
6216 if (tlsopt == tls::TLSOPT_TO_LE)
6217 {
6218 layout->set_has_static_tls();
6219 break;
6220 }
6221 gold_assert(tlsopt == tls::TLSOPT_NONE);
6222
6223 Output_data_got_aarch64<size, big_endian>* got =
6224 target->got_section(symtab, layout);
6225 got->add_local_pair_with_rel(object, r_sym, data_shndx,
6226 GOT_TYPE_TLS_PAIR,
6227 target->rela_dyn_section(layout),
6228 elfcpp::R_AARCH64_TLS_DTPMOD64);
6229 }
6230 break;
6231
6232 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6233 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6234 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6235 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6236 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6237 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6238 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6239 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
6240 {
6241 layout->set_has_static_tls();
6242 bool output_is_shared = parameters->options().shared();
6243 if (output_is_shared)
6244 gold_error(_("%s: unsupported TLSLE reloc %u in shared code."),
6245 object->name().c_str(), r_type);
6246 }
6247 break;
6248
6249 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6250 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
6251 {
6252 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6253 optimize_tls_reloc(!parameters->options().shared(), r_type);
6254 if (tlsopt == tls::TLSOPT_NONE)
6255 {
6256 // Create a GOT entry for the module index.
6257 target->got_mod_index_entry(symtab, layout, object);
6258 }
6259 else if (tlsopt != tls::TLSOPT_TO_LE)
6260 unsupported_reloc_local(object, r_type);
6261 }
6262 break;
6263
6264 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6265 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6266 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6267 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
6268 break;
6269
6270 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6271 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6272 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
6273 {
6274 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6275 optimize_tls_reloc(!parameters->options().shared(), r_type);
6276 target->define_tls_base_symbol(symtab, layout);
6277 if (tlsopt == tls::TLSOPT_NONE)
6278 {
6279 // Create reserved PLT and GOT entries for the resolver.
6280 target->reserve_tlsdesc_entries(symtab, layout);
6281
6282 // Generate a double GOT entry with an R_AARCH64_TLSDESC reloc.
6283 // The R_AARCH64_TLSDESC reloc is resolved lazily, so the GOT
6284 // entry needs to be in an area in .got.plt, not .got. Call
6285 // got_section to make sure the section has been created.
6286 target->got_section(symtab, layout);
6287 Output_data_got<size, big_endian>* got =
6288 target->got_tlsdesc_section();
6289 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
6290 if (!object->local_has_got_offset(r_sym, GOT_TYPE_TLS_DESC))
6291 {
6292 unsigned int got_offset = got->add_constant(0);
6293 got->add_constant(0);
6294 object->set_local_got_offset(r_sym, GOT_TYPE_TLS_DESC,
6295 got_offset);
6296 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6297 // We store the arguments we need in a vector, and use
6298 // the index into the vector as the parameter to pass
6299 // to the target specific routines.
6300 uintptr_t intarg = target->add_tlsdesc_info(object, r_sym);
6301 void* arg = reinterpret_cast<void*>(intarg);
6302 rt->add_target_specific(elfcpp::R_AARCH64_TLSDESC, arg,
6303 got, got_offset, 0);
6304 }
6305 }
6306 else if (tlsopt != tls::TLSOPT_TO_LE)
6307 unsupported_reloc_local(object, r_type);
6308 }
6309 break;
6310
6311 case elfcpp::R_AARCH64_TLSDESC_CALL:
6312 break;
6313
6314 default:
6315 unsupported_reloc_local(object, r_type);
6316 }
6317 }
6318
6319
6320 // Report an unsupported relocation against a global symbol.
6321
6322 template<int size, bool big_endian>
6323 void
6324 Target_aarch64<size, big_endian>::Scan::unsupported_reloc_global(
6325 Sized_relobj_file<size, big_endian>* object,
6326 unsigned int r_type,
6327 Symbol* gsym)
6328 {
6329 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
6330 object->name().c_str(), r_type, gsym->demangled_name().c_str());
6331 }
6332
6333 template<int size, bool big_endian>
6334 inline void
6335 Target_aarch64<size, big_endian>::Scan::global(
6336 Symbol_table* symtab,
6337 Layout* layout,
6338 Target_aarch64<size, big_endian>* target,
6339 Sized_relobj_file<size, big_endian> * object,
6340 unsigned int data_shndx,
6341 Output_section* output_section,
6342 const elfcpp::Rela<size, big_endian>& rela,
6343 unsigned int r_type,
6344 Symbol* gsym)
6345 {
6346 // A STT_GNU_IFUNC symbol may require a PLT entry.
6347 if (gsym->type() == elfcpp::STT_GNU_IFUNC
6348 && this->reloc_needs_plt_for_ifunc(object, r_type))
6349 target->make_plt_entry(symtab, layout, gsym);
6350
6351 typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
6352 Reloc_section;
6353 const AArch64_reloc_property* arp =
6354 aarch64_reloc_property_table->get_reloc_property(r_type);
6355 gold_assert(arp != NULL);
6356
6357 switch (r_type)
6358 {
6359 case elfcpp::R_AARCH64_NONE:
6360 break;
6361
6362 case elfcpp::R_AARCH64_ABS16:
6363 case elfcpp::R_AARCH64_ABS32:
6364 case elfcpp::R_AARCH64_ABS64:
6365 {
6366 // Make a PLT entry if necessary.
6367 if (gsym->needs_plt_entry())
6368 {
6369 target->make_plt_entry(symtab, layout, gsym);
6370 // Since this is not a PC-relative relocation, we may be
6371 // taking the address of a function. In that case we need to
6372 // set the entry in the dynamic symbol table to the address of
6373 // the PLT entry.
6374 if (gsym->is_from_dynobj() && !parameters->options().shared())
6375 gsym->set_needs_dynsym_value();
6376 }
6377 // Make a dynamic relocation if necessary.
6378 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6379 {
6380 if (!parameters->options().output_is_position_independent()
6381 && gsym->may_need_copy_reloc())
6382 {
6383 target->copy_reloc(symtab, layout, object,
6384 data_shndx, output_section, gsym, rela);
6385 }
6386 else if (r_type == elfcpp::R_AARCH64_ABS64
6387 && gsym->type() == elfcpp::STT_GNU_IFUNC
6388 && gsym->can_use_relative_reloc(false)
6389 && !gsym->is_from_dynobj()
6390 && !gsym->is_undefined()
6391 && !gsym->is_preemptible())
6392 {
6393 // Use an IRELATIVE reloc for a locally defined STT_GNU_IFUNC
6394 // symbol. This makes a function address in a PIE executable
6395 // match the address in a shared library that it links against.
6396 Reloc_section* rela_dyn =
6397 target->rela_irelative_section(layout);
6398 unsigned int r_type = elfcpp::R_AARCH64_IRELATIVE;
6399 rela_dyn->add_symbolless_global_addend(gsym, r_type,
6400 output_section, object,
6401 data_shndx,
6402 rela.get_r_offset(),
6403 rela.get_r_addend());
6404 }
6405 else if (r_type == elfcpp::R_AARCH64_ABS64
6406 && gsym->can_use_relative_reloc(false))
6407 {
6408 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6409 rela_dyn->add_global_relative(gsym,
6410 elfcpp::R_AARCH64_RELATIVE,
6411 output_section,
6412 object,
6413 data_shndx,
6414 rela.get_r_offset(),
6415 rela.get_r_addend(),
6416 false);
6417 }
6418 else
6419 {
6420 check_non_pic(object, r_type);
6421 Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>*
6422 rela_dyn = target->rela_dyn_section(layout);
6423 rela_dyn->add_global(
6424 gsym, r_type, output_section, object,
6425 data_shndx, rela.get_r_offset(),rela.get_r_addend());
6426 }
6427 }
6428 }
6429 break;
6430
6431 case elfcpp::R_AARCH64_PREL16:
6432 case elfcpp::R_AARCH64_PREL32:
6433 case elfcpp::R_AARCH64_PREL64:
6434 // This is used to fill the GOT absolute address.
6435 if (gsym->needs_plt_entry())
6436 {
6437 target->make_plt_entry(symtab, layout, gsym);
6438 }
6439 break;
6440
6441 case elfcpp::R_AARCH64_MOVW_UABS_G0: // 263
6442 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC: // 264
6443 case elfcpp::R_AARCH64_MOVW_UABS_G1: // 265
6444 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC: // 266
6445 case elfcpp::R_AARCH64_MOVW_UABS_G2: // 267
6446 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC: // 268
6447 case elfcpp::R_AARCH64_MOVW_UABS_G3: // 269
6448 case elfcpp::R_AARCH64_MOVW_SABS_G0: // 270
6449 case elfcpp::R_AARCH64_MOVW_SABS_G1: // 271
6450 case elfcpp::R_AARCH64_MOVW_SABS_G2: // 272
6451 if (parameters->options().output_is_position_independent())
6452 {
6453 gold_error(_("%s: unsupported reloc %u in pos independent link."),
6454 object->name().c_str(), r_type);
6455 }
6456 break;
6457
6458 case elfcpp::R_AARCH64_LD_PREL_LO19: // 273
6459 case elfcpp::R_AARCH64_ADR_PREL_LO21: // 274
6460 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21: // 275
6461 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC: // 276
6462 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC: // 277
6463 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC: // 278
6464 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC: // 284
6465 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC: // 285
6466 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC: // 286
6467 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC: // 299
6468 {
6469 if (gsym->needs_plt_entry())
6470 target->make_plt_entry(symtab, layout, gsym);
6471 // Make a dynamic relocation if necessary.
6472 if (gsym->needs_dynamic_reloc(arp->reference_flags()))
6473 {
6474 if (parameters->options().output_is_executable()
6475 && gsym->may_need_copy_reloc())
6476 {
6477 target->copy_reloc(symtab, layout, object,
6478 data_shndx, output_section, gsym, rela);
6479 }
6480 }
6481 break;
6482 }
6483
6484 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
6485 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
6486 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
6487 {
6488 // The above relocations are used to access GOT entries.
6489 // Note that a GOT entry holds the *address* of a symbol.
6490 // The symbol requires a GOT entry.
6491 Output_data_got_aarch64<size, big_endian>* got =
6492 target->got_section(symtab, layout);
6493 if (gsym->final_value_is_known())
6494 {
6495 // For a STT_GNU_IFUNC symbol we want the PLT address.
6496 if (gsym->type() == elfcpp::STT_GNU_IFUNC)
6497 got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6498 else
6499 got->add_global(gsym, GOT_TYPE_STANDARD);
6500 }
6501 else
6502 {
6503 // If this symbol is not fully resolved, we need to add a dynamic
6504 // relocation for it.
6505 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
6506
6507 // Use a GLOB_DAT rather than a RELATIVE reloc if:
6508 //
6509 // 1) The symbol may be defined in some other module.
6510 // 2) We are building a shared library and this is a protected
6511 // symbol; using GLOB_DAT means that the dynamic linker can use
6512 // the address of the PLT in the main executable when appropriate
6513 // so that function address comparisons work.
6514 // 3) This is a STT_GNU_IFUNC symbol in position dependent code,
6515 // again so that function address comparisons work.
6516 if (gsym->is_from_dynobj()
6517 || gsym->is_undefined()
6518 || gsym->is_preemptible()
6519 || (gsym->visibility() == elfcpp::STV_PROTECTED
6520 && parameters->options().shared())
6521 || (gsym->type() == elfcpp::STT_GNU_IFUNC
6522 && parameters->options().output_is_position_independent()))
6523 got->add_global_with_rel(gsym, GOT_TYPE_STANDARD,
6524 rela_dyn, elfcpp::R_AARCH64_GLOB_DAT);
6525 else
6526 {
6527 // For a STT_GNU_IFUNC symbol we want to write the PLT
6528 // offset into the GOT, so that function pointer
6529 // comparisons work correctly.
6530 bool is_new;
6531 if (gsym->type() != elfcpp::STT_GNU_IFUNC)
6532 is_new = got->add_global(gsym, GOT_TYPE_STANDARD);
6533 else
6534 {
6535 is_new = got->add_global_plt(gsym, GOT_TYPE_STANDARD);
6536 // Tell the dynamic linker to use the PLT address
6537 // when resolving relocations.
6538 if (gsym->is_from_dynobj()
6539 && !parameters->options().shared())
6540 gsym->set_needs_dynsym_value();
6541 }
6542 if (is_new)
6543 {
6544 rela_dyn->add_global_relative(
6545 gsym, elfcpp::R_AARCH64_RELATIVE,
6546 got,
6547 gsym->got_offset(GOT_TYPE_STANDARD),
6548 0,
6549 false);
6550 }
6551 }
6552 }
6553 break;
6554 }
6555
6556 case elfcpp::R_AARCH64_TSTBR14:
6557 case elfcpp::R_AARCH64_CONDBR19:
6558 case elfcpp::R_AARCH64_JUMP26:
6559 case elfcpp::R_AARCH64_CALL26:
6560 {
6561 if (gsym->final_value_is_known())
6562 break;
6563
6564 if (gsym->is_defined() &&
6565 !gsym->is_from_dynobj() &&
6566 !gsym->is_preemptible())
6567 break;
6568
6569 // Make plt entry for function call.
6570 target->make_plt_entry(symtab, layout, gsym);
6571 break;
6572 }
6573
6574 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
6575 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // General dynamic
6576 {
6577 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6578 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6579 if (tlsopt == tls::TLSOPT_TO_LE)
6580 {
6581 layout->set_has_static_tls();
6582 break;
6583 }
6584 gold_assert(tlsopt == tls::TLSOPT_NONE);
6585
6586 // General dynamic.
6587 Output_data_got_aarch64<size, big_endian>* got =
6588 target->got_section(symtab, layout);
6589 // Create 2 consecutive entries for module index and offset.
6590 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_PAIR,
6591 target->rela_dyn_section(layout),
6592 elfcpp::R_AARCH64_TLS_DTPMOD64,
6593 elfcpp::R_AARCH64_TLS_DTPREL64);
6594 }
6595 break;
6596
6597 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
6598 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local dynamic
6599 {
6600 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6601 optimize_tls_reloc(!parameters->options().shared(), r_type);
6602 if (tlsopt == tls::TLSOPT_NONE)
6603 {
6604 // Create a GOT entry for the module index.
6605 target->got_mod_index_entry(symtab, layout, object);
6606 }
6607 else if (tlsopt != tls::TLSOPT_TO_LE)
6608 unsupported_reloc_local(object, r_type);
6609 }
6610 break;
6611
6612 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
6613 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
6614 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
6615 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local dynamic
6616 break;
6617
6618 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
6619 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial executable
6620 {
6621 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6622 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6623 if (tlsopt == tls::TLSOPT_TO_LE)
6624 break;
6625
6626 layout->set_has_static_tls();
6627 // Create a GOT entry for the tp-relative offset.
6628 Output_data_got_aarch64<size, big_endian>* got
6629 = target->got_section(symtab, layout);
6630 if (!parameters->doing_static_link())
6631 {
6632 got->add_global_with_rel(
6633 gsym, GOT_TYPE_TLS_OFFSET,
6634 target->rela_dyn_section(layout),
6635 elfcpp::R_AARCH64_TLS_TPREL64);
6636 }
6637 if (!gsym->has_got_offset(GOT_TYPE_TLS_OFFSET))
6638 {
6639 got->add_global(gsym, GOT_TYPE_TLS_OFFSET);
6640 unsigned int got_offset =
6641 gsym->got_offset(GOT_TYPE_TLS_OFFSET);
6642 const elfcpp::Elf_Xword addend = rela.get_r_addend();
6643 gold_assert(addend == 0);
6644 got->add_static_reloc(got_offset,
6645 elfcpp::R_AARCH64_TLS_TPREL64, gsym);
6646 }
6647 }
6648 break;
6649
6650 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
6651 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
6652 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
6653 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
6654 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
6655 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
6656 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
6657 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC: // Local executable
6658 layout->set_has_static_tls();
6659 if (parameters->options().shared())
6660 gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
6661 object->name().c_str(), r_type);
6662 break;
6663
6664 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
6665 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
6666 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12: // TLS descriptor
6667 {
6668 target->define_tls_base_symbol(symtab, layout);
6669 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
6670 optimize_tls_reloc(gsym->final_value_is_known(), r_type);
6671 if (tlsopt == tls::TLSOPT_NONE)
6672 {
6673 // Create reserved PLT and GOT entries for the resolver.
6674 target->reserve_tlsdesc_entries(symtab, layout);
6675
6676 // Create a double GOT entry with an R_AARCH64_TLSDESC
6677 // relocation. The R_AARCH64_TLSDESC is resolved lazily, so the GOT
6678 // entry needs to be in an area in .got.plt, not .got. Call
6679 // got_section to make sure the section has been created.
6680 target->got_section(symtab, layout);
6681 Output_data_got<size, big_endian>* got =
6682 target->got_tlsdesc_section();
6683 Reloc_section* rt = target->rela_tlsdesc_section(layout);
6684 got->add_global_pair_with_rel(gsym, GOT_TYPE_TLS_DESC, rt,
6685 elfcpp::R_AARCH64_TLSDESC, 0);
6686 }
6687 else if (tlsopt == tls::TLSOPT_TO_IE)
6688 {
6689 // Create a GOT entry for the tp-relative offset.
6690 Output_data_got<size, big_endian>* got
6691 = target->got_section(symtab, layout);
6692 got->add_global_with_rel(gsym, GOT_TYPE_TLS_OFFSET,
6693 target->rela_dyn_section(layout),
6694 elfcpp::R_AARCH64_TLS_TPREL64);
6695 }
6696 else if (tlsopt != tls::TLSOPT_TO_LE)
6697 unsupported_reloc_global(object, r_type, gsym);
6698 }
6699 break;
6700
6701 case elfcpp::R_AARCH64_TLSDESC_CALL:
6702 break;
6703
6704 default:
6705 gold_error(_("%s: unsupported reloc type in global scan"),
6706 aarch64_reloc_property_table->
6707 reloc_name_in_error_message(r_type).c_str());
6708 }
6709 return;
6710 } // End of Scan::global
6711
6712
6713 // Create the PLT section.
6714 template<int size, bool big_endian>
6715 void
6716 Target_aarch64<size, big_endian>::make_plt_section(
6717 Symbol_table* symtab, Layout* layout)
6718 {
6719 if (this->plt_ == NULL)
6720 {
6721 // Create the GOT section first.
6722 this->got_section(symtab, layout);
6723
6724 this->plt_ = this->make_data_plt(layout, this->got_, this->got_plt_,
6725 this->got_irelative_);
6726
6727 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
6728 (elfcpp::SHF_ALLOC
6729 | elfcpp::SHF_EXECINSTR),
6730 this->plt_, ORDER_PLT, false);
6731
6732 // Make the sh_info field of .rela.plt point to .plt.
6733 Output_section* rela_plt_os = this->plt_->rela_plt()->output_section();
6734 rela_plt_os->set_info_section(this->plt_->output_section());
6735 }
6736 }
6737
6738 // Return the section for TLSDESC relocations.
6739
6740 template<int size, bool big_endian>
6741 typename Target_aarch64<size, big_endian>::Reloc_section*
6742 Target_aarch64<size, big_endian>::rela_tlsdesc_section(Layout* layout) const
6743 {
6744 return this->plt_section()->rela_tlsdesc(layout);
6745 }
6746
6747 // Create a PLT entry for a global symbol.
6748
6749 template<int size, bool big_endian>
6750 void
6751 Target_aarch64<size, big_endian>::make_plt_entry(
6752 Symbol_table* symtab,
6753 Layout* layout,
6754 Symbol* gsym)
6755 {
6756 if (gsym->has_plt_offset())
6757 return;
6758
6759 if (this->plt_ == NULL)
6760 this->make_plt_section(symtab, layout);
6761
6762 this->plt_->add_entry(symtab, layout, gsym);
6763 }
6764
6765 // Make a PLT entry for a local STT_GNU_IFUNC symbol.
6766
6767 template<int size, bool big_endian>
6768 void
6769 Target_aarch64<size, big_endian>::make_local_ifunc_plt_entry(
6770 Symbol_table* symtab, Layout* layout,
6771 Sized_relobj_file<size, big_endian>* relobj,
6772 unsigned int local_sym_index)
6773 {
6774 if (relobj->local_has_plt_offset(local_sym_index))
6775 return;
6776 if (this->plt_ == NULL)
6777 this->make_plt_section(symtab, layout);
6778 unsigned int plt_offset = this->plt_->add_local_ifunc_entry(symtab, layout,
6779 relobj,
6780 local_sym_index);
6781 relobj->set_local_plt_offset(local_sym_index, plt_offset);
6782 }
6783
6784 template<int size, bool big_endian>
6785 void
6786 Target_aarch64<size, big_endian>::gc_process_relocs(
6787 Symbol_table* symtab,
6788 Layout* layout,
6789 Sized_relobj_file<size, big_endian>* object,
6790 unsigned int data_shndx,
6791 unsigned int sh_type,
6792 const unsigned char* prelocs,
6793 size_t reloc_count,
6794 Output_section* output_section,
6795 bool needs_special_offset_handling,
6796 size_t local_symbol_count,
6797 const unsigned char* plocal_symbols)
6798 {
6799 typedef Target_aarch64<size, big_endian> Aarch64;
6800 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6801 Classify_reloc;
6802
6803 if (sh_type == elfcpp::SHT_REL)
6804 {
6805 return;
6806 }
6807
6808 gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6809 symtab,
6810 layout,
6811 this,
6812 object,
6813 data_shndx,
6814 prelocs,
6815 reloc_count,
6816 output_section,
6817 needs_special_offset_handling,
6818 local_symbol_count,
6819 plocal_symbols);
6820 }
6821
6822 // Scan relocations for a section.
6823
6824 template<int size, bool big_endian>
6825 void
6826 Target_aarch64<size, big_endian>::scan_relocs(
6827 Symbol_table* symtab,
6828 Layout* layout,
6829 Sized_relobj_file<size, big_endian>* object,
6830 unsigned int data_shndx,
6831 unsigned int sh_type,
6832 const unsigned char* prelocs,
6833 size_t reloc_count,
6834 Output_section* output_section,
6835 bool needs_special_offset_handling,
6836 size_t local_symbol_count,
6837 const unsigned char* plocal_symbols)
6838 {
6839 typedef Target_aarch64<size, big_endian> Aarch64;
6840 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
6841 Classify_reloc;
6842
6843 if (sh_type == elfcpp::SHT_REL)
6844 {
6845 gold_error(_("%s: unsupported REL reloc section"),
6846 object->name().c_str());
6847 return;
6848 }
6849
6850 gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
6851 symtab,
6852 layout,
6853 this,
6854 object,
6855 data_shndx,
6856 prelocs,
6857 reloc_count,
6858 output_section,
6859 needs_special_offset_handling,
6860 local_symbol_count,
6861 plocal_symbols);
6862 }
6863
6864 // Return the value to use for a dynamic symbol which requires special
6865 // treatment. This is how we support equality comparisons of function
6866 // pointers across shared library boundaries, as described in the
6867 // processor-specific ABI supplement.
6868
6869 template<int size, bool big_endian>
6870 uint64_t
6871 Target_aarch64<size, big_endian>::do_dynsym_value(const Symbol* gsym) const
6872 {
6873 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
6874 return this->plt_address_for_global(gsym);
6875 }
6876
6877
6878 // Finalize the sections.
6879
6880 template<int size, bool big_endian>
6881 void
6882 Target_aarch64<size, big_endian>::do_finalize_sections(
6883 Layout* layout,
6884 const Input_objects*,
6885 Symbol_table* symtab)
6886 {
6887 const Reloc_section* rel_plt = (this->plt_ == NULL
6888 ? NULL
6889 : this->plt_->rela_plt());
6890 layout->add_target_dynamic_tags(false, this->got_plt_, rel_plt,
6891 this->rela_dyn_, true, false);
6892
6893 // Emit any relocs we saved in an attempt to avoid generating COPY
6894 // relocs.
6895 if (this->copy_relocs_.any_saved_relocs())
6896 this->copy_relocs_.emit(this->rela_dyn_section(layout));
6897
6898 // Fill in some more dynamic tags.
6899 Output_data_dynamic* const odyn = layout->dynamic_data();
6900 if (odyn != NULL)
6901 {
6902 if (this->plt_ != NULL
6903 && this->plt_->output_section() != NULL
6904 && this->plt_->has_tlsdesc_entry())
6905 {
6906 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
6907 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
6908 this->got_->finalize_data_size();
6909 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
6910 this->plt_, plt_offset);
6911 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
6912 this->got_, got_offset);
6913 }
6914 }
6915
6916 // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
6917 // the .got.plt section.
6918 Symbol* sym = this->global_offset_table_;
6919 if (sym != NULL)
6920 {
6921 uint64_t data_size = this->got_plt_->current_data_size();
6922 symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
6923
6924 // If the .got section is more than 0x8000 bytes, we add
6925 // 0x8000 to the value of _GLOBAL_OFFSET_TABLE_, so that 16-bit
6926 // relocations have a greater chance of working.
6927 if (data_size >= 0x8000)
6928 symtab->get_sized_symbol<size>(sym)->set_value(
6929 symtab->get_sized_symbol<size>(sym)->value() + 0x8000);
6930 }
6931
6932 if (parameters->doing_static_link()
6933 && (this->plt_ == NULL || !this->plt_->has_irelative_section()))
6934 {
6935 // If linking statically, make sure that the __rela_iplt symbols
6936 // are defined if necessary, even if we didn't create a PLT.
6937 static const Define_symbol_in_segment syms[] =
6938 {
6939 {
6940 "__rela_iplt_start", // name
6941 elfcpp::PT_LOAD, // segment_type
6942 elfcpp::PF_W, // segment_flags_set
6943 elfcpp::PF(0), // segment_flags_clear
6944 0, // value
6945 0, // size
6946 elfcpp::STT_NOTYPE, // type
6947 elfcpp::STB_GLOBAL, // binding
6948 elfcpp::STV_HIDDEN, // visibility
6949 0, // nonvis
6950 Symbol::SEGMENT_START, // offset_from_base
6951 true // only_if_ref
6952 },
6953 {
6954 "__rela_iplt_end", // name
6955 elfcpp::PT_LOAD, // segment_type
6956 elfcpp::PF_W, // segment_flags_set
6957 elfcpp::PF(0), // segment_flags_clear
6958 0, // value
6959 0, // size
6960 elfcpp::STT_NOTYPE, // type
6961 elfcpp::STB_GLOBAL, // binding
6962 elfcpp::STV_HIDDEN, // visibility
6963 0, // nonvis
6964 Symbol::SEGMENT_START, // offset_from_base
6965 true // only_if_ref
6966 }
6967 };
6968
6969 symtab->define_symbols(layout, 2, syms,
6970 layout->script_options()->saw_sections_clause());
6971 }
6972
6973 return;
6974 }
6975
6976 // Perform a relocation.
6977
6978 template<int size, bool big_endian>
6979 inline bool
6980 Target_aarch64<size, big_endian>::Relocate::relocate(
6981 const Relocate_info<size, big_endian>* relinfo,
6982 unsigned int,
6983 Target_aarch64<size, big_endian>* target,
6984 Output_section* ,
6985 size_t relnum,
6986 const unsigned char* preloc,
6987 const Sized_symbol<size>* gsym,
6988 const Symbol_value<size>* psymval,
6989 unsigned char* view,
6990 typename elfcpp::Elf_types<size>::Elf_Addr address,
6991 section_size_type /* view_size */)
6992 {
6993 if (view == NULL)
6994 return true;
6995
6996 typedef AArch64_relocate_functions<size, big_endian> Reloc;
6997
6998 const elfcpp::Rela<size, big_endian> rela(preloc);
6999 unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
7000 const AArch64_reloc_property* reloc_property =
7001 aarch64_reloc_property_table->get_reloc_property(r_type);
7002
7003 if (reloc_property == NULL)
7004 {
7005 std::string reloc_name =
7006 aarch64_reloc_property_table->reloc_name_in_error_message(r_type);
7007 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7008 _("cannot relocate %s in object file"),
7009 reloc_name.c_str());
7010 return true;
7011 }
7012
7013 const Sized_relobj_file<size, big_endian>* object = relinfo->object;
7014
7015 // Pick the value to use for symbols defined in the PLT.
7016 Symbol_value<size> symval;
7017 if (gsym != NULL
7018 && gsym->use_plt_offset(reloc_property->reference_flags()))
7019 {
7020 symval.set_output_value(target->plt_address_for_global(gsym));
7021 psymval = &symval;
7022 }
7023 else if (gsym == NULL && psymval->is_ifunc_symbol())
7024 {
7025 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7026 if (object->local_has_plt_offset(r_sym))
7027 {
7028 symval.set_output_value(target->plt_address_for_local(object, r_sym));
7029 psymval = &symval;
7030 }
7031 }
7032
7033 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7034
7035 // Get the GOT offset if needed.
7036 // For aarch64, the GOT pointer points to the start of the GOT section.
7037 bool have_got_offset = false;
7038 int got_offset = 0;
7039 int got_base = (target->got_ != NULL
7040 ? (target->got_->current_data_size() >= 0x8000
7041 ? 0x8000 : 0)
7042 : 0);
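// For example, with a .got section of (say) 0x9000 bytes, got_base is 0x8000
// and the GOT offsets computed below are biased down by 0x8000; compare the
// similar 0x8000 bias applied to _GLOBAL_OFFSET_TABLE_ in
// do_finalize_sections above.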
7043 switch (r_type)
7044 {
7045 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0:
7046 case elfcpp::R_AARCH64_MOVW_GOTOFF_G0_NC:
7047 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1:
7048 case elfcpp::R_AARCH64_MOVW_GOTOFF_G1_NC:
7049 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2:
7050 case elfcpp::R_AARCH64_MOVW_GOTOFF_G2_NC:
7051 case elfcpp::R_AARCH64_MOVW_GOTOFF_G3:
7052 case elfcpp::R_AARCH64_GOTREL64:
7053 case elfcpp::R_AARCH64_GOTREL32:
7054 case elfcpp::R_AARCH64_GOT_LD_PREL19:
7055 case elfcpp::R_AARCH64_LD64_GOTOFF_LO15:
7056 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7057 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7058 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7059 if (gsym != NULL)
7060 {
7061 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
7062 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - got_base;
7063 }
7064 else
7065 {
7066 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7067 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
7068 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
7069 - got_base);
7070 }
7071 have_got_offset = true;
7072 break;
7073
7074 default:
7075 break;
7076 }
7077
7078 typename Reloc::Status reloc_status = Reloc::STATUS_OKAY;
7079 typename elfcpp::Elf_types<size>::Elf_Addr value;
7080 switch (r_type)
7081 {
7082 case elfcpp::R_AARCH64_NONE:
7083 break;
7084
7085 case elfcpp::R_AARCH64_ABS64:
7086 if (!parameters->options().apply_dynamic_relocs()
7087 && parameters->options().output_is_position_independent()
7088 && gsym != NULL
7089 && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
7090 && !gsym->can_use_relative_reloc(false))
7091 // We have generated an absolute dynamic relocation, so do not
7092 // apply the relocation statically. (Works around bugs in older
7093 // Android dynamic linkers.)
7094 break;
7095 reloc_status = Reloc::template rela_ua<64>(
7096 view, object, psymval, addend, reloc_property);
7097 break;
7098
7099 case elfcpp::R_AARCH64_ABS32:
7100 if (!parameters->options().apply_dynamic_relocs()
7101 && parameters->options().output_is_position_independent()
7102 && gsym != NULL
7103 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7104 // We have generated an absolute dynamic relocation, so do not
7105 // apply the relocation statically. (Works around bugs in older
7106 // Android dynamic linkers.)
7107 break;
7108 reloc_status = Reloc::template rela_ua<32>(
7109 view, object, psymval, addend, reloc_property);
7110 break;
7111
7112 case elfcpp::R_AARCH64_ABS16:
7113 if (!parameters->options().apply_dynamic_relocs()
7114 && parameters->options().output_is_position_independent()
7115 && gsym != NULL
7116 && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
7117 // We have generated an absolute dynamic relocation, so do not
7118 // apply the relocation statically. (Works around bugs in older
7119 // Android dynamic linkers.)
7120 break;
7121 reloc_status = Reloc::template rela_ua<16>(
7122 view, object, psymval, addend, reloc_property);
7123 break;
7124
7125 case elfcpp::R_AARCH64_PREL64:
7126 reloc_status = Reloc::template pcrela_ua<64>(
7127 view, object, psymval, addend, address, reloc_property);
7128 break;
7129
7130 case elfcpp::R_AARCH64_PREL32:
7131 reloc_status = Reloc::template pcrela_ua<32>(
7132 view, object, psymval, addend, address, reloc_property);
7133 break;
7134
7135 case elfcpp::R_AARCH64_PREL16:
7136 reloc_status = Reloc::template pcrela_ua<16>(
7137 view, object, psymval, addend, address, reloc_property);
7138 break;
7139
7140 case elfcpp::R_AARCH64_MOVW_UABS_G0:
7141 case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
7142 case elfcpp::R_AARCH64_MOVW_UABS_G1:
7143 case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
7144 case elfcpp::R_AARCH64_MOVW_UABS_G2:
7145 case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
7146 case elfcpp::R_AARCH64_MOVW_UABS_G3:
7147 reloc_status = Reloc::template rela_general<32>(
7148 view, object, psymval, addend, reloc_property);
7149 break;
7150 case elfcpp::R_AARCH64_MOVW_SABS_G0:
7151 case elfcpp::R_AARCH64_MOVW_SABS_G1:
7152 case elfcpp::R_AARCH64_MOVW_SABS_G2:
7153 reloc_status = Reloc::movnz(view, psymval->value(object, addend),
7154 reloc_property);
7155 break;
7156
7157 case elfcpp::R_AARCH64_LD_PREL_LO19:
7158 reloc_status = Reloc::template pcrela_general<32>(
7159 view, object, psymval, addend, address, reloc_property);
7160 break;
7161
7162 case elfcpp::R_AARCH64_ADR_PREL_LO21:
7163 reloc_status = Reloc::adr(view, object, psymval, addend,
7164 address, reloc_property);
7165 break;
7166
7167 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21_NC:
7168 case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:
7169 reloc_status = Reloc::adrp(view, object, psymval, addend, address,
7170 reloc_property);
7171 break;
7172
7173 case elfcpp::R_AARCH64_LDST8_ABS_LO12_NC:
7174 case elfcpp::R_AARCH64_LDST16_ABS_LO12_NC:
7175 case elfcpp::R_AARCH64_LDST32_ABS_LO12_NC:
7176 case elfcpp::R_AARCH64_LDST64_ABS_LO12_NC:
7177 case elfcpp::R_AARCH64_LDST128_ABS_LO12_NC:
7178 case elfcpp::R_AARCH64_ADD_ABS_LO12_NC:
7179 reloc_status = Reloc::template rela_general<32>(
7180 view, object, psymval, addend, reloc_property);
7181 break;
7182
7183 case elfcpp::R_AARCH64_CALL26:
7184 if (this->skip_call_tls_get_addr_)
7185 {
7186 // Double check that the bl to __tls_get_addr was rewritten by the
// GD/LD-to-LE relaxation into an add insn.
7187 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7188 Insntype insn = elfcpp::Swap<32, big_endian>::readval(
7189 reinterpret_cast<Insntype*>(view));
7190 gold_assert((insn & 0xff000000) == 0x91000000);
7191
7192 reloc_status = Reloc::STATUS_OKAY;
7193 this->skip_call_tls_get_addr_ = false;
7194 // Return false to stop further processing of this reloc.
7195 return false;
7196 }
7197 // Fall through.
7198 case elfcpp::R_AARCH64_JUMP26:
7199 if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
7200 gsym, psymval, object,
7201 target->stub_group_size_))
7202 break;
7203 // Fall through.
7204 case elfcpp::R_AARCH64_TSTBR14:
7205 case elfcpp::R_AARCH64_CONDBR19:
7206 reloc_status = Reloc::template pcrela_general<32>(
7207 view, object, psymval, addend, address, reloc_property);
7208 break;
7209
7210 case elfcpp::R_AARCH64_ADR_GOT_PAGE:
7211 gold_assert(have_got_offset);
7212 value = target->got_->address() + got_base + got_offset;
7213 reloc_status = Reloc::adrp(view, value + addend, address);
7214 break;
7215
7216 case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
7217 gold_assert(have_got_offset);
7218 value = target->got_->address() + got_base + got_offset;
7219 reloc_status = Reloc::template rela_general<32>(
7220 view, value, addend, reloc_property);
7221 break;
7222
7223 case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
7224 {
7225 gold_assert(have_got_offset);
7226 value = target->got_->address() + got_base + got_offset + addend -
7227 Reloc::Page(target->got_->address() + got_base);
7228 if ((value & 7) != 0)
7229 reloc_status = Reloc::STATUS_OVERFLOW;
7230 else
7231 reloc_status = Reloc::template reloc_common<32>(
7232 view, value, reloc_property);
7233 break;
7234 }
7235
7236 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7237 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7238 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7239 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7240 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7241 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7242 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7243 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7244 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7245 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7246 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7247 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7248 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7249 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7250 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7251 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7252 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7253 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7254 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7255 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7256 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7257 case elfcpp::R_AARCH64_TLSDESC_CALL:
7258 reloc_status = relocate_tls(relinfo, target, relnum, rela, r_type,
7259 gsym, psymval, view, address);
7260 break;
7261
7262 // These are dynamic relocations, which are unexpected when linking.
7263 case elfcpp::R_AARCH64_COPY:
7264 case elfcpp::R_AARCH64_GLOB_DAT:
7265 case elfcpp::R_AARCH64_JUMP_SLOT:
7266 case elfcpp::R_AARCH64_RELATIVE:
7267 case elfcpp::R_AARCH64_IRELATIVE:
7268 case elfcpp::R_AARCH64_TLS_DTPREL64:
7269 case elfcpp::R_AARCH64_TLS_DTPMOD64:
7270 case elfcpp::R_AARCH64_TLS_TPREL64:
7271 case elfcpp::R_AARCH64_TLSDESC:
7272 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7273 _("unexpected reloc %u in object file"),
7274 r_type);
7275 break;
7276
7277 default:
7278 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7279 _("unsupported reloc %s"),
7280 reloc_property->name().c_str());
7281 break;
7282 }
7283
7284 // Report any errors.
7285 switch (reloc_status)
7286 {
7287 case Reloc::STATUS_OKAY:
7288 break;
7289 case Reloc::STATUS_OVERFLOW:
7290 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7291 _("relocation overflow in %s"),
7292 reloc_property->name().c_str());
7293 break;
7294 case Reloc::STATUS_BAD_RELOC:
7295 gold_error_at_location(
7296 relinfo,
7297 relnum,
7298 rela.get_r_offset(),
7299 _("unexpected opcode while processing relocation %s"),
7300 reloc_property->name().c_str());
7301 break;
7302 default:
7303 gold_unreachable();
7304 }
7305
7306 return true;
7307 }
7308
7309
7310 template<int size, bool big_endian>
7311 inline
7312 typename AArch64_relocate_functions<size, big_endian>::Status
7313 Target_aarch64<size, big_endian>::Relocate::relocate_tls(
7314 const Relocate_info<size, big_endian>* relinfo,
7315 Target_aarch64<size, big_endian>* target,
7316 size_t relnum,
7317 const elfcpp::Rela<size, big_endian>& rela,
7318 unsigned int r_type, const Sized_symbol<size>* gsym,
7319 const Symbol_value<size>* psymval,
7320 unsigned char* view,
7321 typename elfcpp::Elf_types<size>::Elf_Addr address)
7322 {
7323 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7324 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7325
7326 Output_segment* tls_segment = relinfo->layout->tls_segment();
7327 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7328 const AArch64_reloc_property* reloc_property =
7329 aarch64_reloc_property_table->get_reloc_property(r_type);
7330 gold_assert(reloc_property != NULL);
7331
7332 const bool is_final = (gsym == NULL
7333 ? !parameters->options().shared()
7334 : gsym->final_value_is_known());
7335 tls::Tls_optimization tlsopt = Target_aarch64<size, big_endian>::
7336 optimize_tls_reloc(is_final, r_type);
7337
7338 Sized_relobj_file<size, big_endian>* object = relinfo->object;
7339 int tls_got_offset_type;
7340 switch (r_type)
7341 {
7342 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7343 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC: // Global-dynamic
7344 {
7345 if (tlsopt == tls::TLSOPT_TO_LE)
7346 {
7347 if (tls_segment == NULL)
7348 {
7349 gold_assert(parameters->errors()->error_count() > 0
7350 || issue_undefined_symbol_error(gsym));
7351 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7352 }
7353 return tls_gd_to_le(relinfo, target, rela, r_type, view,
7354 psymval);
7355 }
7356 else if (tlsopt == tls::TLSOPT_NONE)
7357 {
7358 tls_got_offset_type = GOT_TYPE_TLS_PAIR;
7359 // First, get the address of the GOT entry.
7360 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7361 if (gsym != NULL)
7362 {
7363 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7364 got_entry_address = target->got_->address() +
7365 gsym->got_offset(tls_got_offset_type);
7366 }
7367 else
7368 {
7369 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7370 gold_assert(
7371 object->local_has_got_offset(r_sym, tls_got_offset_type));
7372 got_entry_address = target->got_->address() +
7373 object->local_got_offset(r_sym, tls_got_offset_type);
7374 }
7375
7376 // Relocate the address into the adrp/ld or adrp/add pair.
7377 switch (r_type)
7378 {
7379 case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
7380 return aarch64_reloc_funcs::adrp(
7381 view, got_entry_address + addend, address);
7382
7383 break;
7384
7385 case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
7386 return aarch64_reloc_funcs::template rela_general<32>(
7387 view, got_entry_address, addend, reloc_property);
7388 break;
7389
7390 default:
7391 gold_unreachable();
7392 }
7393 }
7394 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
7395 _("unsupported gd_to_ie relaxation on reloc %u"),
7396 r_type);
7397 }
7398 break;
7399
7400 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7401 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC: // Local-dynamic
7402 {
7403 if (tlsopt == tls::TLSOPT_TO_LE)
7404 {
7405 if (tls_segment == NULL)
7406 {
7407 gold_assert(parameters->errors()->error_count() > 0
7408 || issue_undefined_symbol_error(gsym));
7409 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7410 }
7411 return this->tls_ld_to_le(relinfo, target, rela, r_type, view,
7412 psymval);
7413 }
7414
7415 gold_assert(tlsopt == tls::TLSOPT_NONE);
7416 // Relocate the field with the offset of the GOT entry for
7417 // the module index.
7418 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7419 got_entry_address = (target->got_mod_index_entry(NULL, NULL, NULL) +
7420 target->got_->address());
7421
7422 switch (r_type)
7423 {
7424 case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
7425 return aarch64_reloc_funcs::adrp(
7426 view, got_entry_address + addend, address);
7427 break;
7428
7429 case elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC:
7430 return aarch64_reloc_funcs::template rela_general<32>(
7431 view, got_entry_address, addend, reloc_property);
7432 break;
7433
7434 default:
7435 gold_unreachable();
7436 }
7437 }
7438 break;
7439
7440 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7441 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7442 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7443 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: // Other local-dynamic
7444 {
7445 AArch64_address value = psymval->value(object, 0);
7446 if (tlsopt == tls::TLSOPT_TO_LE)
7447 {
7448 if (tls_segment == NULL)
7449 {
7450 gold_assert(parameters->errors()->error_count() > 0
7451 || issue_undefined_symbol_error(gsym));
7452 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7453 }
7454 }
7455 switch (r_type)
7456 {
7457 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G1:
7458 return aarch64_reloc_funcs::movnz(view, value + addend,
7459 reloc_property);
7460 break;
7461
7462 case elfcpp::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7463 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_HI12:
7464 case elfcpp::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7465 return aarch64_reloc_funcs::template rela_general<32>(
7466 view, value, addend, reloc_property);
7467 break;
7468
7469 default:
7470 gold_unreachable();
7471 }
7472 // We should never reach here.
7473 }
7474 break;
7475
7476 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7477 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: // Initial-exec
7478 {
7479 if (tlsopt == tls::TLSOPT_TO_LE)
7480 {
7481 if (tls_segment == NULL)
7482 {
7483 gold_assert(parameters->errors()->error_count() > 0
7484 || issue_undefined_symbol_error(gsym));
7485 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7486 }
7487 return tls_ie_to_le(relinfo, target, rela, r_type, view,
7488 psymval);
7489 }
7490 tls_got_offset_type = GOT_TYPE_TLS_OFFSET;
7491
7492 // First, get the address of the GOT entry.
7493 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7494 if (gsym != NULL)
7495 {
7496 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7497 got_entry_address = target->got_->address() +
7498 gsym->got_offset(tls_got_offset_type);
7499 }
7500 else
7501 {
7502 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7503 gold_assert(
7504 object->local_has_got_offset(r_sym, tls_got_offset_type));
7505 got_entry_address = target->got_->address() +
7506 object->local_got_offset(r_sym, tls_got_offset_type);
7507 }
7508 // Relocate the address into the adrp/ld or adrp/add pair.
7509 switch (r_type)
7510 {
7511 case elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7512 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
7513 address);
7514 break;
7515 case elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7516 return aarch64_reloc_funcs::template rela_general<32>(
7517 view, got_entry_address, addend, reloc_property);
7518 default:
7519 gold_unreachable();
7520 }
7521 }
7522 // We shall never reach here.
7523 break;
7524
7525 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7526 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7527 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7528 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7529 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7530 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
7531 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
7532 case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7533 {
7534 gold_assert(tls_segment != NULL);
7535 AArch64_address value = psymval->value(object, 0);
7536
7537 if (!parameters->options().shared())
7538 {
7539 AArch64_address aligned_tcb_size =
7540 align_address(target->tcb_size(),
7541 tls_segment->maximum_alignment());
7542 value += aligned_tcb_size;
7543 switch (r_type)
7544 {
7545 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G2:
7546 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G1:
7547 case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0:
7548 return aarch64_reloc_funcs::movnz(view, value + addend,
7549 reloc_property);
7550 default:
7551 return aarch64_reloc_funcs::template
7552 rela_general<32>(view,
7553 value,
7554 addend,
7555 reloc_property);
7556 }
7557 }
7558 else
7559 gold_error(_("%s: unsupported reloc %u "
7560 "in non-static TLSLE mode."),
7561 object->name().c_str(), r_type);
7562 }
7563 break;
7564
7565 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7566 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7567 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7568 case elfcpp::R_AARCH64_TLSDESC_CALL:
7569 {
7570 if (tlsopt == tls::TLSOPT_TO_LE)
7571 {
7572 if (tls_segment == NULL)
7573 {
7574 gold_assert(parameters->errors()->error_count() > 0
7575 || issue_undefined_symbol_error(gsym));
7576 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7577 }
7578 return tls_desc_gd_to_le(relinfo, target, rela, r_type,
7579 view, psymval);
7580 }
7581 else
7582 {
7583 tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
7584 ? GOT_TYPE_TLS_OFFSET
7585 : GOT_TYPE_TLS_DESC);
7586 int got_tlsdesc_offset = 0;
7587 if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
7588 && tlsopt == tls::TLSOPT_NONE)
7589 {
7590 // We created GOT entries in the .got.tlsdesc portion of the
7591 // .got.plt section, but the offset stored in the symbol is the
7592 // offset within .got.tlsdesc.
7593 got_tlsdesc_offset = (target->got_tlsdesc_->address()
7594 - target->got_->address());
7595 }
7596 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
7597 if (gsym != NULL)
7598 {
7599 gold_assert(gsym->has_got_offset(tls_got_offset_type));
7600 got_entry_address = target->got_->address()
7601 + got_tlsdesc_offset
7602 + gsym->got_offset(tls_got_offset_type);
7603 }
7604 else
7605 {
7606 unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
7607 gold_assert(
7608 object->local_has_got_offset(r_sym, tls_got_offset_type));
7609 got_entry_address = target->got_->address() +
7610 got_tlsdesc_offset +
7611 object->local_got_offset(r_sym, tls_got_offset_type);
7612 }
7613 if (tlsopt == tls::TLSOPT_TO_IE)
7614 {
7615 return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
7616 view, psymval, got_entry_address,
7617 address);
7618 }
7619
7620 // Now do tlsdesc relocation.
7621 switch (r_type)
7622 {
7623 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7624 return aarch64_reloc_funcs::adrp(view,
7625 got_entry_address + addend,
7626 address);
7627 break;
7628 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7629 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7630 return aarch64_reloc_funcs::template rela_general<32>(
7631 view, got_entry_address, addend, reloc_property);
7632 break;
7633 case elfcpp::R_AARCH64_TLSDESC_CALL:
7634 return aarch64_reloc_funcs::STATUS_OKAY;
7635 break;
7636 default:
7637 gold_unreachable();
7638 }
7639 }
7640 }
7641 break;
7642
7643 default:
7644 gold_error(_("%s: unsupported TLS reloc %u."),
7645 object->name().c_str(), r_type);
7646 }
7647 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7648 } // End of relocate_tls.
7649
7650
7651 template<int size, bool big_endian>
7652 inline
7653 typename AArch64_relocate_functions<size, big_endian>::Status
7654 Target_aarch64<size, big_endian>::Relocate::tls_gd_to_le(
7655 const Relocate_info<size, big_endian>* relinfo,
7656 Target_aarch64<size, big_endian>* target,
7657 const elfcpp::Rela<size, big_endian>& rela,
7658 unsigned int r_type,
7659 unsigned char* view,
7660 const Symbol_value<size>* psymval)
7661 {
7662 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7663 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7664 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7665
7666 Insntype* ip = reinterpret_cast<Insntype*>(view);
7667 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7668 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7669 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7670
7671 if (r_type == elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC)
7672 {
7673 // This is the 2nd reloc; the optimization should already have been
7674 // done.
7675 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7676 return aarch64_reloc_funcs::STATUS_OKAY;
7677 }
7678
7679 // The original sequence is -
7680 // 90000000 adrp x0, 0 <main>
7681 // 91000000 add x0, x0, #0x0
7682 // 94000000 bl 0 <__tls_get_addr>
7683 // optimized to sequence -
7684 // d53bd040 mrs x0, tpidr_el0
7685 // 91400000 add x0, x0, #0x0, lsl #12
7686 // 91000000 add x0, x0, #0x0
7687
7688 // Unlike tls_ie_to_le, we change all 3 insns in one go when we encounter
7689 // the first relocation, "R_AARCH64_TLSGD_ADR_PAGE21", because we also have
7690 // to change "bl __tls_get_addr", which does not have a corresponding tls
7691 // relocation type. So before proceeding, we need to make sure the compiler
7692 // has not changed the sequence.
7693 if (!(insn1 == 0x90000000 // adrp x0,0
7694 && insn2 == 0x91000000 // add x0, x0, #0x0
7695 && insn3 == 0x94000000)) // bl 0
7696 {
7697 // Ideally we should give up gd_to_le relaxation and do gd access.
7698 // However, the gd_to_le relaxation decision was made early in the
7699 // scan stage, where we did not allocate any GOT entry for this
7700 // symbol. Therefore we have to exit and report an error now.
7701 gold_error(_("unexpected reloc insn sequence while relaxing "
7702 "tls gd to le for reloc %u."), r_type);
7703 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7704 }
7705
7706 // Write new insns.
7707 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7708 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7709 insn3 = 0x91000000; // add x0, x0, #0x0
7710 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7711 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7712 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7713
7714 // Calculate tprel value.
7715 Output_segment* tls_segment = relinfo->layout->tls_segment();
7716 gold_assert(tls_segment != NULL);
7717 AArch64_address value = psymval->value(relinfo->object, 0);
7718 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7719 AArch64_address aligned_tcb_size =
7720 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7721 AArch64_address x = value + aligned_tcb_size;
7722
7723 // After new insns are written, apply TLSLE relocs.
7724 const AArch64_reloc_property* rp1 =
7725 aarch64_reloc_property_table->get_reloc_property(
7726 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7727 const AArch64_reloc_property* rp2 =
7728 aarch64_reloc_property_table->get_reloc_property(
7729 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7730 gold_assert(rp1 != NULL && rp2 != NULL);
7731
7732 typename aarch64_reloc_funcs::Status s1 =
7733 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7734 x,
7735 addend,
7736 rp1);
7737 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7738 return s1;
7739
7740 typename aarch64_reloc_funcs::Status s2 =
7741 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7742 x,
7743 addend,
7744 rp2);
7745
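// Setting this flag tells Relocate::relocate to skip the R_AARCH64_CALL26
// reloc on the bl to __tls_get_addr, which the rewrite above has turned into
// an add insn; see the R_AARCH64_CALL26 case in Relocate::relocate.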
7746 this->skip_call_tls_get_addr_ = true;
7747 return s2;
7748 } // End of tls_gd_to_le
7749
7750
7751 template<int size, bool big_endian>
7752 inline
7753 typename AArch64_relocate_functions<size, big_endian>::Status
7754 Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
7755 const Relocate_info<size, big_endian>* relinfo,
7756 Target_aarch64<size, big_endian>* target,
7757 const elfcpp::Rela<size, big_endian>& rela,
7758 unsigned int r_type,
7759 unsigned char* view,
7760 const Symbol_value<size>* psymval)
7761 {
7762 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7763 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7764 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7765
7766 Insntype* ip = reinterpret_cast<Insntype*>(view);
7767 Insntype insn1 = elfcpp::Swap<32, big_endian>::readval(ip);
7768 Insntype insn2 = elfcpp::Swap<32, big_endian>::readval(ip + 1);
7769 Insntype insn3 = elfcpp::Swap<32, big_endian>::readval(ip + 2);
7770
7771 if (r_type == elfcpp::R_AARCH64_TLSLD_ADD_LO12_NC)
7772 {
7773 // This is the 2nd reloc; the optimization should already have been
7774 // done.
7775 gold_assert((insn1 & 0xfff00000) == 0x91400000);
7776 return aarch64_reloc_funcs::STATUS_OKAY;
7777 }
7778
7779 // The original sequence is -
7780 // 90000000 adrp x0, 0 <main>
7781 // 91000000 add x0, x0, #0x0
7782 // 94000000 bl 0 <__tls_get_addr>
7783 // optimized to sequence -
7784 // d53bd040 mrs x0, tpidr_el0
7785 // 91400000 add x0, x0, #0x0, lsl #12
7786 // 91000000 add x0, x0, #0x0
7787
7788 // Unlike tls_ie_to_le, we change all 3 insns in one go when we encounter
7789 // the first relocation, "R_AARCH64_TLSLD_ADR_PAGE21", because we also have
7790 // to change "bl __tls_get_addr", which does not have a corresponding tls
7791 // relocation type. So before proceeding, we need to make sure the compiler
7792 // has not changed the sequence.
7793 if (!(insn1 == 0x90000000 // adrp x0,0
7794 && insn2 == 0x91000000 // add x0, x0, #0x0
7795 && insn3 == 0x94000000)) // bl 0
7796 {
7797 // Ideally we should give up ld_to_le relaxation and do ld access.
7798 // However, the ld_to_le relaxation decision was made early in the
7799 // scan stage, where we did not allocate a GOT entry for this
7800 // symbol. Therefore we have to exit and report an error now.
7801 gold_error(_("unexpected reloc insn sequence while relaxing "
7802 "tls ld to le for reloc %u."), r_type);
7803 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7804 }
7805
7806 // Write new insns.
7807 insn1 = 0xd53bd040; // mrs x0, tpidr_el0
7808 insn2 = 0x91400000; // add x0, x0, #0x0, lsl #12
7809 insn3 = 0x91000000; // add x0, x0, #0x0
7810 elfcpp::Swap<32, big_endian>::writeval(ip, insn1);
7811 elfcpp::Swap<32, big_endian>::writeval(ip + 1, insn2);
7812 elfcpp::Swap<32, big_endian>::writeval(ip + 2, insn3);
7813
7814 // Calculate tprel value.
7815 Output_segment* tls_segment = relinfo->layout->tls_segment();
7816 gold_assert(tls_segment != NULL);
7817 AArch64_address value = psymval->value(relinfo->object, 0);
7818 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7819 AArch64_address aligned_tcb_size =
7820 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7821 AArch64_address x = value + aligned_tcb_size;
7822
7823 // After new insns are written, apply TLSLE relocs.
7824 const AArch64_reloc_property* rp1 =
7825 aarch64_reloc_property_table->get_reloc_property(
7826 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12);
7827 const AArch64_reloc_property* rp2 =
7828 aarch64_reloc_property_table->get_reloc_property(
7829 elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12);
7830 gold_assert(rp1 != NULL && rp2 != NULL);
7831
7832 typename aarch64_reloc_funcs::Status s1 =
7833 aarch64_reloc_funcs::template rela_general<32>(view + 4,
7834 x,
7835 addend,
7836 rp1);
7837 if (s1 != aarch64_reloc_funcs::STATUS_OKAY)
7838 return s1;
7839
7840 typename aarch64_reloc_funcs::Status s2 =
7841 aarch64_reloc_funcs::template rela_general<32>(view + 8,
7842 x,
7843 addend,
7844 rp2);
7845
7846 this->skip_call_tls_get_addr_ = true;
7847 return s2;
7848
7849 } // End of tls_ld_to_le
7850
7851 template<int size, bool big_endian>
7852 inline
7853 typename AArch64_relocate_functions<size, big_endian>::Status
7854 Target_aarch64<size, big_endian>::Relocate::tls_ie_to_le(
7855 const Relocate_info<size, big_endian>* relinfo,
7856 Target_aarch64<size, big_endian>* target,
7857 const elfcpp::Rela<size, big_endian>& rela,
7858 unsigned int r_type,
7859 unsigned char* view,
7860 const Symbol_value<size>* psymval)
7861 {
7862 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7863 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7864 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7865
7866 AArch64_address value = psymval->value(relinfo->object, 0);
7867 Output_segment* tls_segment = relinfo->layout->tls_segment();
7868 AArch64_address aligned_tcb_address =
7869 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7870 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7871 AArch64_address x = value + addend + aligned_tcb_address;
7872 // "x" is the offset from tp; we can only do this if x is within
7873 // the range [0, 2^32-1].
7874 if (!(size == 32 || (size == 64 && (static_cast<uint64_t>(x) >> 32) == 0)))
7875 {
7876 gold_error(_("TLS variable referred to by reloc %u is too far from TP."),
7877 r_type);
7878 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7879 }
7880
7881 Insntype* ip = reinterpret_cast<Insntype*>(view);
7882 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
7883 unsigned int regno;
7884 Insntype newinsn;
7885 if (r_type == elfcpp::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21)
7886 {
7887 // Generate movz.
7888 regno = (insn & 0x1f);
7889 newinsn = (0xd2a00000 | regno) | (((x >> 16) & 0xffff) << 5);
7890 }
7891 else if (r_type == elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC)
7892 {
7893 // Generate movk.
7894 regno = (insn & 0x1f);
7895 gold_assert(regno == ((insn >> 5) & 0x1f));
7896 newinsn = (0xf2800000 | regno) | ((x & 0xffff) << 5);
7897 }
7898 else
7899 gold_unreachable();
7900
7901 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7902 return aarch64_reloc_funcs::STATUS_OKAY;
7903 } // End of tls_ie_to_le
7904
7905
7906 template<int size, bool big_endian>
7907 inline
7908 typename AArch64_relocate_functions<size, big_endian>::Status
7909 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_le(
7910 const Relocate_info<size, big_endian>* relinfo,
7911 Target_aarch64<size, big_endian>* target,
7912 const elfcpp::Rela<size, big_endian>& rela,
7913 unsigned int r_type,
7914 unsigned char* view,
7915 const Symbol_value<size>* psymval)
7916 {
7917 typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
7918 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7919 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7920
7921 // TLSDESC-GD sequence is like:
7922 // adrp x0, :tlsdesc:v1
7923 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7924 // add x0, x0, :tlsdesc_lo12:v1
7925 // .tlsdesccall v1
7926 // blr x1
7927 // After desc_gd_to_le optimization, the sequence will be like:
7928 // movz x0, #0x0, lsl #16
7929 // movk x0, #0x10
7930 // nop
7931 // nop
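// (Per-insn mapping, for reference: the adrp (ADR_PAGE21) becomes the movz,
// the ldr (LD64_LO12) becomes the movk, and the add (ADD_LO12) and the blr
// (TLSDESC_CALL) become nops; see the switch on r_type below.)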
7932
7933 // Calculate tprel value.
7934 Output_segment* tls_segment = relinfo->layout->tls_segment();
7935 gold_assert(tls_segment != NULL);
7936 Insntype* ip = reinterpret_cast<Insntype*>(view);
7937 const elfcpp::Elf_Xword addend = rela.get_r_addend();
7938 AArch64_address value = psymval->value(relinfo->object, addend);
7939 AArch64_address aligned_tcb_size =
7940 align_address(target->tcb_size(), tls_segment->maximum_alignment());
7941 AArch64_address x = value + aligned_tcb_size;
7942 // x is the offset from tp; we can only do this if x is within the
7943 // range [0, 2^32-1]. If x is out of range, fail and exit.
7944 if (size == 64 && (static_cast<uint64_t>(x) >> 32) != 0)
7945 {
7946 gold_error(_("TLS variable referred to by reloc %u is too far from TP; "
7947 "we can't do gd_to_le relaxation."), r_type);
7948 return aarch64_reloc_funcs::STATUS_BAD_RELOC;
7949 }
7950 Insntype newinsn;
7951 switch (r_type)
7952 {
7953 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
7954 case elfcpp::R_AARCH64_TLSDESC_CALL:
7955 // Change to nop
7956 newinsn = 0xd503201f;
7957 break;
7958
7959 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
7960 // Change to movz.
7961 newinsn = 0xd2a00000 | (((x >> 16) & 0xffff) << 5);
7962 break;
7963
7964 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
7965 // Change to movk.
7966 newinsn = 0xf2800000 | ((x & 0xffff) << 5);
7967 break;
7968
7969 default:
7970 gold_error(_("unsupported tlsdesc gd_to_le optimization on reloc %u"),
7971 r_type);
7972 gold_unreachable();
7973 }
7974 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
7975 return aarch64_reloc_funcs::STATUS_OKAY;
7976 } // End of tls_desc_gd_to_le
7977
7978
7979 template<int size, bool big_endian>
7980 inline
7981 typename AArch64_relocate_functions<size, big_endian>::Status
7982 Target_aarch64<size, big_endian>::Relocate::tls_desc_gd_to_ie(
7983 const Relocate_info<size, big_endian>* /* relinfo */,
7984 Target_aarch64<size, big_endian>* /* target */,
7985 const elfcpp::Rela<size, big_endian>& rela,
7986 unsigned int r_type,
7987 unsigned char* view,
7988 const Symbol_value<size>* /* psymval */,
7989 typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address,
7990 typename elfcpp::Elf_types<size>::Elf_Addr address)
7991 {
7992 typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
7993 typedef AArch64_relocate_functions<size, big_endian> aarch64_reloc_funcs;
7994
7995 // TLSDESC-GD sequence is like:
7996 // adrp x0, :tlsdesc:v1
7997 // ldr x1, [x0, #:tlsdesc_lo12:v1]
7998 // add x0, x0, :tlsdesc_lo12:v1
7999 // .tlsdesccall v1
8000 // blr x1
8001 // After desc_gd_to_ie optimization, the sequence will be like:
8002 // adrp x0, :tlsie:v1
8003 // ldr x0, [x0, :tlsie_lo12:v1]
8004 // nop
8005 // nop
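// (Per-insn mapping, for reference: the adrp (ADR_PAGE21) is retargeted at
// the IE GOT entry, the ldr (LD64_LO12) gets its destination forced to x0 and
// is relocated as TLSIE_LD64_GOTTPREL_LO12_NC, and the add (ADD_LO12) and the
// blr (TLSDESC_CALL) become nops; see the switch on r_type below.)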
8006
8007 Insntype* ip = reinterpret_cast<Insntype*>(view);
8008 const elfcpp::Elf_Xword addend = rela.get_r_addend();
8009 Insntype newinsn;
8010 switch (r_type)
8011 {
8012 case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
8013 case elfcpp::R_AARCH64_TLSDESC_CALL:
8014 // Change to nop
8015 newinsn = 0xd503201f;
8016 elfcpp::Swap<32, big_endian>::writeval(ip, newinsn);
8017 break;
8018
8019 case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
8020 {
8021 return aarch64_reloc_funcs::adrp(view, got_entry_address + addend,
8022 address);
8023 }
8024 break;
8025
8026 case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
8027 {
8028 // Set ldr target register to be x0.
8029 Insntype insn = elfcpp::Swap<32, big_endian>::readval(ip);
8030 insn &= 0xffffffe0;
8031 elfcpp::Swap<32, big_endian>::writeval(ip, insn);
8032 // Do relocation.
8033 const AArch64_reloc_property* reloc_property =
8034 aarch64_reloc_property_table->get_reloc_property(
8035 elfcpp::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
8036 return aarch64_reloc_funcs::template rela_general<32>(
8037 view, got_entry_address, addend, reloc_property);
8038 }
8039 break;
8040
8041 default:
8042 gold_error(_("unsupported tlsdesc gd_to_ie optimization on reloc %u"),
8043 r_type);
8044 gold_unreachable();
8045 }
8046 return aarch64_reloc_funcs::STATUS_OKAY;
8047 } // End of tls_desc_gd_to_ie
8048
8049 // Relocate section data.
8050
8051 template<int size, bool big_endian>
8052 void
8053 Target_aarch64<size, big_endian>::relocate_section(
8054 const Relocate_info<size, big_endian>* relinfo,
8055 unsigned int sh_type,
8056 const unsigned char* prelocs,
8057 size_t reloc_count,
8058 Output_section* output_section,
8059 bool needs_special_offset_handling,
8060 unsigned char* view,
8061 typename elfcpp::Elf_types<size>::Elf_Addr address,
8062 section_size_type view_size,
8063 const Reloc_symbol_changes* reloc_symbol_changes)
8064 {
8065 typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
8066 typedef Target_aarch64<size, big_endian> Aarch64;
8067 typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
8068 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8069 Classify_reloc;
8070
8071 gold_assert(sh_type == elfcpp::SHT_RELA);
8072
8073 // See if we are relocating a relaxed input section. If so, the view
8074 // covers the whole output section and we need to adjust accordingly.
8075 if (needs_special_offset_handling)
8076 {
8077 const Output_relaxed_input_section* poris =
8078 output_section->find_relaxed_input_section(relinfo->object,
8079 relinfo->data_shndx);
8080 if (poris != NULL)
8081 {
8082 Address section_address = poris->address();
8083 section_size_type section_size = poris->data_size();
8084
8085 gold_assert((section_address >= address)
8086 && ((section_address + section_size)
8087 <= (address + view_size)));
8088
8089 off_t offset = section_address - address;
8090 view += offset;
8091 address += offset;
8092 view_size = section_size;
8093 }
8094 }
8095
8096 gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
8097 gold::Default_comdat_behavior, Classify_reloc>(
8098 relinfo,
8099 this,
8100 prelocs,
8101 reloc_count,
8102 output_section,
8103 needs_special_offset_handling,
8104 view,
8105 address,
8106 view_size,
8107 reloc_symbol_changes);
8108 }
8109
8110 // Scan the relocs during a relocatable link.
8111
8112 template<int size, bool big_endian>
8113 void
8114 Target_aarch64<size, big_endian>::scan_relocatable_relocs(
8115 Symbol_table* symtab,
8116 Layout* layout,
8117 Sized_relobj_file<size, big_endian>* object,
8118 unsigned int data_shndx,
8119 unsigned int sh_type,
8120 const unsigned char* prelocs,
8121 size_t reloc_count,
8122 Output_section* output_section,
8123 bool needs_special_offset_handling,
8124 size_t local_symbol_count,
8125 const unsigned char* plocal_symbols,
8126 Relocatable_relocs* rr)
8127 {
8128 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8129 Classify_reloc;
8130 typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
8131 Scan_relocatable_relocs;
8132
8133 gold_assert(sh_type == elfcpp::SHT_RELA);
8134
8135 gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
8136 symtab,
8137 layout,
8138 object,
8139 data_shndx,
8140 prelocs,
8141 reloc_count,
8142 output_section,
8143 needs_special_offset_handling,
8144 local_symbol_count,
8145 plocal_symbols,
8146 rr);
8147 }
8148
8149 // Scan the relocs for --emit-relocs.
8150
8151 template<int size, bool big_endian>
8152 void
8153 Target_aarch64<size, big_endian>::emit_relocs_scan(
8154 Symbol_table* symtab,
8155 Layout* layout,
8156 Sized_relobj_file<size, big_endian>* object,
8157 unsigned int data_shndx,
8158 unsigned int sh_type,
8159 const unsigned char* prelocs,
8160 size_t reloc_count,
8161 Output_section* output_section,
8162 bool needs_special_offset_handling,
8163 size_t local_symbol_count,
8164 const unsigned char* plocal_syms,
8165 Relocatable_relocs* rr)
8166 {
8167 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8168 Classify_reloc;
8169 typedef gold::Default_emit_relocs_strategy<Classify_reloc>
8170 Emit_relocs_strategy;
8171
8172 gold_assert(sh_type == elfcpp::SHT_RELA);
8173
8174 gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
8175 symtab,
8176 layout,
8177 object,
8178 data_shndx,
8179 prelocs,
8180 reloc_count,
8181 output_section,
8182 needs_special_offset_handling,
8183 local_symbol_count,
8184 plocal_syms,
8185 rr);
8186 }
8187
8188 // Relocate a section during a relocatable link.
8189
8190 template<int size, bool big_endian>
8191 void
8192 Target_aarch64<size, big_endian>::relocate_relocs(
8193 const Relocate_info<size, big_endian>* relinfo,
8194 unsigned int sh_type,
8195 const unsigned char* prelocs,
8196 size_t reloc_count,
8197 Output_section* output_section,
8198 typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
8199 unsigned char* view,
8200 typename elfcpp::Elf_types<size>::Elf_Addr view_address,
8201 section_size_type view_size,
8202 unsigned char* reloc_view,
8203 section_size_type reloc_view_size)
8204 {
8205 typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
8206 Classify_reloc;
8207
8208 gold_assert(sh_type == elfcpp::SHT_RELA);
8209
8210 gold::relocate_relocs<size, big_endian, Classify_reloc>(
8211 relinfo,
8212 prelocs,
8213 reloc_count,
8214 output_section,
8215 offset_in_output_section,
8216 view,
8217 view_address,
8218 view_size,
8219 reloc_view,
8220 reloc_view_size);
8221 }
8222
8223
8224 // Return whether this is a 3-insn erratum 843419 sequence.
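// As an illustration (hypothetical registers and immediates), the predicate
// below is meant to match sequences of the shape:
//
//   adrp x0, some_page       // insn1: sets the base register (here x0)
//   stp  x1, x2, [x3, #8]    // insn2: a mem op; pair ops must be stores
//   ldr  x4, [x0, #16]       // insn3: "load/store register (unsigned
//                            //        immediate)" whose base register is the
//                            //        adrp destination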
8225
8226 template<int size, bool big_endian>
8227 bool
8228 Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
8229 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8230 typename elfcpp::Swap<32,big_endian>::Valtype insn2,
8231 typename elfcpp::Swap<32,big_endian>::Valtype insn3)
8232 {
8233 unsigned rt1, rt2;
8234 bool load, pair;
8235
8236 // The 2nd insn is a single-register load or store, or a register-pair
8237 // store.
8238 if (Insn_utilities::aarch64_mem_op_p(insn2, &rt1, &rt2, &pair, &load)
8239 && (!pair || (pair && !load)))
8240 {
8241 // The 3rd insn is a load or store instruction from the "Load/store
8242 // register (unsigned immediate)" encoding class, using Rn as the
8243 // base address register.
8244 if (Insn_utilities::aarch64_ldst_uimm(insn3)
8245 && (Insn_utilities::aarch64_rn(insn3)
8246 == Insn_utilities::aarch64_rd(insn1)))
8247 return true;
8248 }
8249 return false;
8250 }
8251
8252
8253 // Return whether this is an erratum 835769 sequence.
8254 // (Implemented similarly to elfnn-aarch64.c.)
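// As an illustration (hypothetical registers), the predicate below is meant
// to match sequences of the shape:
//
//   ldr  x2, [x10]           // insn1: a (non-SIMD) memory op
//   madd x0, x4, x5, x6      // insn2: a multiply-accumulate that does not
//                            //        consume the loaded value (no RAW
//                            //        dependency), so a stub is warranted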
8255
8256 template<int size, bool big_endian>
8257 bool
8258 Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
8259 typename elfcpp::Swap<32,big_endian>::Valtype insn1,
8260 typename elfcpp::Swap<32,big_endian>::Valtype insn2)
8261 {
8262 uint32_t rt;
8263 uint32_t rt2 = 0;
8264 uint32_t rn;
8265 uint32_t rm;
8266 uint32_t ra;
8267 bool pair;
8268 bool load;
8269
8270 if (Insn_utilities::aarch64_mlxl(insn2)
8271 && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
8272 {
8273 /* Any SIMD memory op is independent of the subsequent MLA
8274 by definition of the erratum. */
8275 if (Insn_utilities::aarch64_bit(insn1, 26))
8276 return true;
8277
8278 /* If not SIMD, check for integer memory ops and MLA relationship. */
8279 rn = Insn_utilities::aarch64_rn(insn2);
8280 ra = Insn_utilities::aarch64_ra(insn2);
8281 rm = Insn_utilities::aarch64_rm(insn2);
8282
8283 /* If this is a load and there's a true (RAW) dependency, we are safe
8284 and this is not an erratum sequence. */
8285 if (load &&
8286 (rt == rn || rt == rm || rt == ra
8287 || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
8288 return false;
8289
8290 /* We conservatively put out stubs for all other cases (including
8291 writebacks). */
8292 return true;
8293 }
8294
8295 return false;
8296 }
8297
8298
8299 // Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
8300
8301 template<int size, bool big_endian>
8302 void
8303 Target_aarch64<size, big_endian>::create_erratum_stub(
8304 AArch64_relobj<size, big_endian>* relobj,
8305 unsigned int shndx,
8306 section_size_type erratum_insn_offset,
8307 Address erratum_address,
8308 typename Insn_utilities::Insntype erratum_insn,
8309 int erratum_type,
8310 unsigned int e843419_adrp_offset)
8311 {
8312 gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
8313 The_stub_table* stub_table = relobj->stub_table(shndx);
8314 gold_assert(stub_table != NULL);
8315 if (stub_table->find_erratum_stub(relobj,
8316 shndx,
8317 erratum_insn_offset) == NULL)
8318 {
8319 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8320 The_erratum_stub* stub;
8321 if (erratum_type == ST_E_835769)
8322 stub = new The_erratum_stub(relobj, erratum_type, shndx,
8323 erratum_insn_offset);
8324 else if (erratum_type == ST_E_843419)
8325 stub = new E843419_stub<size, big_endian>(
8326 relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
8327 else
8328 gold_unreachable();
8329 stub->set_erratum_insn(erratum_insn);
8330 stub->set_erratum_address(erratum_address);
8331 // For errata ST_E_843419 and ST_E_835769, the destination address is
8332 // always the next insn after the erratum insn.
8333 stub->set_destination_address(erratum_address + BPI);
8334 stub_table->add_erratum_stub(stub);
8335 }
8336 }
8337
8338
8339 // Scan for erratum 835769 in section SHNDX over the range
8340 // [output_address + span_start, output_address + span_end). Note that we
8341 // do not share code with the scan_erratum_843419_span function, because for
8342 // 843419 we optimize by scanning only the last few insns of each page,
8343 // whereas for 835769 we need to scan every insn.
8344
8345 template<int size, bool big_endian>
8346 void
8347 Target_aarch64<size, big_endian>::scan_erratum_835769_span(
8348 AArch64_relobj<size, big_endian>* relobj,
8349 unsigned int shndx,
8350 const section_size_type span_start,
8351 const section_size_type span_end,
8352 unsigned char* input_view,
8353 Address output_address)
8354 {
8355 typedef typename Insn_utilities::Insntype Insntype;
8356
8357 const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
8358
8359 // Adjust output_address and view to the start of span.
8360 output_address += span_start;
8361 input_view += span_start;
8362
8363 section_size_type span_length = span_end - span_start;
8364 section_size_type offset = 0;
8365 for (offset = 0; offset + BPI < span_length; offset += BPI)
8366 {
8367 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8368 Insntype insn1 = ip[0];
8369 Insntype insn2 = ip[1];
8370 if (is_erratum_835769_sequence(insn1, insn2))
8371 {
8372 Insntype erratum_insn = insn2;
8373 // "span_start + offset" is the offset for insn1. So for insn2, it is
8374 // "span_start + offset + BPI".
8375 section_size_type erratum_insn_offset = span_start + offset + BPI;
8376 Address erratum_address = output_address + offset + BPI;
8377 gold_info(_("Erratum 835769 found and fixed at \"%s\", "
8378 "section %d, offset 0x%08x."),
8379 relobj->name().c_str(), shndx,
8380 (unsigned int)(span_start + offset));
8381
8382 this->create_erratum_stub(relobj, shndx,
8383 erratum_insn_offset, erratum_address,
8384 erratum_insn, ST_E_835769);
8385 offset += BPI; // Skip mac insn.
8386 }
8387 }
8388 } // End of "Target_aarch64::scan_erratum_835769_span".
8389
8390
8391 // Scan for erratum 843419 in section SHNDX over the range
8392 // [output_address + span_start, output_address + span_end).
8393
8394 template<int size, bool big_endian>
8395 void
8396 Target_aarch64<size, big_endian>::scan_erratum_843419_span(
8397 AArch64_relobj<size, big_endian>* relobj,
8398 unsigned int shndx,
8399 const section_size_type span_start,
8400 const section_size_type span_end,
8401 unsigned char* input_view,
8402 Address output_address)
8403 {
8404 typedef typename Insn_utilities::Insntype Insntype;
8405
8406 // Adjust output_address and view to the start of span.
8407 output_address += span_start;
8408 input_view += span_start;
8409
8410 if ((output_address & 0x03) != 0)
8411 return;
8412
8413 section_size_type offset = 0;
8414 section_size_type span_length = span_end - span_start;
8415 // The first instruction (the adrp) must sit at page offset 0xFF8 or 0xFFC.
8416 unsigned int page_offset = output_address & 0xFFF;
8417 // Make sure the starting position, that is "output_address + offset",
8418 // falls at page offset 0xff8 or 0xffc.
8419 if (page_offset < 0xff8)
8420 offset = 0xff8 - page_offset;
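// For example (hypothetical address): if output_address is 0x10f00, then
// page_offset is 0xf00 and scanning starts at offset 0xf8, i.e. at address
// 0x10ff8, the second-to-last insn slot of that 4KB page.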
8421 while (offset + 3 * Insn_utilities::BYTES_PER_INSN <= span_length)
8422 {
8423 Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
8424 Insntype insn1 = ip[0];
8425 if (Insn_utilities::is_adrp(insn1))
8426 {
8427 Insntype insn2 = ip[1];
8428 Insntype insn3 = ip[2];
8429 Insntype erratum_insn;
8430 unsigned insn_offset;
8431 bool do_report = false;
8432 if (is_erratum_843419_sequence(insn1, insn2, insn3))
8433 {
8434 do_report = true;
8435 erratum_insn = insn3;
8436 insn_offset = 2 * Insn_utilities::BYTES_PER_INSN;
8437 }
8438 else if (offset + 4 * Insn_utilities::BYTES_PER_INSN <= span_length)
8439 {
8440 // Optionally there can be an insn between insn2 and insn3.
8441 Insntype insn_opt = ip[2];
8442 // And insn_opt must not be a branch.
8443 if (!Insn_utilities::aarch64_b(insn_opt)
8444 && !Insn_utilities::aarch64_bl(insn_opt)
8445 && !Insn_utilities::aarch64_blr(insn_opt)
8446 && !Insn_utilities::aarch64_br(insn_opt))
8447 {
8448 // And insn_opt must not write to the dest reg of insn1. However,
8449 // we do a conservative scan, which means we may fix/report
8450 // more than necessary, but that does no harm.
8451
8452 Insntype insn4 = ip[3];
8453 if (is_erratum_843419_sequence(insn1, insn2, insn4))
8454 {
8455 do_report = true;
8456 erratum_insn = insn4;
8457 insn_offset = 3 * Insn_utilities::BYTES_PER_INSN;
8458 }
8459 }
8460 }
8461 if (do_report)
8462 {
8463 unsigned int erratum_insn_offset =
8464 span_start + offset + insn_offset;
8465 Address erratum_address =
8466 output_address + offset + insn_offset;
8467 create_erratum_stub(relobj, shndx,
8468 erratum_insn_offset, erratum_address,
8469 erratum_insn, ST_E_843419,
8470 span_start + offset);
8471 }
8472 }
8473
8474 // Advance to the next candidate instruction. We only consider instruction
8475 // sequences starting at a page offset of 0xff8 or 0xffc.
8476 page_offset = (output_address + offset) & 0xfff;
8477 if (page_offset == 0xff8)
8478 offset += 4;
8479 else // (page_offset == 0xffc), we move to next page's 0xff8.
8480 offset += 0xffc;
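// E.g. (hypothetical): from page offset 0xffc, adding 0xffc gives 0x1ff8,
// i.e. page offset 0xff8 on the following page.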
8481 }
8482 } // End of "Target_aarch64::scan_erratum_843419_span".
8483
8484
8485 // The selector for aarch64 object files.
8486
8487 template<int size, bool big_endian>
8488 class Target_selector_aarch64 : public Target_selector
8489 {
8490 public:
8491 Target_selector_aarch64();
8492
8493 virtual Target*
8494 do_instantiate_target()
8495 { return new Target_aarch64<size, big_endian>(); }
8496 };
8497
8498 template<>
8499 Target_selector_aarch64<32, true>::Target_selector_aarch64()
8500 : Target_selector(elfcpp::EM_AARCH64, 32, true,
8501 "elf32-bigaarch64", "aarch64_elf32_be_vec")
8502 { }
8503
8504 template<>
8505 Target_selector_aarch64<32, false>::Target_selector_aarch64()
8506 : Target_selector(elfcpp::EM_AARCH64, 32, false,
8507 "elf32-littleaarch64", "aarch64_elf32_le_vec")
8508 { }
8509
8510 template<>
8511 Target_selector_aarch64<64, true>::Target_selector_aarch64()
8512 : Target_selector(elfcpp::EM_AARCH64, 64, true,
8513 "elf64-bigaarch64", "aarch64_elf64_be_vec")
8514 { }
8515
8516 template<>
8517 Target_selector_aarch64<64, false>::Target_selector_aarch64()
8518 : Target_selector(elfcpp::EM_AARCH64, 64, false,
8519 "elf64-littleaarch64", "aarch64_elf64_le_vec")
8520 { }
8521
8522 Target_selector_aarch64<32, true> target_selector_aarch64elf32b;
8523 Target_selector_aarch64<32, false> target_selector_aarch64elf32;
8524 Target_selector_aarch64<64, true> target_selector_aarch64elfb;
8525 Target_selector_aarch64<64, false> target_selector_aarch64elf;
8526
8527 } // End anonymous namespace.