1 // x86_64.cc -- x86_64 target support for gold.
2
3 // Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
4 // Written by Ian Lance Taylor <iant@google.com>.
5
6 // This file is part of gold.
7
8 // This program is free software; you can redistribute it and/or modify
9 // it under the terms of the GNU General Public License as published by
10 // the Free Software Foundation; either version 3 of the License, or
11 // (at your option) any later version.
12
13 // This program is distributed in the hope that it will be useful,
14 // but WITHOUT ANY WARRANTY; without even the implied warranty of
15 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 // GNU General Public License for more details.
17
18 // You should have received a copy of the GNU General Public License
19 // along with this program; if not, write to the Free Software
20 // Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
21 // MA 02110-1301, USA.
22
23 #include "gold.h"
24
25 #include <cstring>
26
27 #include "elfcpp.h"
28 #include "parameters.h"
29 #include "reloc.h"
30 #include "x86_64.h"
31 #include "object.h"
32 #include "symtab.h"
33 #include "layout.h"
34 #include "output.h"
35 #include "copy-relocs.h"
36 #include "target.h"
37 #include "target-reloc.h"
38 #include "target-select.h"
39 #include "tls.h"
40 #include "freebsd.h"
41 #include "gc.h"
42
43 namespace
44 {
45
46 using namespace gold;
47
48 class Output_data_plt_x86_64;
49
50 // The x86_64 target class.
51 // See the ABI at
52 // http://www.x86-64.org/documentation/abi.pdf
53 // TLS info comes from
54 // http://people.redhat.com/drepper/tls.pdf
55 // http://www.lsd.ic.unicamp.br/~oliva/writeups/TLS/RFC-TLSDESC-x86.txt
56
57 class Target_x86_64 : public Target_freebsd<64, false>
58 {
59 public:
60 // In the x86_64 ABI (p 68), it says "The AMD64 ABI architectures
61 // uses only Elf64_Rela relocation entries with explicit addends."
62 typedef Output_data_reloc<elfcpp::SHT_RELA, true, 64, false> Reloc_section;
63
64 Target_x86_64()
65 : Target_freebsd<64, false>(&x86_64_info),
66 got_(NULL), plt_(NULL), got_plt_(NULL), rela_dyn_(NULL),
67 copy_relocs_(elfcpp::R_X86_64_COPY), dynbss_(NULL),
68 got_mod_index_offset_(-1U), tls_base_symbol_defined_(false)
69 { }
70
71 // Hook for a new output section.
72 void
73 do_new_output_section(Output_section*) const;
74
75 // Scan the relocations to look for symbol adjustments.
76 void
77 gc_process_relocs(Symbol_table* symtab,
78 Layout* layout,
79 Sized_relobj<64, false>* object,
80 unsigned int data_shndx,
81 unsigned int sh_type,
82 const unsigned char* prelocs,
83 size_t reloc_count,
84 Output_section* output_section,
85 bool needs_special_offset_handling,
86 size_t local_symbol_count,
87 const unsigned char* plocal_symbols);
88
89 // Scan the relocations to look for symbol adjustments.
90 void
91 scan_relocs(Symbol_table* symtab,
92 Layout* layout,
93 Sized_relobj<64, false>* object,
94 unsigned int data_shndx,
95 unsigned int sh_type,
96 const unsigned char* prelocs,
97 size_t reloc_count,
98 Output_section* output_section,
99 bool needs_special_offset_handling,
100 size_t local_symbol_count,
101 const unsigned char* plocal_symbols);
102
103 // Finalize the sections.
104 void
105 do_finalize_sections(Layout*, const Input_objects*, Symbol_table*);
106
107 // Return the value to use for a dynamic symbol which requires special
108 // treatment.
109 uint64_t
110 do_dynsym_value(const Symbol*) const;
111
112 // Relocate a section.
113 void
114 relocate_section(const Relocate_info<64, false>*,
115 unsigned int sh_type,
116 const unsigned char* prelocs,
117 size_t reloc_count,
118 Output_section* output_section,
119 bool needs_special_offset_handling,
120 unsigned char* view,
121 elfcpp::Elf_types<64>::Elf_Addr view_address,
122 section_size_type view_size,
123 const Reloc_symbol_changes*);
124
125 // Scan the relocs during a relocatable link.
126 void
127 scan_relocatable_relocs(Symbol_table* symtab,
128 Layout* layout,
129 Sized_relobj<64, false>* object,
130 unsigned int data_shndx,
131 unsigned int sh_type,
132 const unsigned char* prelocs,
133 size_t reloc_count,
134 Output_section* output_section,
135 bool needs_special_offset_handling,
136 size_t local_symbol_count,
137 const unsigned char* plocal_symbols,
138 Relocatable_relocs*);
139
140 // Relocate a section during a relocatable link.
141 void
142 relocate_for_relocatable(const Relocate_info<64, false>*,
143 unsigned int sh_type,
144 const unsigned char* prelocs,
145 size_t reloc_count,
146 Output_section* output_section,
147 off_t offset_in_output_section,
148 const Relocatable_relocs*,
149 unsigned char* view,
150 elfcpp::Elf_types<64>::Elf_Addr view_address,
151 section_size_type view_size,
152 unsigned char* reloc_view,
153 section_size_type reloc_view_size);
154
155 // Return a string used to fill a code section with nops.
156 std::string
157 do_code_fill(section_size_type length) const;
158
159 // Return whether SYM is defined by the ABI.
160 bool
161 do_is_defined_by_abi(const Symbol* sym) const
162 { return strcmp(sym->name(), "__tls_get_addr") == 0; }
163
164 // Adjust -fsplit-stack code which calls non-split-stack code.
165 void
166 do_calls_non_split(Relobj* object, unsigned int shndx,
167 section_offset_type fnoffset, section_size_type fnsize,
168 unsigned char* view, section_size_type view_size,
169 std::string* from, std::string* to) const;
170
171 // Return the size of the GOT section.
172 section_size_type
173 got_size()
174 {
175 gold_assert(this->got_ != NULL);
176 return this->got_->data_size();
177 }
178
179 private:
180 // The class which scans relocations.
181 class Scan
182 {
183 public:
184 Scan()
185 : issued_non_pic_error_(false)
186 { }
187
188 inline void
189 local(Symbol_table* symtab, Layout* layout, Target_x86_64* target,
190 Sized_relobj<64, false>* object,
191 unsigned int data_shndx,
192 Output_section* output_section,
193 const elfcpp::Rela<64, false>& reloc, unsigned int r_type,
194 const elfcpp::Sym<64, false>& lsym);
195
196 inline void
197 global(Symbol_table* symtab, Layout* layout, Target_x86_64* target,
198 Sized_relobj<64, false>* object,
199 unsigned int data_shndx,
200 Output_section* output_section,
201 const elfcpp::Rela<64, false>& reloc, unsigned int r_type,
202 Symbol* gsym);
203
204 private:
205 static void
206 unsupported_reloc_local(Sized_relobj<64, false>*, unsigned int r_type);
207
208 static void
209 unsupported_reloc_global(Sized_relobj<64, false>*, unsigned int r_type,
210 Symbol*);
211
212 void
213 check_non_pic(Relobj*, unsigned int r_type);
214
215 // Whether we have issued an error about a non-PIC compilation.
216 bool issued_non_pic_error_;
217 };
218
219 // The class which implements relocation.
220 class Relocate
221 {
222 public:
223 Relocate()
224 : skip_call_tls_get_addr_(false), saw_tls_block_reloc_(false)
225 { }
226
227 ~Relocate()
228 {
229 if (this->skip_call_tls_get_addr_)
230 {
231 // FIXME: This needs to specify the location somehow.
232 gold_error(_("missing expected TLS relocation"));
233 }
234 }
235
236 // Do a relocation. Return false if the caller should not issue
237 // any warnings about this relocation.
238 inline bool
239 relocate(const Relocate_info<64, false>*, Target_x86_64*, Output_section*,
240 size_t relnum, const elfcpp::Rela<64, false>&,
241 unsigned int r_type, const Sized_symbol<64>*,
242 const Symbol_value<64>*,
243 unsigned char*, elfcpp::Elf_types<64>::Elf_Addr,
244 section_size_type);
245
246 private:
247 // Do a TLS relocation.
248 inline void
249 relocate_tls(const Relocate_info<64, false>*, Target_x86_64*,
250 size_t relnum, const elfcpp::Rela<64, false>&,
251 unsigned int r_type, const Sized_symbol<64>*,
252 const Symbol_value<64>*,
253 unsigned char*, elfcpp::Elf_types<64>::Elf_Addr,
254 section_size_type);
255
256 // Do a TLS General-Dynamic to Initial-Exec transition.
257 inline void
258 tls_gd_to_ie(const Relocate_info<64, false>*, size_t relnum,
259 Output_segment* tls_segment,
260 const elfcpp::Rela<64, false>&, unsigned int r_type,
261 elfcpp::Elf_types<64>::Elf_Addr value,
262 unsigned char* view,
263 elfcpp::Elf_types<64>::Elf_Addr,
264 section_size_type view_size);
265
266 // Do a TLS General-Dynamic to Local-Exec transition.
267 inline void
268 tls_gd_to_le(const Relocate_info<64, false>*, size_t relnum,
269 Output_segment* tls_segment,
270 const elfcpp::Rela<64, false>&, unsigned int r_type,
271 elfcpp::Elf_types<64>::Elf_Addr value,
272 unsigned char* view,
273 section_size_type view_size);
274
275 // Do a TLSDESC-style General-Dynamic to Initial-Exec transition.
276 inline void
277 tls_desc_gd_to_ie(const Relocate_info<64, false>*, size_t relnum,
278 Output_segment* tls_segment,
279 const elfcpp::Rela<64, false>&, unsigned int r_type,
280 elfcpp::Elf_types<64>::Elf_Addr value,
281 unsigned char* view,
282 elfcpp::Elf_types<64>::Elf_Addr,
283 section_size_type view_size);
284
285 // Do a TLSDESC-style General-Dynamic to Local-Exec transition.
286 inline void
287 tls_desc_gd_to_le(const Relocate_info<64, false>*, size_t relnum,
288 Output_segment* tls_segment,
289 const elfcpp::Rela<64, false>&, unsigned int r_type,
290 elfcpp::Elf_types<64>::Elf_Addr value,
291 unsigned char* view,
292 section_size_type view_size);
293
294 // Do a TLS Local-Dynamic to Local-Exec transition.
295 inline void
296 tls_ld_to_le(const Relocate_info<64, false>*, size_t relnum,
297 Output_segment* tls_segment,
298 const elfcpp::Rela<64, false>&, unsigned int r_type,
299 elfcpp::Elf_types<64>::Elf_Addr value,
300 unsigned char* view,
301 section_size_type view_size);
302
303 // Do a TLS Initial-Exec to Local-Exec transition.
304 static inline void
305 tls_ie_to_le(const Relocate_info<64, false>*, size_t relnum,
306 Output_segment* tls_segment,
307 const elfcpp::Rela<64, false>&, unsigned int r_type,
308 elfcpp::Elf_types<64>::Elf_Addr value,
309 unsigned char* view,
310 section_size_type view_size);
311
312 // This is set if we should skip the next reloc, which should be a
313 // PLT32 reloc against __tls_get_addr.
314 bool skip_call_tls_get_addr_;
315
316 // This is set if we see a relocation which could load the address
317 // of the TLS block. Whether we see such a relocation determines
318 // how we handle the R_X86_64_DTPOFF32 relocation, which is used
319 // in debugging sections.
320 bool saw_tls_block_reloc_;
321 };
322
323 // A class which returns the size required for a relocation type,
324 // used while scanning relocs during a relocatable link.
325 class Relocatable_size_for_reloc
326 {
327 public:
328 unsigned int
329 get_size_for_reloc(unsigned int, Relobj*);
330 };
331
332 // Adjust TLS relocation type based on the options and whether this
333 // is a local symbol.
334 static tls::Tls_optimization
335 optimize_tls_reloc(bool is_final, int r_type);
336
337 // Get the GOT section, creating it if necessary.
338 Output_data_got<64, false>*
339 got_section(Symbol_table*, Layout*);
340
341 // Get the GOT PLT section.
342 Output_data_space*
343 got_plt_section() const
344 {
345 gold_assert(this->got_plt_ != NULL);
346 return this->got_plt_;
347 }
348
349 // Create the PLT section.
350 void
351 make_plt_section(Symbol_table* symtab, Layout* layout);
352
353 // Create a PLT entry for a global symbol.
354 void
355 make_plt_entry(Symbol_table*, Layout*, Symbol*);
356
357 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
358 void
359 define_tls_base_symbol(Symbol_table*, Layout*);
360
361 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
362 void
363 reserve_tlsdesc_entries(Symbol_table* symtab, Layout* layout);
364
365 // Create a GOT entry for the TLS module index.
366 unsigned int
367 got_mod_index_entry(Symbol_table* symtab, Layout* layout,
368 Sized_relobj<64, false>* object);
369
370 // Get the PLT section.
371 Output_data_plt_x86_64*
372 plt_section() const
373 {
374 gold_assert(this->plt_ != NULL);
375 return this->plt_;
376 }
377
378 // Get the dynamic reloc section, creating it if necessary.
379 Reloc_section*
380 rela_dyn_section(Layout*);
381
382 // Add a potential copy relocation.
383 void
384 copy_reloc(Symbol_table* symtab, Layout* layout,
385 Sized_relobj<64, false>* object,
386 unsigned int shndx, Output_section* output_section,
387 Symbol* sym, const elfcpp::Rela<64, false>& reloc)
388 {
389 this->copy_relocs_.copy_reloc(symtab, layout,
390 symtab->get_sized_symbol<64>(sym),
391 object, shndx, output_section,
392 reloc, this->rela_dyn_section(layout));
393 }
394
395 // Information about this specific target which we pass to the
396 // general Target structure.
397 static const Target::Target_info x86_64_info;
398
399 enum Got_type
400 {
401 GOT_TYPE_STANDARD = 0, // GOT entry for a regular symbol
402 GOT_TYPE_TLS_OFFSET = 1, // GOT entry for TLS offset
403 GOT_TYPE_TLS_PAIR = 2, // GOT entry for TLS module/offset pair
404 GOT_TYPE_TLS_DESC = 3 // GOT entry for TLS_DESC pair
405 };
406
407 // The GOT section.
408 Output_data_got<64, false>* got_;
409 // The PLT section.
410 Output_data_plt_x86_64* plt_;
411 // The GOT PLT section.
412 Output_data_space* got_plt_;
413 // The dynamic reloc section.
414 Reloc_section* rela_dyn_;
415 // Relocs saved to avoid a COPY reloc.
416 Copy_relocs<elfcpp::SHT_RELA, 64, false> copy_relocs_;
417 // Space for variables copied with a COPY reloc.
418 Output_data_space* dynbss_;
419 // Offset of the GOT entry for the TLS module index.
420 unsigned int got_mod_index_offset_;
421 // True if the _TLS_MODULE_BASE_ symbol has been defined.
422 bool tls_base_symbol_defined_;
423 };
424
425 const Target::Target_info Target_x86_64::x86_64_info =
426 {
427 64, // size
428 false, // is_big_endian
429 elfcpp::EM_X86_64, // machine_code
430 false, // has_make_symbol
431 false, // has_resolve
432 true, // has_code_fill
433 true, // is_default_stack_executable
434 '\0', // wrap_char
435 "/lib/ld64.so.1", // program interpreter
436 0x400000, // default_text_segment_address
437 0x1000, // abi_pagesize (overridable by -z max-page-size)
438 0x1000, // common_pagesize (overridable by -z common-page-size)
439 elfcpp::SHN_UNDEF, // small_common_shndx
440 elfcpp::SHN_X86_64_LCOMMON, // large_common_shndx
441 0, // small_common_section_flags
442 elfcpp::SHF_X86_64_LARGE, // large_common_section_flags
443 NULL, // attributes_section
444 NULL // attributes_vendor
445 };
446
447 // This is called when a new output section is created. This is where
448 // we handle the SHF_X86_64_LARGE flag.
449
450 void
451 Target_x86_64::do_new_output_section(Output_section *os) const
452 {
453 if ((os->flags() & elfcpp::SHF_X86_64_LARGE) != 0)
454 os->set_is_large_section();
455 }
456
457 // Get the GOT section, creating it if necessary.
458
459 Output_data_got<64, false>*
460 Target_x86_64::got_section(Symbol_table* symtab, Layout* layout)
461 {
462 if (this->got_ == NULL)
463 {
464 gold_assert(symtab != NULL && layout != NULL);
465
466 this->got_ = new Output_data_got<64, false>();
467
468 Output_section* os;
469 os = layout->add_output_section_data(".got", elfcpp::SHT_PROGBITS,
470 (elfcpp::SHF_ALLOC
471 | elfcpp::SHF_WRITE),
472 this->got_, false, true, true,
473 false);
474
475 this->got_plt_ = new Output_data_space(8, "** GOT PLT");
476 os = layout->add_output_section_data(".got.plt", elfcpp::SHT_PROGBITS,
477 (elfcpp::SHF_ALLOC
478 | elfcpp::SHF_WRITE),
479 this->got_plt_, false, false,
480 false, true);
481
482 // The first three entries are reserved.
483 this->got_plt_->set_current_data_size(3 * 8);
484
485 // Those bytes can go into the relro segment.
486 layout->increase_relro(3 * 8);
487
488 // Define _GLOBAL_OFFSET_TABLE_ at the start of the PLT GOT (.got.plt).
489 symtab->define_in_output_data("_GLOBAL_OFFSET_TABLE_", NULL,
490 Symbol_table::PREDEFINED,
491 this->got_plt_,
492 0, 0, elfcpp::STT_OBJECT,
493 elfcpp::STB_LOCAL,
494 elfcpp::STV_HIDDEN, 0,
495 false, false);
496 }
497
498 return this->got_;
499 }
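
// For illustration: the three reserved .got.plt slots set aside above follow
// the usual x86-64 convention.  Slot 0 conventionally holds the address of
// _DYNAMIC, while slots 1 and 2 are filled in at run time by the dynamic
// linker with its link-map pointer and the address of its lazy-resolution
// routine.  The first PLT entry below pushes slot 1 (GOT + 8) and jumps
// through slot 2 (GOT + 16), and _GLOBAL_OFFSET_TABLE_ points at slot 0.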
500
501 // Get the dynamic reloc section, creating it if necessary.
502
503 Target_x86_64::Reloc_section*
504 Target_x86_64::rela_dyn_section(Layout* layout)
505 {
506 if (this->rela_dyn_ == NULL)
507 {
508 gold_assert(layout != NULL);
509 this->rela_dyn_ = new Reloc_section(parameters->options().combreloc());
510 layout->add_output_section_data(".rela.dyn", elfcpp::SHT_RELA,
511 elfcpp::SHF_ALLOC, this->rela_dyn_, true,
512 false, false, false);
513 }
514 return this->rela_dyn_;
515 }
516
517 // A class to handle the PLT data.
518
519 class Output_data_plt_x86_64 : public Output_section_data
520 {
521 public:
522 typedef Output_data_reloc<elfcpp::SHT_RELA, true, 64, false> Reloc_section;
523
524 Output_data_plt_x86_64(Layout*, Output_data_got<64, false>*,
525 Output_data_space*);
526
527 // Add an entry to the PLT.
528 void
529 add_entry(Symbol* gsym);
530
531 // Add the reserved TLSDESC_PLT entry to the PLT.
532 void
533 reserve_tlsdesc_entry(unsigned int got_offset)
534 { this->tlsdesc_got_offset_ = got_offset; }
535
536 // Return true if a TLSDESC_PLT entry has been reserved.
537 bool
538 has_tlsdesc_entry() const
539 { return this->tlsdesc_got_offset_ != -1U; }
540
541 // Return the GOT offset for the reserved TLSDESC_PLT entry.
542 unsigned int
543 get_tlsdesc_got_offset() const
544 { return this->tlsdesc_got_offset_; }
545
546 // Return the offset of the reserved TLSDESC_PLT entry.
547 unsigned int
548 get_tlsdesc_plt_offset() const
549 { return (this->count_ + 1) * plt_entry_size; }
550
551 // Return the .rela.plt section data.
552 const Reloc_section*
553 rel_plt() const
554 { return this->rel_; }
555
556 protected:
557 void
558 do_adjust_output_section(Output_section* os);
559
560 // Write to a map file.
561 void
562 do_print_to_mapfile(Mapfile* mapfile) const
563 { mapfile->print_output_data(this, _("** PLT")); }
564
565 private:
566 // The size of an entry in the PLT.
567 static const int plt_entry_size = 16;
568
569 // The first entry in the PLT.
570 // From the AMD64 ABI: "Unlike Intel386 ABI, this ABI uses the same
571 // procedure linkage table for both programs and shared objects."
572 static unsigned char first_plt_entry[plt_entry_size];
573
574 // Other entries in the PLT for an executable.
575 static unsigned char plt_entry[plt_entry_size];
576
577 // The reserved TLSDESC entry in the PLT for an executable.
578 static unsigned char tlsdesc_plt_entry[plt_entry_size];
579
580 // Set the final size.
581 void
582 set_final_data_size();
583
584 // Write out the PLT data.
585 void
586 do_write(Output_file*);
587
588 // The reloc section.
589 Reloc_section* rel_;
590 // The .got section.
591 Output_data_got<64, false>* got_;
592 // The .got.plt section.
593 Output_data_space* got_plt_;
594 // The number of PLT entries.
595 unsigned int count_;
596 // Offset of the reserved TLSDESC_GOT entry when needed.
597 unsigned int tlsdesc_got_offset_;
598 };
599
600 // Create the PLT section. The ordinary .got section is an argument,
601 // since we need to refer to the start. We also create our own .got
602 // section just for PLT entries.
603
604 Output_data_plt_x86_64::Output_data_plt_x86_64(Layout* layout,
605 Output_data_got<64, false>* got,
606 Output_data_space* got_plt)
607 : Output_section_data(8), got_(got), got_plt_(got_plt), count_(0),
608 tlsdesc_got_offset_(-1U)
609 {
610 this->rel_ = new Reloc_section(false);
611 layout->add_output_section_data(".rela.plt", elfcpp::SHT_RELA,
612 elfcpp::SHF_ALLOC, this->rel_, true,
613 false, false, false);
614 }
615
616 void
617 Output_data_plt_x86_64::do_adjust_output_section(Output_section* os)
618 {
619 os->set_entsize(plt_entry_size);
620 }
621
622 // Add an entry to the PLT.
623
624 void
625 Output_data_plt_x86_64::add_entry(Symbol* gsym)
626 {
627 gold_assert(!gsym->has_plt_offset());
628
629 // Note that when setting the PLT offset we skip the initial
630 // reserved PLT entry.
631 gsym->set_plt_offset((this->count_ + 1) * plt_entry_size);
632
633 ++this->count_;
634
635 section_offset_type got_offset = this->got_plt_->current_data_size();
636
637 // Every PLT entry needs a GOT entry which points back to the PLT
638 // entry (this will be changed by the dynamic linker, normally
639 // lazily when the function is called).
640 this->got_plt_->set_current_data_size(got_offset + 8);
641
642 // Every PLT entry needs a reloc.
643 gsym->set_needs_dynsym_entry();
644 this->rel_->add_global(gsym, elfcpp::R_X86_64_JUMP_SLOT, this->got_plt_,
645 got_offset, 0);
646
647 // Note that we don't need to save the symbol. The contents of the
648 // PLT are independent of which symbols are used. The symbols only
649 // appear in the relocations.
650 }
651
652 // Set the final size.
653 void
654 Output_data_plt_x86_64::set_final_data_size()
655 {
656 unsigned int count = this->count_;
657 if (this->has_tlsdesc_entry())
658 ++count;
659 this->set_data_size((count + 1) * plt_entry_size);
660 }
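
// For illustration: with the sizing above, a link that makes three ordinary
// PLT entries and also reserves the TLSDESC entry ends up with a .plt of
// (3 + 1 + 1) * 16 = 80 bytes -- the initial reserved entry, the three
// ordinary entries, and the TLSDESC entry last (see get_tlsdesc_plt_offset).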
661
662 // The first entry in the PLT for an executable.
663
664 unsigned char Output_data_plt_x86_64::first_plt_entry[plt_entry_size] =
665 {
666 // From AMD64 ABI Draft 0.98, page 76
667 0xff, 0x35, // pushq contents of memory address
668 0, 0, 0, 0, // replaced with address of .got + 8
669 0xff, 0x25, // jmp indirect
670 0, 0, 0, 0, // replaced with address of .got + 16
671 0x90, 0x90, 0x90, 0x90 // noop (x4)
672 };
673
674 // Subsequent entries in the PLT for an executable.
675
676 unsigned char Output_data_plt_x86_64::plt_entry[plt_entry_size] =
677 {
678 // From AMD64 ABI Draft 0.98, page 76
679 0xff, 0x25, // jmpq indirect
680 0, 0, 0, 0, // replaced with address of symbol in .got
681 0x68, // pushq immediate
682 0, 0, 0, 0, // replaced with offset into relocation table
683 0xe9, // jmpq relative
684 0, 0, 0, 0 // replaced with offset to start of .plt
685 };
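
// For illustration: once do_write() below patches in the displacements and
// immediates, PLT entry N (at offset (N + 1) * 16) reads roughly as
//
//   jmpq  *got.plt_slot_for_sym(%rip)  # slot initially points at the pushq below
//   pushq $N                           # index of the JUMP_SLOT reloc in .rela.plt
//   jmpq  plt_entry_0                  # push the link map and call the resolver
//
// so the first call reaches the lazy resolver, which then overwrites the
// .got.plt slot with the real address of the symbol.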
686
687 // The reserved TLSDESC entry in the PLT for an executable.
688
689 unsigned char Output_data_plt_x86_64::tlsdesc_plt_entry[plt_entry_size] =
690 {
691 // From Alexandre Oliva, "Thread-Local Storage Descriptors for IA32
692 // and AMD64/EM64T", Version 0.9.4 (2005-10-10).
693 0xff, 0x35, // pushq x(%rip)
694 0, 0, 0, 0, // replaced with address of linkmap GOT entry (at PLTGOT + 8)
695 0xff, 0x25, // jmpq *y(%rip)
696 0, 0, 0, 0, // replaced with offset of reserved TLSDESC_GOT entry
697 0x0f, 0x1f, // nop
698 0x40, 0
699 };
700
701 // Write out the PLT. This uses the hand-coded instructions above,
702 // and adjusts them as needed. This is specified by the AMD64 ABI.
703
704 void
705 Output_data_plt_x86_64::do_write(Output_file* of)
706 {
707 const off_t offset = this->offset();
708 const section_size_type oview_size =
709 convert_to_section_size_type(this->data_size());
710 unsigned char* const oview = of->get_output_view(offset, oview_size);
711
712 const off_t got_file_offset = this->got_plt_->offset();
713 const section_size_type got_size =
714 convert_to_section_size_type(this->got_plt_->data_size());
715 unsigned char* const got_view = of->get_output_view(got_file_offset,
716 got_size);
717
718 unsigned char* pov = oview;
719
720 // The base address of the .plt section.
721 elfcpp::Elf_types<64>::Elf_Addr plt_address = this->address();
722 // The base address of the .got section.
723 elfcpp::Elf_types<64>::Elf_Addr got_base = this->got_->address();
724 // The base address of the PLT portion of the .got section,
725 // which is where the GOT pointer will point, and where the
726 // three reserved GOT entries are located.
727 elfcpp::Elf_types<64>::Elf_Addr got_address = this->got_plt_->address();
728
729 memcpy(pov, first_plt_entry, plt_entry_size);
730 // Each displacement is relative to the PC at the end of its instruction.
731 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
732 (got_address + 8
733 - (plt_address + 6)));
734 elfcpp::Swap<32, false>::writeval(pov + 8,
735 (got_address + 16
736 - (plt_address + 12)));
737 pov += plt_entry_size;
738
739 unsigned char* got_pov = got_view;
740
741 memset(got_pov, 0, 24);
742 got_pov += 24;
743
744 unsigned int plt_offset = plt_entry_size;
745 unsigned int got_offset = 24;
746 const unsigned int count = this->count_;
747 for (unsigned int plt_index = 0;
748 plt_index < count;
749 ++plt_index,
750 pov += plt_entry_size,
751 got_pov += 8,
752 plt_offset += plt_entry_size,
753 got_offset += 8)
754 {
755 // Set and adjust the PLT entry itself.
756 memcpy(pov, plt_entry, plt_entry_size);
757 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
758 (got_address + got_offset
759 - (plt_address + plt_offset
760 + 6)));
761
762 elfcpp::Swap_unaligned<32, false>::writeval(pov + 7, plt_index);
763 elfcpp::Swap<32, false>::writeval(pov + 12,
764 - (plt_offset + plt_entry_size));
765
766 // Set the entry in the GOT.
767 elfcpp::Swap<64, false>::writeval(got_pov, plt_address + plt_offset + 6);
768 }
769
770 if (this->has_tlsdesc_entry())
771 {
772 // Set and adjust the reserved TLSDESC PLT entry.
773 unsigned int tlsdesc_got_offset = this->get_tlsdesc_got_offset();
774 memcpy(pov, tlsdesc_plt_entry, plt_entry_size);
775 elfcpp::Swap_unaligned<32, false>::writeval(pov + 2,
776 (got_address + 8
777 - (plt_address + plt_offset
778 + 6)));
779 elfcpp::Swap_unaligned<32, false>::writeval(pov + 8,
780 (got_base
781 + tlsdesc_got_offset
782 - (plt_address + plt_offset
783 + 12)));
784 pov += plt_entry_size;
785 }
786
787 gold_assert(static_cast<section_size_type>(pov - oview) == oview_size);
788 gold_assert(static_cast<section_size_type>(got_pov - got_view) == got_size);
789
790 of->write_output_view(offset, oview_size, oview);
791 of->write_output_view(got_file_offset, got_size, got_view);
792 }
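
// For illustration of the arithmetic above: the first ordinary entry
// (plt_index 0) is written at plt_offset 16 with got_offset 24, so its jmpq
// displacement becomes got_address + 24 - (plt_address + 16 + 6), i.e. the
// distance from the end of the 6-byte jmpq to the fourth .got.plt slot, and
// that slot is initialized to plt_address + 16 + 6, the address of the
// pushq which follows the jmpq.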
793
794 // Create the PLT section.
795
796 void
797 Target_x86_64::make_plt_section(Symbol_table* symtab, Layout* layout)
798 {
799 if (this->plt_ == NULL)
800 {
801 // Create the GOT sections first.
802 this->got_section(symtab, layout);
803
804 this->plt_ = new Output_data_plt_x86_64(layout, this->got_,
805 this->got_plt_);
806 layout->add_output_section_data(".plt", elfcpp::SHT_PROGBITS,
807 (elfcpp::SHF_ALLOC
808 | elfcpp::SHF_EXECINSTR),
809 this->plt_, false, false, false, false);
810 }
811 }
812
813 // Create a PLT entry for a global symbol.
814
815 void
816 Target_x86_64::make_plt_entry(Symbol_table* symtab, Layout* layout,
817 Symbol* gsym)
818 {
819 if (gsym->has_plt_offset())
820 return;
821
822 if (this->plt_ == NULL)
823 this->make_plt_section(symtab, layout);
824
825 this->plt_->add_entry(gsym);
826 }
827
828 // Define the _TLS_MODULE_BASE_ symbol in the TLS segment.
829
830 void
831 Target_x86_64::define_tls_base_symbol(Symbol_table* symtab, Layout* layout)
832 {
833 if (this->tls_base_symbol_defined_)
834 return;
835
836 Output_segment* tls_segment = layout->tls_segment();
837 if (tls_segment != NULL)
838 {
839 bool is_exec = parameters->options().output_is_executable();
840 symtab->define_in_output_segment("_TLS_MODULE_BASE_", NULL,
841 Symbol_table::PREDEFINED,
842 tls_segment, 0, 0,
843 elfcpp::STT_TLS,
844 elfcpp::STB_LOCAL,
845 elfcpp::STV_HIDDEN, 0,
846 (is_exec
847 ? Symbol::SEGMENT_END
848 : Symbol::SEGMENT_START),
849 true);
850 }
851 this->tls_base_symbol_defined_ = true;
852 }
853
854 // Create the reserved PLT and GOT entries for the TLS descriptor resolver.
855
856 void
857 Target_x86_64::reserve_tlsdesc_entries(Symbol_table* symtab,
858 Layout* layout)
859 {
860 if (this->plt_ == NULL)
861 this->make_plt_section(symtab, layout);
862
863 if (!this->plt_->has_tlsdesc_entry())
864 {
865 // Allocate the TLSDESC_GOT entry.
866 Output_data_got<64, false>* got = this->got_section(symtab, layout);
867 unsigned int got_offset = got->add_constant(0);
868
869 // Allocate the TLSDESC_PLT entry.
870 this->plt_->reserve_tlsdesc_entry(got_offset);
871 }
872 }
873
874 // Create a GOT entry for the TLS module index.
875
876 unsigned int
877 Target_x86_64::got_mod_index_entry(Symbol_table* symtab, Layout* layout,
878 Sized_relobj<64, false>* object)
879 {
880 if (this->got_mod_index_offset_ == -1U)
881 {
882 gold_assert(symtab != NULL && layout != NULL && object != NULL);
883 Reloc_section* rela_dyn = this->rela_dyn_section(layout);
884 Output_data_got<64, false>* got = this->got_section(symtab, layout);
885 unsigned int got_offset = got->add_constant(0);
886 rela_dyn->add_local(object, 0, elfcpp::R_X86_64_DTPMOD64, got,
887 got_offset, 0);
888 got->add_constant(0);
889 this->got_mod_index_offset_ = got_offset;
890 }
891 return this->got_mod_index_offset_;
892 }
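
// For illustration: the entry created above is a pair of consecutive GOT
// slots -- the first gets an R_X86_64_DTPMOD64 dynamic reloc so the dynamic
// linker fills in the module index, and the second stays zero as the dtv
// offset.  All Local-Dynamic (TLSLD) references in the output share this
// one pair.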
893
894 // Optimize the TLS relocation type based on what we know about the
895 // symbol. IS_FINAL is true if the final address of this symbol is
896 // known at link time.
897
898 tls::Tls_optimization
899 Target_x86_64::optimize_tls_reloc(bool is_final, int r_type)
900 {
901 // If we are generating a shared library, then we can't do anything
902 // in the linker.
903 if (parameters->options().shared())
904 return tls::TLSOPT_NONE;
905
906 switch (r_type)
907 {
908 case elfcpp::R_X86_64_TLSGD:
909 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
910 case elfcpp::R_X86_64_TLSDESC_CALL:
911 // These are General-Dynamic relocs, which permit fully general TLS
912 // access. Since we know that we are generating an executable,
913 // we can convert this to Initial-Exec. If we also know that
914 // this is a local symbol, we can further switch to Local-Exec.
915 if (is_final)
916 return tls::TLSOPT_TO_LE;
917 return tls::TLSOPT_TO_IE;
918
919 case elfcpp::R_X86_64_TLSLD:
920 // This is Local-Dynamic, which refers to a local symbol in the
921 // dynamic TLS block. Since we know that we are generating an
922 // executable, we can switch to Local-Exec.
923 return tls::TLSOPT_TO_LE;
924
925 case elfcpp::R_X86_64_DTPOFF32:
926 case elfcpp::R_X86_64_DTPOFF64:
927 // Another Local-Dynamic reloc.
928 return tls::TLSOPT_TO_LE;
929
930 case elfcpp::R_X86_64_GOTTPOFF:
931 // These are Initial-Exec relocs which get the thread offset
932 // from the GOT. If we know that we are linking against the
933 // local symbol, we can switch to Local-Exec, which links the
934 // thread offset into the instruction.
935 if (is_final)
936 return tls::TLSOPT_TO_LE;
937 return tls::TLSOPT_NONE;
938
939 case elfcpp::R_X86_64_TPOFF32:
940 // When we already have Local-Exec, there is nothing further we
941 // can do.
942 return tls::TLSOPT_NONE;
943
944 default:
945 gold_unreachable();
946 }
947 }
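
// For illustration of what those answers mean for the General-Dynamic case
// (see the TLS documents cited at the top of this file): the compiler emits
// roughly
//
//   .byte 0x66; leaq sym@tlsgd(%rip), %rdi
//   .word 0x6666; rex64; call __tls_get_addr@plt
//
// and the Relocate::tls_gd_to_ie / tls_gd_to_le helpers rewrite it in place to
//
//   movq %fs:0, %rax; addq sym@gottpoff(%rip), %rax    (TLSOPT_TO_IE)
//   movq %fs:0, %rax; leaq sym@tpoff(%rax), %rax       (TLSOPT_TO_LE)
//
// which is why the IE transition still needs a GOT entry holding the
// tp-relative offset while the LE transition needs no GOT entry at all.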
948
949 // Report an unsupported relocation against a local symbol.
950
951 void
952 Target_x86_64::Scan::unsupported_reloc_local(Sized_relobj<64, false>* object,
953 unsigned int r_type)
954 {
955 gold_error(_("%s: unsupported reloc %u against local symbol"),
956 object->name().c_str(), r_type);
957 }
958
959 // We are about to emit a dynamic relocation of type R_TYPE. If the
960 // dynamic linker does not support it, issue an error. The GNU linker
961 // only issues a non-PIC error for an allocated read-only section.
962 // Here we know the section is allocated, but we don't know that it is
963 // read-only. But we check for all the relocation types which the
964 // glibc dynamic linker supports, so it seems appropriate to issue an
965 // error even if the section is not read-only.
966
967 void
968 Target_x86_64::Scan::check_non_pic(Relobj* object, unsigned int r_type)
969 {
970 switch (r_type)
971 {
972 // These are the relocation types supported by glibc for x86_64.
973 case elfcpp::R_X86_64_RELATIVE:
974 case elfcpp::R_X86_64_GLOB_DAT:
975 case elfcpp::R_X86_64_JUMP_SLOT:
976 case elfcpp::R_X86_64_DTPMOD64:
977 case elfcpp::R_X86_64_DTPOFF64:
978 case elfcpp::R_X86_64_TPOFF64:
979 case elfcpp::R_X86_64_64:
980 case elfcpp::R_X86_64_32:
981 case elfcpp::R_X86_64_PC32:
982 case elfcpp::R_X86_64_COPY:
983 return;
984
985 default:
986 // This prevents us from issuing more than one error per reloc
987 // section. But we can still wind up issuing more than one
988 // error per object file.
989 if (this->issued_non_pic_error_)
990 return;
991 gold_assert(parameters->options().output_is_position_independent());
992 object->error(_("requires unsupported dynamic reloc; "
993 "recompile with -fPIC"));
994 this->issued_non_pic_error_ = true;
995 return;
996
997 case elfcpp::R_X86_64_NONE:
998 gold_unreachable();
999 }
1000 }
1001
1002 // Scan a relocation for a local symbol.
1003
1004 inline void
1005 Target_x86_64::Scan::local(Symbol_table* symtab,
1006 Layout* layout,
1007 Target_x86_64* target,
1008 Sized_relobj<64, false>* object,
1009 unsigned int data_shndx,
1010 Output_section* output_section,
1011 const elfcpp::Rela<64, false>& reloc,
1012 unsigned int r_type,
1013 const elfcpp::Sym<64, false>& lsym)
1014 {
1015 switch (r_type)
1016 {
1017 case elfcpp::R_X86_64_NONE:
1018 case elfcpp::R_386_GNU_VTINHERIT:
1019 case elfcpp::R_386_GNU_VTENTRY:
1020 break;
1021
1022 case elfcpp::R_X86_64_64:
1023 // If building a shared library (or a position-independent
1024 // executable), we need to create a dynamic relocation for this
1025 // location. The relocation applied at link time will apply the
1026 // link-time value, so we flag the location with an
1027 // R_X86_64_RELATIVE relocation so the dynamic loader can
1028 // relocate it easily.
1029 if (parameters->options().output_is_position_independent())
1030 {
1031 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1032 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1033 rela_dyn->add_local_relative(object, r_sym,
1034 elfcpp::R_X86_64_RELATIVE,
1035 output_section, data_shndx,
1036 reloc.get_r_offset(),
1037 reloc.get_r_addend());
1038 }
1039 break;
1040
1041 case elfcpp::R_X86_64_32:
1042 case elfcpp::R_X86_64_32S:
1043 case elfcpp::R_X86_64_16:
1044 case elfcpp::R_X86_64_8:
1045 // If building a shared library (or a position-independent
1046 // executable), we need to create a dynamic relocation for this
1047 // location. We can't use an R_X86_64_RELATIVE relocation
1048 // because that is always a 64-bit relocation.
1049 if (parameters->options().output_is_position_independent())
1050 {
1051 this->check_non_pic(object, r_type);
1052
1053 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1054 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1055 if (lsym.get_st_type() != elfcpp::STT_SECTION)
1056 rela_dyn->add_local(object, r_sym, r_type, output_section,
1057 data_shndx, reloc.get_r_offset(),
1058 reloc.get_r_addend());
1059 else
1060 {
1061 gold_assert(lsym.get_st_value() == 0);
1062 unsigned int shndx = lsym.get_st_shndx();
1063 bool is_ordinary;
1064 shndx = object->adjust_sym_shndx(r_sym, shndx,
1065 &is_ordinary);
1066 if (!is_ordinary)
1067 object->error(_("section symbol %u has bad shndx %u"),
1068 r_sym, shndx);
1069 else
1070 rela_dyn->add_local_section(object, shndx,
1071 r_type, output_section,
1072 data_shndx, reloc.get_r_offset(),
1073 reloc.get_r_addend());
1074 }
1075 }
1076 break;
1077
1078 case elfcpp::R_X86_64_PC64:
1079 case elfcpp::R_X86_64_PC32:
1080 case elfcpp::R_X86_64_PC16:
1081 case elfcpp::R_X86_64_PC8:
1082 break;
1083
1084 case elfcpp::R_X86_64_PLT32:
1085 // Since we know this is a local symbol, we can handle this as a
1086 // PC32 reloc.
1087 break;
1088
1089 case elfcpp::R_X86_64_GOTPC32:
1090 case elfcpp::R_X86_64_GOTOFF64:
1091 case elfcpp::R_X86_64_GOTPC64:
1092 case elfcpp::R_X86_64_PLTOFF64:
1093 // We need a GOT section.
1094 target->got_section(symtab, layout);
1095 // For PLTOFF64, we'd normally want a PLT section, but since we
1096 // know this is a local symbol, no PLT is needed.
1097 break;
1098
1099 case elfcpp::R_X86_64_GOT64:
1100 case elfcpp::R_X86_64_GOT32:
1101 case elfcpp::R_X86_64_GOTPCREL64:
1102 case elfcpp::R_X86_64_GOTPCREL:
1103 case elfcpp::R_X86_64_GOTPLT64:
1104 {
1105 // The symbol requires a GOT entry.
1106 Output_data_got<64, false>* got = target->got_section(symtab, layout);
1107 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1108 if (got->add_local(object, r_sym, GOT_TYPE_STANDARD))
1109 {
1110 // If we are generating position-independent output, we need to add a
1111 // dynamic relocation for this symbol's GOT entry.
1112 if (parameters->options().output_is_position_independent())
1113 {
1114 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1115 // R_X86_64_RELATIVE assumes a 64-bit relocation.
1116 if (r_type != elfcpp::R_X86_64_GOT32)
1117 rela_dyn->add_local_relative(
1118 object, r_sym, elfcpp::R_X86_64_RELATIVE, got,
1119 object->local_got_offset(r_sym, GOT_TYPE_STANDARD), 0);
1120 else
1121 {
1122 this->check_non_pic(object, r_type);
1123
1124 gold_assert(lsym.get_st_type() != elfcpp::STT_SECTION);
1125 rela_dyn->add_local(
1126 object, r_sym, r_type, got,
1127 object->local_got_offset(r_sym, GOT_TYPE_STANDARD), 0);
1128 }
1129 }
1130 }
1131 // For GOTPLT64, we'd normally want a PLT section, but since
1132 // we know this is a local symbol, no PLT is needed.
1133 }
1134 break;
1135
1136 case elfcpp::R_X86_64_COPY:
1137 case elfcpp::R_X86_64_GLOB_DAT:
1138 case elfcpp::R_X86_64_JUMP_SLOT:
1139 case elfcpp::R_X86_64_RELATIVE:
1140 // These are outstanding tls relocs, which are unexpected in input object files
1141 case elfcpp::R_X86_64_TPOFF64:
1142 case elfcpp::R_X86_64_DTPMOD64:
1143 case elfcpp::R_X86_64_TLSDESC:
1144 gold_error(_("%s: unexpected reloc %u in object file"),
1145 object->name().c_str(), r_type);
1146 break;
1147
1148 // These are initial tls relocs, which are expected when linking
1149 case elfcpp::R_X86_64_TLSGD: // Global-dynamic
1150 case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url)
1151 case elfcpp::R_X86_64_TLSDESC_CALL:
1152 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1153 case elfcpp::R_X86_64_DTPOFF32:
1154 case elfcpp::R_X86_64_DTPOFF64:
1155 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
1156 case elfcpp::R_X86_64_TPOFF32: // Local-exec
1157 {
1158 bool output_is_shared = parameters->options().shared();
1159 const tls::Tls_optimization optimized_type
1160 = Target_x86_64::optimize_tls_reloc(!output_is_shared, r_type);
1161 switch (r_type)
1162 {
1163 case elfcpp::R_X86_64_TLSGD: // General-dynamic
1164 if (optimized_type == tls::TLSOPT_NONE)
1165 {
1166 // Create a pair of GOT entries for the module index and
1167 // dtv-relative offset.
1168 Output_data_got<64, false>* got
1169 = target->got_section(symtab, layout);
1170 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1171 unsigned int shndx = lsym.get_st_shndx();
1172 bool is_ordinary;
1173 shndx = object->adjust_sym_shndx(r_sym, shndx, &is_ordinary);
1174 if (!is_ordinary)
1175 object->error(_("local symbol %u has bad shndx %u"),
1176 r_sym, shndx);
1177 else
1178 got->add_local_pair_with_rela(object, r_sym,
1179 shndx,
1180 GOT_TYPE_TLS_PAIR,
1181 target->rela_dyn_section(layout),
1182 elfcpp::R_X86_64_DTPMOD64, 0);
1183 }
1184 else if (optimized_type != tls::TLSOPT_TO_LE)
1185 unsupported_reloc_local(object, r_type);
1186 break;
1187
1188 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
1189 target->define_tls_base_symbol(symtab, layout);
1190 if (optimized_type == tls::TLSOPT_NONE)
1191 {
1192 // Create reserved PLT and GOT entries for the resolver.
1193 target->reserve_tlsdesc_entries(symtab, layout);
1194
1195 // Generate a double GOT entry with an R_X86_64_TLSDESC reloc.
1196 Output_data_got<64, false>* got
1197 = target->got_section(symtab, layout);
1198 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1199 unsigned int shndx = lsym.get_st_shndx();
1200 bool is_ordinary;
1201 shndx = object->adjust_sym_shndx(r_sym, shndx, &is_ordinary);
1202 if (!is_ordinary)
1203 object->error(_("local symbol %u has bad shndx %u"),
1204 r_sym, shndx);
1205 else
1206 got->add_local_pair_with_rela(object, r_sym,
1207 shndx,
1208 GOT_TYPE_TLS_DESC,
1209 target->rela_dyn_section(layout),
1210 elfcpp::R_X86_64_TLSDESC, 0);
1211 }
1212 else if (optimized_type != tls::TLSOPT_TO_LE)
1213 unsupported_reloc_local(object, r_type);
1214 break;
1215
1216 case elfcpp::R_X86_64_TLSDESC_CALL:
1217 break;
1218
1219 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1220 if (optimized_type == tls::TLSOPT_NONE)
1221 {
1222 // Create a GOT entry for the module index.
1223 target->got_mod_index_entry(symtab, layout, object);
1224 }
1225 else if (optimized_type != tls::TLSOPT_TO_LE)
1226 unsupported_reloc_local(object, r_type);
1227 break;
1228
1229 case elfcpp::R_X86_64_DTPOFF32:
1230 case elfcpp::R_X86_64_DTPOFF64:
1231 break;
1232
1233 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
1234 layout->set_has_static_tls();
1235 if (optimized_type == tls::TLSOPT_NONE)
1236 {
1237 // Create a GOT entry for the tp-relative offset.
1238 Output_data_got<64, false>* got
1239 = target->got_section(symtab, layout);
1240 unsigned int r_sym = elfcpp::elf_r_sym<64>(reloc.get_r_info());
1241 got->add_local_with_rela(object, r_sym, GOT_TYPE_TLS_OFFSET,
1242 target->rela_dyn_section(layout),
1243 elfcpp::R_X86_64_TPOFF64);
1244 }
1245 else if (optimized_type != tls::TLSOPT_TO_LE)
1246 unsupported_reloc_local(object, r_type);
1247 break;
1248
1249 case elfcpp::R_X86_64_TPOFF32: // Local-exec
1250 layout->set_has_static_tls();
1251 if (output_is_shared)
1252 unsupported_reloc_local(object, r_type);
1253 break;
1254
1255 default:
1256 gold_unreachable();
1257 }
1258 }
1259 break;
1260
1261 case elfcpp::R_X86_64_SIZE32:
1262 case elfcpp::R_X86_64_SIZE64:
1263 default:
1264 gold_error(_("%s: unsupported reloc %u against local symbol"),
1265 object->name().c_str(), r_type);
1266 break;
1267 }
1268 }
1269
1270
1271 // Report an unsupported relocation against a global symbol.
1272
1273 void
1274 Target_x86_64::Scan::unsupported_reloc_global(Sized_relobj<64, false>* object,
1275 unsigned int r_type,
1276 Symbol* gsym)
1277 {
1278 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
1279 object->name().c_str(), r_type, gsym->demangled_name().c_str());
1280 }
1281
1282 // Scan a relocation for a global symbol.
1283
1284 inline void
1285 Target_x86_64::Scan::global(Symbol_table* symtab,
1286 Layout* layout,
1287 Target_x86_64* target,
1288 Sized_relobj<64, false>* object,
1289 unsigned int data_shndx,
1290 Output_section* output_section,
1291 const elfcpp::Rela<64, false>& reloc,
1292 unsigned int r_type,
1293 Symbol* gsym)
1294 {
1295 switch (r_type)
1296 {
1297 case elfcpp::R_X86_64_NONE:
1298 case elfcpp::R_386_GNU_VTINHERIT:
1299 case elfcpp::R_386_GNU_VTENTRY:
1300 break;
1301
1302 case elfcpp::R_X86_64_64:
1303 case elfcpp::R_X86_64_32:
1304 case elfcpp::R_X86_64_32S:
1305 case elfcpp::R_X86_64_16:
1306 case elfcpp::R_X86_64_8:
1307 {
1308 // Make a PLT entry if necessary.
1309 if (gsym->needs_plt_entry())
1310 {
1311 target->make_plt_entry(symtab, layout, gsym);
1312 // Since this is not a PC-relative relocation, we may be
1313 // taking the address of a function. In that case we need to
1314 // set the entry in the dynamic symbol table to the address of
1315 // the PLT entry.
1316 if (gsym->is_from_dynobj() && !parameters->options().shared())
1317 gsym->set_needs_dynsym_value();
1318 }
1319 // Make a dynamic relocation if necessary.
1320 if (gsym->needs_dynamic_reloc(Symbol::ABSOLUTE_REF))
1321 {
1322 if (gsym->may_need_copy_reloc())
1323 {
1324 target->copy_reloc(symtab, layout, object,
1325 data_shndx, output_section, gsym, reloc);
1326 }
1327 else if (r_type == elfcpp::R_X86_64_64
1328 && gsym->can_use_relative_reloc(false))
1329 {
1330 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1331 rela_dyn->add_global_relative(gsym, elfcpp::R_X86_64_RELATIVE,
1332 output_section, object,
1333 data_shndx, reloc.get_r_offset(),
1334 reloc.get_r_addend());
1335 }
1336 else
1337 {
1338 this->check_non_pic(object, r_type);
1339 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1340 rela_dyn->add_global(gsym, r_type, output_section, object,
1341 data_shndx, reloc.get_r_offset(),
1342 reloc.get_r_addend());
1343 }
1344 }
1345 }
1346 break;
1347
1348 case elfcpp::R_X86_64_PC64:
1349 case elfcpp::R_X86_64_PC32:
1350 case elfcpp::R_X86_64_PC16:
1351 case elfcpp::R_X86_64_PC8:
1352 {
1353 // Make a PLT entry if necessary.
1354 if (gsym->needs_plt_entry())
1355 target->make_plt_entry(symtab, layout, gsym);
1356 // Make a dynamic relocation if necessary.
1357 int flags = Symbol::NON_PIC_REF;
1358 if (gsym->is_func())
1359 flags |= Symbol::FUNCTION_CALL;
1360 if (gsym->needs_dynamic_reloc(flags))
1361 {
1362 if (gsym->may_need_copy_reloc())
1363 {
1364 target->copy_reloc(symtab, layout, object,
1365 data_shndx, output_section, gsym, reloc);
1366 }
1367 else
1368 {
1369 this->check_non_pic(object, r_type);
1370 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1371 rela_dyn->add_global(gsym, r_type, output_section, object,
1372 data_shndx, reloc.get_r_offset(),
1373 reloc.get_r_addend());
1374 }
1375 }
1376 }
1377 break;
1378
1379 case elfcpp::R_X86_64_GOT64:
1380 case elfcpp::R_X86_64_GOT32:
1381 case elfcpp::R_X86_64_GOTPCREL64:
1382 case elfcpp::R_X86_64_GOTPCREL:
1383 case elfcpp::R_X86_64_GOTPLT64:
1384 {
1385 // The symbol requires a GOT entry.
1386 Output_data_got<64, false>* got = target->got_section(symtab, layout);
1387 if (gsym->final_value_is_known())
1388 got->add_global(gsym, GOT_TYPE_STANDARD);
1389 else
1390 {
1391 // If this symbol is not fully resolved, we need to add a
1392 // dynamic relocation for it.
1393 Reloc_section* rela_dyn = target->rela_dyn_section(layout);
1394 if (gsym->is_from_dynobj()
1395 || gsym->is_undefined()
1396 || gsym->is_preemptible())
1397 got->add_global_with_rela(gsym, GOT_TYPE_STANDARD, rela_dyn,
1398 elfcpp::R_X86_64_GLOB_DAT);
1399 else
1400 {
1401 if (got->add_global(gsym, GOT_TYPE_STANDARD))
1402 rela_dyn->add_global_relative(
1403 gsym, elfcpp::R_X86_64_RELATIVE, got,
1404 gsym->got_offset(GOT_TYPE_STANDARD), 0);
1405 }
1406 }
1407 // For GOTPLT64, we also need a PLT entry (but only if the
1408 // symbol is not fully resolved).
1409 if (r_type == elfcpp::R_X86_64_GOTPLT64
1410 && !gsym->final_value_is_known())
1411 target->make_plt_entry(symtab, layout, gsym);
1412 }
1413 break;
1414
1415 case elfcpp::R_X86_64_PLT32:
1416 // If the symbol is fully resolved, this is just a PC32 reloc.
1417 // Otherwise we need a PLT entry.
1418 if (gsym->final_value_is_known())
1419 break;
1420 // If building a shared library, we can also skip the PLT entry
1421 // if the symbol is defined in the output file and is protected
1422 // or hidden.
1423 if (gsym->is_defined()
1424 && !gsym->is_from_dynobj()
1425 && !gsym->is_preemptible())
1426 break;
1427 target->make_plt_entry(symtab, layout, gsym);
1428 break;
1429
1430 case elfcpp::R_X86_64_GOTPC32:
1431 case elfcpp::R_X86_64_GOTOFF64:
1432 case elfcpp::R_X86_64_GOTPC64:
1433 case elfcpp::R_X86_64_PLTOFF64:
1434 // We need a GOT section.
1435 target->got_section(symtab, layout);
1436 // For PLTOFF64, we also need a PLT entry (but only if the
1437 // symbol is not fully resolved).
1438 if (r_type == elfcpp::R_X86_64_PLTOFF64
1439 && !gsym->final_value_is_known())
1440 target->make_plt_entry(symtab, layout, gsym);
1441 break;
1442
1443 case elfcpp::R_X86_64_COPY:
1444 case elfcpp::R_X86_64_GLOB_DAT:
1445 case elfcpp::R_X86_64_JUMP_SLOT:
1446 case elfcpp::R_X86_64_RELATIVE:
1447 // These are outstanding tls relocs, which are unexpected in input object files
1448 case elfcpp::R_X86_64_TPOFF64:
1449 case elfcpp::R_X86_64_DTPMOD64:
1450 case elfcpp::R_X86_64_TLSDESC:
1451 gold_error(_("%s: unexpected reloc %u in object file"),
1452 object->name().c_str(), r_type);
1453 break;
1454
1455 // These are initial tls relocs, which are expected for global()
1456 case elfcpp::R_X86_64_TLSGD: // Global-dynamic
1457 case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url)
1458 case elfcpp::R_X86_64_TLSDESC_CALL:
1459 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1460 case elfcpp::R_X86_64_DTPOFF32:
1461 case elfcpp::R_X86_64_DTPOFF64:
1462 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
1463 case elfcpp::R_X86_64_TPOFF32: // Local-exec
1464 {
1465 const bool is_final = gsym->final_value_is_known();
1466 const tls::Tls_optimization optimized_type
1467 = Target_x86_64::optimize_tls_reloc(is_final, r_type);
1468 switch (r_type)
1469 {
1470 case elfcpp::R_X86_64_TLSGD: // General-dynamic
1471 if (optimized_type == tls::TLSOPT_NONE)
1472 {
1473 // Create a pair of GOT entries for the module index and
1474 // dtv-relative offset.
1475 Output_data_got<64, false>* got
1476 = target->got_section(symtab, layout);
1477 got->add_global_pair_with_rela(gsym, GOT_TYPE_TLS_PAIR,
1478 target->rela_dyn_section(layout),
1479 elfcpp::R_X86_64_DTPMOD64,
1480 elfcpp::R_X86_64_DTPOFF64);
1481 }
1482 else if (optimized_type == tls::TLSOPT_TO_IE)
1483 {
1484 // Create a GOT entry for the tp-relative offset.
1485 Output_data_got<64, false>* got
1486 = target->got_section(symtab, layout);
1487 got->add_global_with_rela(gsym, GOT_TYPE_TLS_OFFSET,
1488 target->rela_dyn_section(layout),
1489 elfcpp::R_X86_64_TPOFF64);
1490 }
1491 else if (optimized_type != tls::TLSOPT_TO_LE)
1492 unsupported_reloc_global(object, r_type, gsym);
1493 break;
1494
1495 case elfcpp::R_X86_64_GOTPC32_TLSDESC:
1496 target->define_tls_base_symbol(symtab, layout);
1497 if (optimized_type == tls::TLSOPT_NONE)
1498 {
1499 // Create reserved PLT and GOT entries for the resolver.
1500 target->reserve_tlsdesc_entries(symtab, layout);
1501
1502 // Create a double GOT entry with an R_X86_64_TLSDESC reloc.
1503 Output_data_got<64, false>* got
1504 = target->got_section(symtab, layout);
1505 got->add_global_pair_with_rela(gsym, GOT_TYPE_TLS_DESC,
1506 target->rela_dyn_section(layout),
1507 elfcpp::R_X86_64_TLSDESC, 0);
1508 }
1509 else if (optimized_type == tls::TLSOPT_TO_IE)
1510 {
1511 // Create a GOT entry for the tp-relative offset.
1512 Output_data_got<64, false>* got
1513 = target->got_section(symtab, layout);
1514 got->add_global_with_rela(gsym, GOT_TYPE_TLS_OFFSET,
1515 target->rela_dyn_section(layout),
1516 elfcpp::R_X86_64_TPOFF64);
1517 }
1518 else if (optimized_type != tls::TLSOPT_TO_LE)
1519 unsupported_reloc_global(object, r_type, gsym);
1520 break;
1521
1522 case elfcpp::R_X86_64_TLSDESC_CALL:
1523 break;
1524
1525 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1526 if (optimized_type == tls::TLSOPT_NONE)
1527 {
1528 // Create a GOT entry for the module index.
1529 target->got_mod_index_entry(symtab, layout, object);
1530 }
1531 else if (optimized_type != tls::TLSOPT_TO_LE)
1532 unsupported_reloc_global(object, r_type, gsym);
1533 break;
1534
1535 case elfcpp::R_X86_64_DTPOFF32:
1536 case elfcpp::R_X86_64_DTPOFF64:
1537 break;
1538
1539 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
1540 layout->set_has_static_tls();
1541 if (optimized_type == tls::TLSOPT_NONE)
1542 {
1543 // Create a GOT entry for the tp-relative offset.
1544 Output_data_got<64, false>* got
1545 = target->got_section(symtab, layout);
1546 got->add_global_with_rela(gsym, GOT_TYPE_TLS_OFFSET,
1547 target->rela_dyn_section(layout),
1548 elfcpp::R_X86_64_TPOFF64);
1549 }
1550 else if (optimized_type != tls::TLSOPT_TO_LE)
1551 unsupported_reloc_global(object, r_type, gsym);
1552 break;
1553
1554 case elfcpp::R_X86_64_TPOFF32: // Local-exec
1555 layout->set_has_static_tls();
1556 if (parameters->options().shared())
1557 unsupported_reloc_global(object, r_type, gsym);
1558 break;
1559
1560 default:
1561 gold_unreachable();
1562 }
1563 }
1564 break;
1565
1566 case elfcpp::R_X86_64_SIZE32:
1567 case elfcpp::R_X86_64_SIZE64:
1568 default:
1569 gold_error(_("%s: unsupported reloc %u against global symbol %s"),
1570 object->name().c_str(), r_type,
1571 gsym->demangled_name().c_str());
1572 break;
1573 }
1574 }
1575
1576 void
1577 Target_x86_64::gc_process_relocs(Symbol_table* symtab,
1578 Layout* layout,
1579 Sized_relobj<64, false>* object,
1580 unsigned int data_shndx,
1581 unsigned int sh_type,
1582 const unsigned char* prelocs,
1583 size_t reloc_count,
1584 Output_section* output_section,
1585 bool needs_special_offset_handling,
1586 size_t local_symbol_count,
1587 const unsigned char* plocal_symbols)
1588 {
1589
1590 if (sh_type == elfcpp::SHT_REL)
1591 {
1592 return;
1593 }
1594
1595 gold::gc_process_relocs<64, false, Target_x86_64, elfcpp::SHT_RELA,
1596 Target_x86_64::Scan>(
1597 symtab,
1598 layout,
1599 this,
1600 object,
1601 data_shndx,
1602 prelocs,
1603 reloc_count,
1604 output_section,
1605 needs_special_offset_handling,
1606 local_symbol_count,
1607 plocal_symbols);
1608
1609 }
1610 // Scan relocations for a section.
1611
1612 void
1613 Target_x86_64::scan_relocs(Symbol_table* symtab,
1614 Layout* layout,
1615 Sized_relobj<64, false>* object,
1616 unsigned int data_shndx,
1617 unsigned int sh_type,
1618 const unsigned char* prelocs,
1619 size_t reloc_count,
1620 Output_section* output_section,
1621 bool needs_special_offset_handling,
1622 size_t local_symbol_count,
1623 const unsigned char* plocal_symbols)
1624 {
1625 if (sh_type == elfcpp::SHT_REL)
1626 {
1627 gold_error(_("%s: unsupported REL reloc section"),
1628 object->name().c_str());
1629 return;
1630 }
1631
1632 gold::scan_relocs<64, false, Target_x86_64, elfcpp::SHT_RELA,
1633 Target_x86_64::Scan>(
1634 symtab,
1635 layout,
1636 this,
1637 object,
1638 data_shndx,
1639 prelocs,
1640 reloc_count,
1641 output_section,
1642 needs_special_offset_handling,
1643 local_symbol_count,
1644 plocal_symbols);
1645 }
1646
1647 // Finalize the sections.
1648
1649 void
1650 Target_x86_64::do_finalize_sections(
1651 Layout* layout,
1652 const Input_objects*,
1653 Symbol_table*)
1654 {
1655 // Fill in some more dynamic tags.
1656 Output_data_dynamic* const odyn = layout->dynamic_data();
1657 if (odyn != NULL)
1658 {
1659 if (this->got_plt_ != NULL
1660 && this->got_plt_->output_section() != NULL)
1661 odyn->add_section_address(elfcpp::DT_PLTGOT, this->got_plt_);
1662
1663 if (this->plt_ != NULL
1664 && this->plt_->output_section() != NULL)
1665 {
1666 const Output_data* od = this->plt_->rel_plt();
1667 odyn->add_section_size(elfcpp::DT_PLTRELSZ, od);
1668 odyn->add_section_address(elfcpp::DT_JMPREL, od);
1669 odyn->add_constant(elfcpp::DT_PLTREL, elfcpp::DT_RELA);
1670 if (this->plt_->has_tlsdesc_entry())
1671 {
1672 unsigned int plt_offset = this->plt_->get_tlsdesc_plt_offset();
1673 unsigned int got_offset = this->plt_->get_tlsdesc_got_offset();
1674 this->got_->finalize_data_size();
1675 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_PLT,
1676 this->plt_, plt_offset);
1677 odyn->add_section_plus_offset(elfcpp::DT_TLSDESC_GOT,
1678 this->got_, got_offset);
1679 }
1680 }
1681
1682 if (this->rela_dyn_ != NULL
1683 && this->rela_dyn_->output_section() != NULL)
1684 {
1685 const Output_data* od = this->rela_dyn_;
1686 odyn->add_section_address(elfcpp::DT_RELA, od);
1687 odyn->add_section_size(elfcpp::DT_RELASZ, od);
1688 odyn->add_constant(elfcpp::DT_RELAENT,
1689 elfcpp::Elf_sizes<64>::rela_size);
1690 }
1691
1692 if (!parameters->options().shared())
1693 {
1694 // The value of the DT_DEBUG tag is filled in by the dynamic
1695 // linker at run time, and used by the debugger.
1696 odyn->add_constant(elfcpp::DT_DEBUG, 0);
1697 }
1698 }
1699
1700 // Emit any relocs we saved in an attempt to avoid generating COPY
1701 // relocs.
1702 if (this->copy_relocs_.any_saved_relocs())
1703 this->copy_relocs_.emit(this->rela_dyn_section(layout));
1704 }
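
// For illustration: on a typical dynamic link the tags added above are what
// `readelf -d` shows as DT_PLTGOT, DT_PLTRELSZ, DT_JMPREL, DT_PLTREL (RELA),
// DT_RELA / DT_RELASZ / DT_RELAENT and, for non-shared output, DT_DEBUG;
// DT_TLSDESC_PLT and DT_TLSDESC_GOT appear only when a TLS descriptor
// resolver entry was reserved.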
1705
1706 // Perform a relocation.
1707
1708 inline bool
1709 Target_x86_64::Relocate::relocate(const Relocate_info<64, false>* relinfo,
1710 Target_x86_64* target,
1711 Output_section*,
1712 size_t relnum,
1713 const elfcpp::Rela<64, false>& rela,
1714 unsigned int r_type,
1715 const Sized_symbol<64>* gsym,
1716 const Symbol_value<64>* psymval,
1717 unsigned char* view,
1718 elfcpp::Elf_types<64>::Elf_Addr address,
1719 section_size_type view_size)
1720 {
1721 if (this->skip_call_tls_get_addr_)
1722 {
1723 if ((r_type != elfcpp::R_X86_64_PLT32
1724 && r_type != elfcpp::R_X86_64_PC32)
1725 || gsym == NULL
1726 || strcmp(gsym->name(), "__tls_get_addr") != 0)
1727 {
1728 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1729 _("missing expected TLS relocation"));
1730 }
1731 else
1732 {
1733 this->skip_call_tls_get_addr_ = false;
1734 return false;
1735 }
1736 }
1737
1738 // Pick the value to use for symbols defined in shared objects.
1739 Symbol_value<64> symval;
1740 if (gsym != NULL
1741 && gsym->use_plt_offset(r_type == elfcpp::R_X86_64_PC64
1742 || r_type == elfcpp::R_X86_64_PC32
1743 || r_type == elfcpp::R_X86_64_PC16
1744 || r_type == elfcpp::R_X86_64_PC8))
1745 {
1746 symval.set_output_value(target->plt_section()->address()
1747 + gsym->plt_offset());
1748 psymval = &symval;
1749 }
1750
1751 const Sized_relobj<64, false>* object = relinfo->object;
1752 const elfcpp::Elf_Xword addend = rela.get_r_addend();
1753
1754 // Get the GOT offset if needed.
1755 // The GOT pointer points to the end of the GOT section.
1756 // We need to subtract the size of the GOT section to get
1757 // the actual offset to use in the relocation.
1758 bool have_got_offset = false;
1759 unsigned int got_offset = 0;
1760 switch (r_type)
1761 {
1762 case elfcpp::R_X86_64_GOT32:
1763 case elfcpp::R_X86_64_GOT64:
1764 case elfcpp::R_X86_64_GOTPLT64:
1765 case elfcpp::R_X86_64_GOTPCREL:
1766 case elfcpp::R_X86_64_GOTPCREL64:
1767 if (gsym != NULL)
1768 {
1769 gold_assert(gsym->has_got_offset(GOT_TYPE_STANDARD));
1770 got_offset = gsym->got_offset(GOT_TYPE_STANDARD) - target->got_size();
1771 }
1772 else
1773 {
1774 unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info());
1775 gold_assert(object->local_has_got_offset(r_sym, GOT_TYPE_STANDARD));
1776 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_STANDARD)
1777 - target->got_size());
1778 }
1779 have_got_offset = true;
1780 break;
1781
1782 default:
1783 break;
1784 }
1785
1786 switch (r_type)
1787 {
1788 case elfcpp::R_X86_64_NONE:
1789 case elfcpp::R_386_GNU_VTINHERIT:
1790 case elfcpp::R_386_GNU_VTENTRY:
1791 break;
1792
1793 case elfcpp::R_X86_64_64:
1794 Relocate_functions<64, false>::rela64(view, object, psymval, addend);
1795 break;
1796
1797 case elfcpp::R_X86_64_PC64:
1798 Relocate_functions<64, false>::pcrela64(view, object, psymval, addend,
1799 address);
1800 break;
1801
1802 case elfcpp::R_X86_64_32:
1803 // FIXME: we need to verify that value + addend fits into 32 bits:
1804 // uint64_t x = value + addend;
1805 // x == static_cast<uint64_t>(static_cast<uint32_t>(x))
1806 // Likewise for other <=32-bit relocations (but see R_X86_64_32S).
1807 Relocate_functions<64, false>::rela32(view, object, psymval, addend);
1808 break;
1809
1810 case elfcpp::R_X86_64_32S:
1811 // FIXME: we need to verify that value + addend fits into 32 bits:
1812 // int64_t x = value + addend; // note this quantity is signed!
1813 // x == static_cast<int64_t>(static_cast<int32_t>(x))
1814 Relocate_functions<64, false>::rela32(view, object, psymval, addend);
1815 break;
1816
1817 case elfcpp::R_X86_64_PC32:
1818 Relocate_functions<64, false>::pcrela32(view, object, psymval, addend,
1819 address);
1820 break;
1821
1822 case elfcpp::R_X86_64_16:
1823 Relocate_functions<64, false>::rela16(view, object, psymval, addend);
1824 break;
1825
1826 case elfcpp::R_X86_64_PC16:
1827 Relocate_functions<64, false>::pcrela16(view, object, psymval, addend,
1828 address);
1829 break;
1830
1831 case elfcpp::R_X86_64_8:
1832 Relocate_functions<64, false>::rela8(view, object, psymval, addend);
1833 break;
1834
1835 case elfcpp::R_X86_64_PC8:
1836 Relocate_functions<64, false>::pcrela8(view, object, psymval, addend,
1837 address);
1838 break;
1839
1840 case elfcpp::R_X86_64_PLT32:
1841 gold_assert(gsym == NULL
1842 || gsym->has_plt_offset()
1843 || gsym->final_value_is_known()
1844 || (gsym->is_defined()
1845 && !gsym->is_from_dynobj()
1846 && !gsym->is_preemptible()));
1847 // Note: while this code looks the same as for R_X86_64_PC32, it
1848 // behaves differently because psymval was set to point to
1849 // the PLT entry, rather than the symbol, in Scan::global().
1850 Relocate_functions<64, false>::pcrela32(view, object, psymval, addend,
1851 address);
1852 break;
1853
1854 case elfcpp::R_X86_64_PLTOFF64:
1855 {
1856 gold_assert(gsym);
1857 gold_assert(gsym->has_plt_offset()
1858 || gsym->final_value_is_known());
1859 elfcpp::Elf_types<64>::Elf_Addr got_address;
1860 got_address = target->got_section(NULL, NULL)->address();
1861 Relocate_functions<64, false>::rela64(view, object, psymval,
1862 addend - got_address);
1863 }
      break;
1864
1865 case elfcpp::R_X86_64_GOT32:
1866 gold_assert(have_got_offset);
1867 Relocate_functions<64, false>::rela32(view, got_offset, addend);
1868 break;
1869
1870 case elfcpp::R_X86_64_GOTPC32:
1871 {
1872 gold_assert(gsym);
1873 elfcpp::Elf_types<64>::Elf_Addr value;
1874 value = target->got_plt_section()->address();
1875 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
1876 }
1877 break;
1878
1879 case elfcpp::R_X86_64_GOT64:
1880 // The ABI doc says "Like GOT64, but indicates a PLT entry is needed."
1881 // Since we always add a PLT entry, this is equivalent.
1882 case elfcpp::R_X86_64_GOTPLT64:
1883 gold_assert(have_got_offset);
1884 Relocate_functions<64, false>::rela64(view, got_offset, addend);
1885 break;
1886
1887 case elfcpp::R_X86_64_GOTPC64:
1888 {
1889 gold_assert(gsym);
1890 elfcpp::Elf_types<64>::Elf_Addr value;
1891 value = target->got_plt_section()->address();
1892 Relocate_functions<64, false>::pcrela64(view, value, addend, address);
1893 }
1894 break;
1895
1896 case elfcpp::R_X86_64_GOTOFF64:
1897 {
1898 elfcpp::Elf_types<64>::Elf_Addr value;
1899 value = (psymval->value(object, 0)
1900 - target->got_plt_section()->address());
1901 Relocate_functions<64, false>::rela64(view, value, addend);
1902 }
1903 break;
1904
1905 case elfcpp::R_X86_64_GOTPCREL:
1906 {
1907 gold_assert(have_got_offset);
1908 elfcpp::Elf_types<64>::Elf_Addr value;
1909 value = target->got_plt_section()->address() + got_offset;
1910 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
1911 }
1912 break;
1913
1914 case elfcpp::R_X86_64_GOTPCREL64:
1915 {
1916 gold_assert(have_got_offset);
1917 elfcpp::Elf_types<64>::Elf_Addr value;
1918 value = target->got_plt_section()->address() + got_offset;
1919 Relocate_functions<64, false>::pcrela64(view, value, addend, address);
1920 }
1921 break;
1922
1923 case elfcpp::R_X86_64_COPY:
1924 case elfcpp::R_X86_64_GLOB_DAT:
1925 case elfcpp::R_X86_64_JUMP_SLOT:
1926 case elfcpp::R_X86_64_RELATIVE:
1927 // These are outstanding tls relocs, which are unexpected when linking
1928 case elfcpp::R_X86_64_TPOFF64:
1929 case elfcpp::R_X86_64_DTPMOD64:
1930 case elfcpp::R_X86_64_TLSDESC:
1931 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1932 _("unexpected reloc %u in object file"),
1933 r_type);
1934 break;
1935
1936 // These are initial tls relocs, which are expected when linking
1937 case elfcpp::R_X86_64_TLSGD: // Global-dynamic
1938 case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url)
1939 case elfcpp::R_X86_64_TLSDESC_CALL:
1940 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
1941 case elfcpp::R_X86_64_DTPOFF32:
1942 case elfcpp::R_X86_64_DTPOFF64:
1943 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
1944 case elfcpp::R_X86_64_TPOFF32: // Local-exec
1945 this->relocate_tls(relinfo, target, relnum, rela, r_type, gsym, psymval,
1946 view, address, view_size);
1947 break;
1948
1949 case elfcpp::R_X86_64_SIZE32:
1950 case elfcpp::R_X86_64_SIZE64:
1951 default:
1952 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
1953 _("unsupported reloc %u"),
1954 r_type);
1955 break;
1956 }
1957
1958 return true;
1959 }
1960
1961 // Perform a TLS relocation.
1962
1963 inline void
1964 Target_x86_64::Relocate::relocate_tls(const Relocate_info<64, false>* relinfo,
1965 Target_x86_64* target,
1966 size_t relnum,
1967 const elfcpp::Rela<64, false>& rela,
1968 unsigned int r_type,
1969 const Sized_symbol<64>* gsym,
1970 const Symbol_value<64>* psymval,
1971 unsigned char* view,
1972 elfcpp::Elf_types<64>::Elf_Addr address,
1973 section_size_type view_size)
1974 {
1975 Output_segment* tls_segment = relinfo->layout->tls_segment();
1976
1977 const Sized_relobj<64, false>* object = relinfo->object;
1978 const elfcpp::Elf_Xword addend = rela.get_r_addend();
1979
1980 elfcpp::Elf_types<64>::Elf_Addr value = psymval->value(relinfo->object, 0);
1981
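  // A TLS access can only be relaxed to a tighter model when the
  // symbol's final value is known at link time; optimize_tls_reloc
  // uses this to pick the transition (if any) for this reloc.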
1982 const bool is_final = (gsym == NULL
1983 ? !parameters->options().output_is_position_independent()
1984 : gsym->final_value_is_known());
1985 const tls::Tls_optimization optimized_type
1986 = Target_x86_64::optimize_tls_reloc(is_final, r_type);
1987 switch (r_type)
1988 {
1989 case elfcpp::R_X86_64_TLSGD: // Global-dynamic
1990 this->saw_tls_block_reloc_ = true;
1991 if (optimized_type == tls::TLSOPT_TO_LE)
1992 {
1993 gold_assert(tls_segment != NULL);
1994 this->tls_gd_to_le(relinfo, relnum, tls_segment,
1995 rela, r_type, value, view,
1996 view_size);
1997 break;
1998 }
1999 else
2000 {
2001 unsigned int got_type = (optimized_type == tls::TLSOPT_TO_IE
2002 ? GOT_TYPE_TLS_OFFSET
2003 : GOT_TYPE_TLS_PAIR);
2004 unsigned int got_offset;
2005 if (gsym != NULL)
2006 {
2007 gold_assert(gsym->has_got_offset(got_type));
2008 got_offset = gsym->got_offset(got_type) - target->got_size();
2009 }
2010 else
2011 {
2012 unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info());
2013 gold_assert(object->local_has_got_offset(r_sym, got_type));
2014 got_offset = (object->local_got_offset(r_sym, got_type)
2015 - target->got_size());
2016 }
2017 if (optimized_type == tls::TLSOPT_TO_IE)
2018 {
2019 gold_assert(tls_segment != NULL);
2020 value = target->got_plt_section()->address() + got_offset;
2021 this->tls_gd_to_ie(relinfo, relnum, tls_segment, rela, r_type,
2022 value, view, address, view_size);
2023 break;
2024 }
2025 else if (optimized_type == tls::TLSOPT_NONE)
2026 {
2027 // Relocate the field with the offset of the pair of GOT
2028 // entries.
2029 value = target->got_plt_section()->address() + got_offset;
2030 Relocate_functions<64, false>::pcrela32(view, value, addend,
2031 address);
2032 break;
2033 }
2034 }
2035 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
2036 _("unsupported reloc %u"), r_type);
2037 break;
2038
2039 case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url)
2040 case elfcpp::R_X86_64_TLSDESC_CALL:
2041 this->saw_tls_block_reloc_ = true;
2042 if (optimized_type == tls::TLSOPT_TO_LE)
2043 {
2044 gold_assert(tls_segment != NULL);
2045 this->tls_desc_gd_to_le(relinfo, relnum, tls_segment,
2046 rela, r_type, value, view,
2047 view_size);
2048 break;
2049 }
2050 else
2051 {
2052 unsigned int got_type = (optimized_type == tls::TLSOPT_TO_IE
2053 ? GOT_TYPE_TLS_OFFSET
2054 : GOT_TYPE_TLS_DESC);
2055 unsigned int got_offset;
2056 if (gsym != NULL)
2057 {
2058 gold_assert(gsym->has_got_offset(got_type));
2059 got_offset = gsym->got_offset(got_type) - target->got_size();
2060 }
2061 else
2062 {
2063 unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info());
2064 gold_assert(object->local_has_got_offset(r_sym, got_type));
2065 got_offset = (object->local_got_offset(r_sym, got_type)
2066 - target->got_size());
2067 }
2068 if (optimized_type == tls::TLSOPT_TO_IE)
2069 {
2070 gold_assert(tls_segment != NULL);
2071 value = target->got_plt_section()->address() + got_offset;
2072 this->tls_desc_gd_to_ie(relinfo, relnum, tls_segment,
2073 rela, r_type, value, view, address,
2074 view_size);
2075 break;
2076 }
2077 else if (optimized_type == tls::TLSOPT_NONE)
2078 {
2079 if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC)
2080 {
2081 // Relocate the field with the offset of the pair of GOT
2082 // entries.
2083 value = target->got_plt_section()->address() + got_offset;
2084 Relocate_functions<64, false>::pcrela32(view, value, addend,
2085 address);
2086 }
2087 break;
2088 }
2089 }
2090 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
2091 _("unsupported reloc %u"), r_type);
2092 break;
2093
2094 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
2095 this->saw_tls_block_reloc_ = true;
2096 if (optimized_type == tls::TLSOPT_TO_LE)
2097 {
2098 gold_assert(tls_segment != NULL);
2099 this->tls_ld_to_le(relinfo, relnum, tls_segment, rela, r_type,
2100 value, view, view_size);
2101 break;
2102 }
2103 else if (optimized_type == tls::TLSOPT_NONE)
2104 {
2105 // Relocate the field with the offset of the GOT entry for
2106 // the module index.
2107 unsigned int got_offset;
2108 got_offset = (target->got_mod_index_entry(NULL, NULL, NULL)
2109 - target->got_size());
2110 value = target->got_plt_section()->address() + got_offset;
2111 Relocate_functions<64, false>::pcrela32(view, value, addend,
2112 address);
2113 break;
2114 }
2115 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
2116 _("unsupported reloc %u"), r_type);
2117 break;
2118
2119 case elfcpp::R_X86_64_DTPOFF32:
2120 if (optimized_type == tls::TLSOPT_TO_LE)
2121 {
2122 // This relocation type is used in debugging information.
2123 // In that case we must not optimize the value. If we have not
2124 // seen a TLSLD reloc, we assume this reloc should not be
2125 // optimized.
2126 if (this->saw_tls_block_reloc_)
2127 {
2128 gold_assert(tls_segment != NULL);
2129 value -= tls_segment->memsz();
2130 }
2131 }
2132 Relocate_functions<64, false>::rela32(view, value, addend);
2133 break;
2134
2135 case elfcpp::R_X86_64_DTPOFF64:
2136 if (optimized_type == tls::TLSOPT_TO_LE)
2137 {
2138 // See R_X86_64_DTPOFF32, just above, for why we test this.
2139 if (this->saw_tls_block_reloc_)
2140 {
2141 gold_assert(tls_segment != NULL);
2142 value -= tls_segment->memsz();
2143 }
2144 }
2145 Relocate_functions<64, false>::rela64(view, value, addend);
2146 break;
2147
2148 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
2149 if (optimized_type == tls::TLSOPT_TO_LE)
2150 {
2151 gold_assert(tls_segment != NULL);
2152 Target_x86_64::Relocate::tls_ie_to_le(relinfo, relnum, tls_segment,
2153 rela, r_type, value, view,
2154 view_size);
2155 break;
2156 }
2157 else if (optimized_type == tls::TLSOPT_NONE)
2158 {
2159 // Relocate the field with the offset of the GOT entry for
2160 // the tp-relative offset of the symbol.
2161 unsigned int got_offset;
2162 if (gsym != NULL)
2163 {
2164 gold_assert(gsym->has_got_offset(GOT_TYPE_TLS_OFFSET));
2165 got_offset = (gsym->got_offset(GOT_TYPE_TLS_OFFSET)
2166 - target->got_size());
2167 }
2168 else
2169 {
2170 unsigned int r_sym = elfcpp::elf_r_sym<64>(rela.get_r_info());
2171 gold_assert(object->local_has_got_offset(r_sym,
2172 GOT_TYPE_TLS_OFFSET));
2173 got_offset = (object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET)
2174 - target->got_size());
2175 }
2176 value = target->got_plt_section()->address() + got_offset;
2177 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
2178 break;
2179 }
2180 gold_error_at_location(relinfo, relnum, rela.get_r_offset(),
2181 _("unsupported reloc type %u"),
2182 r_type);
2183 break;
2184
2185 case elfcpp::R_X86_64_TPOFF32: // Local-exec
2186 value -= tls_segment->memsz();
2187 Relocate_functions<64, false>::rela32(view, value, addend);
2188 break;
2189 }
2190 }
2191
2192 // Do a relocation in which we convert a TLS General-Dynamic to an
2193 // Initial-Exec.
2194
2195 inline void
2196 Target_x86_64::Relocate::tls_gd_to_ie(const Relocate_info<64, false>* relinfo,
2197 size_t relnum,
2198 Output_segment*,
2199 const elfcpp::Rela<64, false>& rela,
2200 unsigned int,
2201 elfcpp::Elf_types<64>::Elf_Addr value,
2202 unsigned char* view,
2203 elfcpp::Elf_types<64>::Elf_Addr address,
2204 section_size_type view_size)
2205 {
2206 // .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
2207 // .word 0x6666; rex64; call __tls_get_addr
2208 // ==> movq %fs:0,%rax; addq x@gottpoff(%rip),%rax
2209
2210 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -4);
2211 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 12);
2212
2213 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2214 (memcmp(view - 4, "\x66\x48\x8d\x3d", 4) == 0));
2215 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2216 (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0));
2217
2218 memcpy(view - 4, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0\0", 16);
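  // The new @gottpoff displacement is the imm32 of the addq, which
  // starts 8 bytes past the original reloc offset; biasing the addend
  // by -8 keeps the PC-relative calculation correct for that spot.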
2219
2220 const elfcpp::Elf_Xword addend = rela.get_r_addend();
2221 Relocate_functions<64, false>::pcrela32(view + 8, value, addend - 8, address);
2222
2223 // The next reloc should be a PLT32 reloc against __tls_get_addr.
2224 // We can skip it.
2225 this->skip_call_tls_get_addr_ = true;
2226 }
2227
2228 // Do a relocation in which we convert a TLS General-Dynamic to a
2229 // Local-Exec.
2230
2231 inline void
2232 Target_x86_64::Relocate::tls_gd_to_le(const Relocate_info<64, false>* relinfo,
2233 size_t relnum,
2234 Output_segment* tls_segment,
2235 const elfcpp::Rela<64, false>& rela,
2236 unsigned int,
2237 elfcpp::Elf_types<64>::Elf_Addr value,
2238 unsigned char* view,
2239 section_size_type view_size)
2240 {
2241 // .byte 0x66; leaq foo@tlsgd(%rip),%rdi;
2242 // .word 0x6666; rex64; call __tls_get_addr
2243 // ==> movq %fs:0,%rax; leaq x@tpoff(%rax),%rax
2244
2245 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -4);
2246 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 12);
2247
2248 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2249 (memcmp(view - 4, "\x66\x48\x8d\x3d", 4) == 0));
2250 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2251 (memcmp(view + 4, "\x66\x66\x48\xe8", 4) == 0));
2252
2253 memcpy(view - 4, "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0\0", 16);
2254
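  // The @tpoff immediate is the last 4 bytes of the replacement
  // (view + 8). Subtracting the TLS segment size converts the symbol
  // value into its (negative) offset from the thread pointer.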
2255 value -= tls_segment->memsz();
2256 Relocate_functions<64, false>::rela32(view + 8, value, 0);
2257
2258 // The next reloc should be a PLT32 reloc against __tls_get_addr.
2259 // We can skip it.
2260 this->skip_call_tls_get_addr_ = true;
2261 }
2262
2263 // Do a TLSDESC-style General-Dynamic to Initial-Exec transition.
2264
2265 inline void
2266 Target_x86_64::Relocate::tls_desc_gd_to_ie(
2267 const Relocate_info<64, false>* relinfo,
2268 size_t relnum,
2269 Output_segment*,
2270 const elfcpp::Rela<64, false>& rela,
2271 unsigned int r_type,
2272 elfcpp::Elf_types<64>::Elf_Addr value,
2273 unsigned char* view,
2274 elfcpp::Elf_types<64>::Elf_Addr address,
2275 section_size_type view_size)
2276 {
2277 if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC)
2278 {
2279 // leaq foo@tlsdesc(%rip), %rax
2280 // ==> movq foo@gottpoff(%rip), %rax
2281 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
2282 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
2283 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2284 view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x05);
2285 view[-2] = 0x8b;
2286 const elfcpp::Elf_Xword addend = rela.get_r_addend();
2287 Relocate_functions<64, false>::pcrela32(view, value, addend, address);
2288 }
2289 else
2290 {
2291 // call *foo@tlscall(%rax)
2292 // ==> nop; nop
2293 gold_assert(r_type == elfcpp::R_X86_64_TLSDESC_CALL);
2294 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 2);
2295 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2296 view[0] == 0xff && view[1] == 0x10);
2297 view[0] = 0x66;
2298 view[1] = 0x90;
2299 }
2300 }
2301
2302 // Do a TLSDESC-style General-Dynamic to Local-Exec transition.
2303
2304 inline void
2305 Target_x86_64::Relocate::tls_desc_gd_to_le(
2306 const Relocate_info<64, false>* relinfo,
2307 size_t relnum,
2308 Output_segment* tls_segment,
2309 const elfcpp::Rela<64, false>& rela,
2310 unsigned int r_type,
2311 elfcpp::Elf_types<64>::Elf_Addr value,
2312 unsigned char* view,
2313 section_size_type view_size)
2314 {
2315 if (r_type == elfcpp::R_X86_64_GOTPC32_TLSDESC)
2316 {
2317 // leaq foo@tlsdesc(%rip), %rax
2318 // ==> movq foo@tpoff, %rax
2319 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
2320 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
2321 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2322 view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x05);
2323 view[-2] = 0xc7;
2324 view[-1] = 0xc0;
2325 value -= tls_segment->memsz();
2326 Relocate_functions<64, false>::rela32(view, value, 0);
2327 }
2328 else
2329 {
2330 // call *foo@tlscall(%rax)
2331 // ==> nop; nop
2332 gold_assert(r_type == elfcpp::R_X86_64_TLSDESC_CALL);
2333 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 2);
2334 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2335 view[0] == 0xff && view[1] == 0x10);
2336 view[0] = 0x66;
2337 view[1] = 0x90;
2338 }
2339 }
2340
2341 inline void
2342 Target_x86_64::Relocate::tls_ld_to_le(const Relocate_info<64, false>* relinfo,
2343 size_t relnum,
2344 Output_segment*,
2345 const elfcpp::Rela<64, false>& rela,
2346 unsigned int,
2347 elfcpp::Elf_types<64>::Elf_Addr,
2348 unsigned char* view,
2349 section_size_type view_size)
2350 {
2351 // leaq foo@tlsld(%rip),%rdi; call __tls_get_addr@plt;
2352 // ... leaq foo@dtpoff(%rax),%reg
2353 // ==> .word 0x6666; .byte 0x66; movq %fs:0,%rax ... leaq x@tpoff(%rax),%rdx
2354
2355 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
2356 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 9);
2357
2358 tls::check_tls(relinfo, relnum, rela.get_r_offset(),
2359 view[-3] == 0x48 && view[-2] == 0x8d && view[-1] == 0x3d);
2360
2361 tls::check_tls(relinfo, relnum, rela.get_r_offset(), view[4] == 0xe8);
2362
2363 memcpy(view - 3, "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0\0", 12);
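  // No symbol value is written here: the following DTPOFF32/DTPOFF64
  // relocs (biased by the TLS segment size in relocate_tls) provide
  // the per-variable offsets from %rax.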
2364
2365 // The next reloc should be a PLT32 reloc against __tls_get_addr.
2366 // We can skip it.
2367 this->skip_call_tls_get_addr_ = true;
2368 }
2369
2370 // Do a relocation in which we convert a TLS Initial-Exec to a
2371 // Local-Exec.
2372
2373 inline void
2374 Target_x86_64::Relocate::tls_ie_to_le(const Relocate_info<64, false>* relinfo,
2375 size_t relnum,
2376 Output_segment* tls_segment,
2377 const elfcpp::Rela<64, false>& rela,
2378 unsigned int,
2379 elfcpp::Elf_types<64>::Elf_Addr value,
2380 unsigned char* view,
2381 section_size_type view_size)
2382 {
2383 // We need to examine the opcodes to figure out which instruction we
2384 // are looking at.
2385
2386 // movq foo@gottpoff(%rip),%reg ==> movq $YY,%reg
2387 // addq foo@gottpoff(%rip),%reg ==> addq $YY,%reg
2388
2389 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, -3);
2390 tls::check_range(relinfo, relnum, rela.get_r_offset(), view_size, 4);
2391
2392 unsigned char op1 = view[-3];
2393 unsigned char op2 = view[-2];
2394 unsigned char op3 = view[-1];
2395 unsigned char reg = op3 >> 3;
2396
2397 if (op2 == 0x8b)
2398 {
2399 // movq
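	  // Rewrite to "movq $imm32,%reg" (opcode 0xc7, ModRM 0xc0|reg).
	  // The register moves from the ModRM reg field to the r/m
	  // field, so a REX.R prefix (0x4c) becomes REX.B (0x49).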
2400 if (op1 == 0x4c)
2401 view[-3] = 0x49;
2402 view[-2] = 0xc7;
2403 view[-1] = 0xc0 | reg;
2404 }
2405 else if (reg == 4)
2406 {
2407 // Special handling for %rsp.
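	  // (This branch also covers %r12, which shares the low ModRM
	  // bits.) A leaq rewrite would need a SIB byte for this base
	  // register, so use "addq $imm32,%reg" (opcode 0x81 /0) instead.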
2408 if (op1 == 0x4c)
2409 view[-3] = 0x49;
2410 view[-2] = 0x81;
2411 view[-1] = 0xc0 | reg;
2412 }
2413 else
2414 {
2415 // addq
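	  // Rewrite to "leaq imm32(%reg),%reg" (opcode 0x8d), encoding
	  // the register as both base and destination; REX.R (0x4c)
	  // therefore becomes REX.R|REX.B (0x4d).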
2416 if (op1 == 0x4c)
2417 view[-3] = 0x4d;
2418 view[-2] = 0x8d;
2419 view[-1] = 0x80 | reg | (reg << 3);
2420 }
2421
2422 value -= tls_segment->memsz();
2423 Relocate_functions<64, false>::rela32(view, value, 0);
2424 }
2425
2426 // Relocate section data.
2427
2428 void
2429 Target_x86_64::relocate_section(
2430 const Relocate_info<64, false>* relinfo,
2431 unsigned int sh_type,
2432 const unsigned char* prelocs,
2433 size_t reloc_count,
2434 Output_section* output_section,
2435 bool needs_special_offset_handling,
2436 unsigned char* view,
2437 elfcpp::Elf_types<64>::Elf_Addr address,
2438 section_size_type view_size,
2439 const Reloc_symbol_changes* reloc_symbol_changes)
2440 {
2441 gold_assert(sh_type == elfcpp::SHT_RELA);
2442
2443 gold::relocate_section<64, false, Target_x86_64, elfcpp::SHT_RELA,
2444 Target_x86_64::Relocate>(
2445 relinfo,
2446 this,
2447 prelocs,
2448 reloc_count,
2449 output_section,
2450 needs_special_offset_handling,
2451 view,
2452 address,
2453 view_size,
2454 reloc_symbol_changes);
2455 }
2456
2457 // Return the size of a relocation while scanning during a relocatable
2458 // link.
2459
2460 unsigned int
2461 Target_x86_64::Relocatable_size_for_reloc::get_size_for_reloc(
2462 unsigned int r_type,
2463 Relobj* object)
2464 {
2465 switch (r_type)
2466 {
2467 case elfcpp::R_X86_64_NONE:
2468 case elfcpp::R_386_GNU_VTINHERIT:
2469 case elfcpp::R_386_GNU_VTENTRY:
2470 case elfcpp::R_X86_64_TLSGD: // Global-dynamic
2471 case elfcpp::R_X86_64_GOTPC32_TLSDESC: // Global-dynamic (from ~oliva url)
2472 case elfcpp::R_X86_64_TLSDESC_CALL:
2473 case elfcpp::R_X86_64_TLSLD: // Local-dynamic
2474 case elfcpp::R_X86_64_DTPOFF32:
2475 case elfcpp::R_X86_64_DTPOFF64:
2476 case elfcpp::R_X86_64_GOTTPOFF: // Initial-exec
2477 case elfcpp::R_X86_64_TPOFF32: // Local-exec
2478 return 0;
2479
2480 case elfcpp::R_X86_64_64:
2481 case elfcpp::R_X86_64_PC64:
2482 case elfcpp::R_X86_64_GOTOFF64:
2483 case elfcpp::R_X86_64_GOTPC64:
2484 case elfcpp::R_X86_64_PLTOFF64:
2485 case elfcpp::R_X86_64_GOT64:
2486 case elfcpp::R_X86_64_GOTPCREL64:
2487 case elfcpp::R_X86_64_GOTPCREL:
2488 case elfcpp::R_X86_64_GOTPLT64:
2489 return 8;
2490
2491 case elfcpp::R_X86_64_32:
2492 case elfcpp::R_X86_64_32S:
2493 case elfcpp::R_X86_64_PC32:
2494 case elfcpp::R_X86_64_PLT32:
2495 case elfcpp::R_X86_64_GOTPC32:
2496 case elfcpp::R_X86_64_GOT32:
2497 return 4;
2498
2499 case elfcpp::R_X86_64_16:
2500 case elfcpp::R_X86_64_PC16:
2501 return 2;
2502
2503 case elfcpp::R_X86_64_8:
2504 case elfcpp::R_X86_64_PC8:
2505 return 1;
2506
2507 case elfcpp::R_X86_64_COPY:
2508 case elfcpp::R_X86_64_GLOB_DAT:
2509 case elfcpp::R_X86_64_JUMP_SLOT:
2510 case elfcpp::R_X86_64_RELATIVE:
2511 // These are outstanding tls relocs, which are unexpected when linking
2512 case elfcpp::R_X86_64_TPOFF64:
2513 case elfcpp::R_X86_64_DTPMOD64:
2514 case elfcpp::R_X86_64_TLSDESC:
2515 object->error(_("unexpected reloc %u in object file"), r_type);
2516 return 0;
2517
2518 case elfcpp::R_X86_64_SIZE32:
2519 case elfcpp::R_X86_64_SIZE64:
2520 default:
2521 object->error(_("unsupported reloc %u against local symbol"), r_type);
2522 return 0;
2523 }
2524 }
2525
2526 // Scan the relocs during a relocatable link.
2527
2528 void
2529 Target_x86_64::scan_relocatable_relocs(Symbol_table* symtab,
2530 Layout* layout,
2531 Sized_relobj<64, false>* object,
2532 unsigned int data_shndx,
2533 unsigned int sh_type,
2534 const unsigned char* prelocs,
2535 size_t reloc_count,
2536 Output_section* output_section,
2537 bool needs_special_offset_handling,
2538 size_t local_symbol_count,
2539 const unsigned char* plocal_symbols,
2540 Relocatable_relocs* rr)
2541 {
2542 gold_assert(sh_type == elfcpp::SHT_RELA);
2543
2544 typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
2545 Relocatable_size_for_reloc> Scan_relocatable_relocs;
2546
2547 gold::scan_relocatable_relocs<64, false, elfcpp::SHT_RELA,
2548 Scan_relocatable_relocs>(
2549 symtab,
2550 layout,
2551 object,
2552 data_shndx,
2553 prelocs,
2554 reloc_count,
2555 output_section,
2556 needs_special_offset_handling,
2557 local_symbol_count,
2558 plocal_symbols,
2559 rr);
2560 }
2561
2562 // Relocate a section during a relocatable link.
2563
2564 void
2565 Target_x86_64::relocate_for_relocatable(
2566 const Relocate_info<64, false>* relinfo,
2567 unsigned int sh_type,
2568 const unsigned char* prelocs,
2569 size_t reloc_count,
2570 Output_section* output_section,
2571 off_t offset_in_output_section,
2572 const Relocatable_relocs* rr,
2573 unsigned char* view,
2574 elfcpp::Elf_types<64>::Elf_Addr view_address,
2575 section_size_type view_size,
2576 unsigned char* reloc_view,
2577 section_size_type reloc_view_size)
2578 {
2579 gold_assert(sh_type == elfcpp::SHT_RELA);
2580
2581 gold::relocate_for_relocatable<64, false, elfcpp::SHT_RELA>(
2582 relinfo,
2583 prelocs,
2584 reloc_count,
2585 output_section,
2586 offset_in_output_section,
2587 rr,
2588 view,
2589 view_address,
2590 view_size,
2591 reloc_view,
2592 reloc_view_size);
2593 }
2594
2595 // Return the value to use for a dynamic symbol which requires special
2596 // treatment. This is how we support equality comparisons of function
2597 // pointers across shared library boundaries, as described in the
2598 // processor specific ABI supplement.
2599
2600 uint64_t
2601 Target_x86_64::do_dynsym_value(const Symbol* gsym) const
2602 {
2603 gold_assert(gsym->is_from_dynobj() && gsym->has_plt_offset());
2604 return this->plt_section()->address() + gsym->plt_offset();
2605 }
2606
2607 // Return a string used to fill a code section with nops to take up
2608 // the specified length.
2609
2610 std::string
2611 Target_x86_64::do_code_fill(section_size_type length) const
2612 {
2613 if (length >= 16)
2614 {
2615 // Build a jmpq instruction to skip over the bytes.
2616 unsigned char jmp[5];
2617 jmp[0] = 0xe9;
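      // The rel32 displacement is relative to the end of the 5-byte
      // jmp, so skipping LENGTH bytes in total needs LENGTH - 5 here.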
2618 elfcpp::Swap_unaligned<32, false>::writeval(jmp + 1, length - 5);
2619 return (std::string(reinterpret_cast<char*>(&jmp[0]), 5)
2620 + std::string(length - 5, '\0'));
2621 }
2622
2623 // Nop sequences of various lengths.
2624 const char nop1[1] = { 0x90 }; // nop
2625 const char nop2[2] = { 0x66, 0x90 }; // xchg %ax %ax
2626 const char nop3[3] = { 0x0f, 0x1f, 0x00 }; // nop (%rax)
2627 const char nop4[4] = { 0x0f, 0x1f, 0x40, 0x00}; // nop 0(%rax)
2628 const char nop5[5] = { 0x0f, 0x1f, 0x44, 0x00, // nop 0(%rax,%rax,1)
2629 0x00 };
2630 const char nop6[6] = { 0x66, 0x0f, 0x1f, 0x44, // nopw 0(%rax,%rax,1)
2631 0x00, 0x00 };
2632 const char nop7[7] = { 0x0f, 0x1f, 0x80, 0x00, // nopl 0L(%rax)
2633 0x00, 0x00, 0x00 };
2634 const char nop8[8] = { 0x0f, 0x1f, 0x84, 0x00, // nopl 0L(%rax,%rax,1)
2635 0x00, 0x00, 0x00, 0x00 };
2636 const char nop9[9] = { 0x66, 0x0f, 0x1f, 0x84, // nopw 0L(%rax,%rax,1)
2637 0x00, 0x00, 0x00, 0x00,
2638 0x00 };
2639 const char nop10[10] = { 0x66, 0x2e, 0x0f, 0x1f, // nopw %cs:0L(%rax,%rax,1)
2640 0x84, 0x00, 0x00, 0x00,
2641 0x00, 0x00 };
2642 const char nop11[11] = { 0x66, 0x66, 0x2e, 0x0f, // data16
2643 0x1f, 0x84, 0x00, 0x00, // nopw %cs:0L(%rax,%rax,1)
2644 0x00, 0x00, 0x00 };
2645 const char nop12[12] = { 0x66, 0x66, 0x66, 0x2e, // data16; data16
2646 0x0f, 0x1f, 0x84, 0x00, // nopw %cs:0L(%rax,%rax,1)
2647 0x00, 0x00, 0x00, 0x00 };
2648 const char nop13[13] = { 0x66, 0x66, 0x66, 0x66, // data16; data16; data16
2649 0x2e, 0x0f, 0x1f, 0x84, // nopw %cs:0L(%rax,%rax,1)
2650 0x00, 0x00, 0x00, 0x00,
2651 0x00 };
2652 const char nop14[14] = { 0x66, 0x66, 0x66, 0x66, // data16; data16; data16
2653 0x66, 0x2e, 0x0f, 0x1f, // data16
2654 0x84, 0x00, 0x00, 0x00, // nopw %cs:0L(%rax,%rax,1)
2655 0x00, 0x00 };
2656 const char nop15[15] = { 0x66, 0x66, 0x66, 0x66, // data16; data16; data16
2657 0x66, 0x66, 0x2e, 0x0f, // data16; data16
2658 0x1f, 0x84, 0x00, 0x00, // nopw %cs:0L(%rax,%rax,1)
2659 0x00, 0x00, 0x00 };
2660
2661 const char* nops[16] = {
2662 NULL,
2663 nop1, nop2, nop3, nop4, nop5, nop6, nop7,
2664 nop8, nop9, nop10, nop11, nop12, nop13, nop14, nop15
2665 };
2666
2667 return std::string(nops[length], length);
2668 }
2669
2670 // FNOFFSET in section SHNDX in OBJECT is the start of a function
2671 // compiled with -fsplit-stack. The function calls non-split-stack
2672 // code. We have to change the function so that it always ensures
2673 // that it has enough stack space to run some random function.
2674
2675 void
2676 Target_x86_64::do_calls_non_split(Relobj* object, unsigned int shndx,
2677 section_offset_type fnoffset,
2678 section_size_type fnsize,
2679 unsigned char* view,
2680 section_size_type view_size,
2681 std::string* from,
2682 std::string* to) const
2683 {
2684 // The function starts with a comparison of the stack pointer and a
2685 // field in the TCB. This is followed by a jump.
2686
2687 // cmp %fs:NN,%rsp
2688 if (this->match_view(view, view_size, fnoffset, "\x64\x48\x3b\x24\x25", 5)
2689 && fnsize > 9)
2690 {
2691 // We will call __morestack if the carry flag is set after this
2692 // comparison. We turn the comparison into an stc instruction
2693 // and some nops.
2694 view[fnoffset] = '\xf9';
2695 this->set_view_to_nop(view, view_size, fnoffset + 1, 8);
2696 }
2697 // lea NN(%rsp),%r10
2698 // lea NN(%rsp),%r11
2699 else if ((this->match_view(view, view_size, fnoffset,
2700 "\x4c\x8d\x94\x24", 4)
2701 || this->match_view(view, view_size, fnoffset,
2702 "\x4c\x8d\x9c\x24", 4))
2703 && fnsize > 8)
2704 {
2705 // This is loading an offset from the stack pointer for a
2706 // comparison. The offset is negative, so we decrease the
2707 // offset by the amount of space we need for the stack. This
2708 // means we will avoid calling __morestack if there happens to
2709 // be plenty of space on the stack already.
2710 unsigned char* pval = view + fnoffset + 4;
2711 uint32_t val = elfcpp::Swap_unaligned<32, false>::readval(pval);
2712 val -= parameters->options().split_stack_adjust_size();
2713 elfcpp::Swap_unaligned<32, false>::writeval(pval, val);
2714 }
2715 else
2716 {
2717 if (!object->has_no_split_stack())
2718 object->error(_("failed to match split-stack sequence at "
2719 "section %u offset %0zx"),
2720 shndx, static_cast<size_t>(fnoffset));
2721 return;
2722 }
2723
2724 // We have to change the function so that it calls
2725 // __morestack_non_split instead of __morestack. The former will
2726 // allocate additional stack space.
2727 *from = "__morestack";
2728 *to = "__morestack_non_split";
2729 }
2730
2731 // The selector for x86_64 object files.
2732
2733 class Target_selector_x86_64 : public Target_selector_freebsd
2734 {
2735 public:
2736 Target_selector_x86_64()
2737 : Target_selector_freebsd(elfcpp::EM_X86_64, 64, false, "elf64-x86-64",
2738 "elf64-x86-64-freebsd")
2739 { }
2740
2741 Target*
2742 do_instantiate_target()
2743 { return new Target_x86_64(); }
2744
2745 };
2746
2747 Target_selector_x86_64 target_selector_x86_64;
2748
2749 } // End anonymous namespace.