/* X86-64 specific support for ELF
- Copyright (C) 2000-2022 Free Software Foundation, Inc.
+ Copyright (C) 2000-2024 Free Software Foundation, Inc.
Contributed by Jan Hubicka <jh@suse.cz>.
This file is part of BFD, the Binary File Descriptor library.
#include "elfxx-x86.h"
#include "dwarf2.h"
#include "libiberty.h"
+#include "sframe.h"
#include "opcode/i386.h"
HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
true),
+ HOWTO(R_X86_64_CODE_4_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTPCRELX", false, 0, 0xffffffff,
+ true),
+ HOWTO(R_X86_64_CODE_4_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
+ bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTTPOFF", false, 0, 0xffffffff,
+ true),
+ HOWTO(R_X86_64_CODE_4_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
+ complain_overflow_bitfield, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_4_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_5_GOTPCRELX, 0, 4, 32, true, 0,
+ complain_overflow_signed, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_5_GOTPCRELX", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_5_GOTTPOFF, 0, 4, 32, true, 0,
+ complain_overflow_signed, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_5_GOTTPOFF", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_5_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
+ complain_overflow_bitfield, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_5_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_6_GOTPCRELX, 0, 4, 32, true, 0,
+ complain_overflow_signed, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_6_GOTPCRELX", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_6_GOTTPOFF, 0, 4, 32, true, 0,
+ complain_overflow_signed, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_6_GOTTPOFF", false, 0, 0xffffffff, true),
+ HOWTO(R_X86_64_CODE_6_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
+ complain_overflow_bitfield, bfd_elf_generic_reloc,
+ "R_X86_64_CODE_6_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
/* We have a gap in the reloc numbers here.
R_X86_64_standard counts the number up to this point, and
R_X86_64_vt_offset is the value to subtract from a reloc type of
R_X86_64_GNU_VT* to form an index into this table. */
-#define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1)
+#define R_X86_64_standard (R_X86_64_CODE_6_GOTPC32_TLSDESC + 1)
#define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
/* GNU extension to record C++ vtable hierarchy. */
{ BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
{ BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
{ BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
+ { BFD_RELOC_X86_64_CODE_4_GOTPCRELX, R_X86_64_CODE_4_GOTPCRELX, },
+ { BFD_RELOC_X86_64_CODE_4_GOTTPOFF, R_X86_64_CODE_4_GOTTPOFF, },
+ { BFD_RELOC_X86_64_CODE_4_GOTPC32_TLSDESC, R_X86_64_CODE_4_GOTPC32_TLSDESC, },
+ { BFD_RELOC_X86_64_CODE_5_GOTPCRELX, R_X86_64_CODE_5_GOTPCRELX, },
+ { BFD_RELOC_X86_64_CODE_5_GOTTPOFF, R_X86_64_CODE_5_GOTTPOFF, },
+ { BFD_RELOC_X86_64_CODE_5_GOTPC32_TLSDESC, R_X86_64_CODE_5_GOTPC32_TLSDESC, },
+ { BFD_RELOC_X86_64_CODE_6_GOTPCRELX, R_X86_64_CODE_6_GOTPCRELX, },
+ { BFD_RELOC_X86_64_CODE_6_GOTTPOFF, R_X86_64_CODE_6_GOTTPOFF, },
+ { BFD_RELOC_X86_64_CODE_6_GOTPC32_TLSDESC, R_X86_64_CODE_6_GOTPC32_TLSDESC, },
{ BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
{ BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
};
default:
return false;
- case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */
+ case 124:
+ /* sizeof (struct elf_external_linux_prpsinfo32_ugid16). */
elf_tdata (abfd)->core->pid
= bfd_get_32 (abfd, note->descdata + 12);
elf_tdata (abfd)->core->program
= _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
break;
- case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */
+ case 128:
+ /* sizeof (struct elf_external_linux_prpsinfo32_ugid32). */
+ elf_tdata (abfd)->core->pid
+ = bfd_get_32 (abfd, note->descdata + 12);
+ elf_tdata (abfd)->core->program
+ = _bfd_elfcore_strndup (abfd, note->descdata + 32, 16);
+ elf_tdata (abfd)->core->command
+ = _bfd_elfcore_strndup (abfd, note->descdata + 48, 80);
+ break;
+
+ case 136:
+ /* sizeof (struct elf_prpsinfo) on Linux/x86_64. */
elf_tdata (abfd)->core->pid
= bfd_get_32 (abfd, note->descdata + 24);
elf_tdata (abfd)->core->program
DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
};
+/* Dummy SFrame frame row entry.  Used to pad unused FRE slots in the
+   SFrame PLT helper objects below.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_null_fre =
+{
+  0, /* SFrame FRE start address.  */
+  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* First .sframe FRE for the initial .plt entry (PLT0), starting at
+   offset 0 of the entry.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 =
+{
+  0, /* SFrame FRE start address.  */
+  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* Second .sframe FRE for the initial .plt entry (PLT0), starting at
+   offset 6 of the entry.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 =
+{
+  6, /* SFrame FRE start address.  */
+  {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* First .sframe FRE for a regular .plt entry (PLTn), starting at
+   offset 0 of the entry.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 =
+{
+  0, /* SFrame FRE start address.  */
+  {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* Second .sframe FRE for a regular .plt entry (PLTn), starting at
+   offset 11 of the entry.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 =
+{
+  11, /* SFrame FRE start address.  */
+  {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* Single .sframe FRE for an entry in the second PLT (also used for
+   .plt.got), starting at offset 0 of the entry.  */
+static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 =
+{
+  0, /* SFrame FRE start address.  */
+  {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes.  */
+  SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info.  */
+};
+
+/* SFrame helper object for non-lazy PLT.  Also used for IBT enabled PLT.  */
+static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt =
+{
+  LAZY_PLT_ENTRY_SIZE, /* PLT0 entry size.  */
+  2, /* Number of FREs for PLT0.  */
+  /* Array of SFrame FREs for plt0.  */
+  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
+  LAZY_PLT_ENTRY_SIZE, /* PLTn entry size.  */
+  1, /* Number of FREs for PLTn.  */
+  /* Array of SFrame FREs for plt; second slot padded with the null FRE.  */
+  { &elf_x86_64_sframe_sec_pltn_fre1, &elf_x86_64_sframe_null_fre },
+  0,
+  0, /* There is no second PLT necessary.  */
+  { &elf_x86_64_sframe_null_fre }
+};
+
+/* SFrame helper object for lazy PLT.  Also used for IBT enabled PLT.  */
+static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt =
+{
+  LAZY_PLT_ENTRY_SIZE, /* PLT0 entry size.  */
+  2, /* Number of FREs for PLT0.  */
+  /* Array of SFrame FREs for plt0.  */
+  { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
+  LAZY_PLT_ENTRY_SIZE, /* PLTn entry size.  */
+  2, /* Number of FREs for PLTn.  */
+  /* Array of SFrame FREs for plt.  */
+  { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 },
+  NON_LAZY_PLT_ENTRY_SIZE, /* Second PLT entry size.  */
+  1, /* Number of FREs for PLTn for second PLT.  */
+  /* FREs for second plt (stack trace info for .plt.got is
+     identical).  Used when IBT or non-lazy PLT is in effect.  */
+  { &elf_x86_64_sframe_sec_pltn_fre1 }
+};
+
/* These are the standard parameters. */
static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
{
sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
};
-
static bool
elf64_x86_64_elf_object_p (bfd *abfd)
{
if (largepic)
return r_type == R_X86_64_PLTOFF64;
else if (indirect_call)
- return r_type == R_X86_64_GOTPCRELX;
+ return (r_type == R_X86_64_GOTPCRELX || r_type == R_X86_64_GOTPCREL);
else
return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
}
+ case R_X86_64_CODE_4_GOTTPOFF:
+ /* Check transition from IE access model:
+ mov foo@gottpoff(%rip), %reg
+ add foo@gottpoff(%rip), %reg
+ where reg is one of r16 to r31. */
+
+ if (offset < 4
+ || (offset + 4) > sec->size
+ || contents[offset - 4] != 0xd5)
+ return false;
+
+ goto check_gottpoff;
+
+ case R_X86_64_CODE_6_GOTTPOFF:
+ /* Check transition from IE access model:
+ add %reg1, foo@gottpoff(%rip), %reg2
+ where reg1/reg2 are one of r16 to r31. */
+
+ if (offset < 6
+ || (offset + 4) > sec->size
+ || contents[offset - 6] != 0x62)
+ return false;
+
+ val = bfd_get_8 (abfd, contents + offset - 2);
+ if (val != 0x01 && val != 0x03)
+ return false;
+
+ val = bfd_get_8 (abfd, contents + offset - 1);
+ return (val & 0xc7) == 5;
+
case R_X86_64_GOTTPOFF:
/* Check transition from IE access model:
mov foo@gottpoff(%rip), %reg
return false;
}
+ check_gottpoff:
val = bfd_get_8 (abfd, contents + offset - 2);
if (val != 0x8b && val != 0x03)
return false;
val = bfd_get_8 (abfd, contents + offset - 1);
return (val & 0xc7) == 5;
+ case R_X86_64_CODE_4_GOTPC32_TLSDESC:
+ /* Check transition from GDesc access model:
+ lea x@tlsdesc(%rip), %reg
+ where reg is one of r16 to r31. */
+
+ if (offset < 4
+ || (offset + 4) > sec->size
+ || contents[offset - 4] != 0xd5)
+ return false;
+
+ goto check_tlsdesc;
+
case R_X86_64_GOTPC32_TLSDESC:
/* Check transition from GDesc access model:
leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
return false;
+ check_tlsdesc:
if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
return false;
{
case R_X86_64_TLSGD:
case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_CODE_4_GOTPC32_TLSDESC:
case R_X86_64_TLSDESC_CALL:
case R_X86_64_GOTTPOFF:
+ case R_X86_64_CODE_4_GOTTPOFF:
+ case R_X86_64_CODE_6_GOTTPOFF:
if (bfd_link_executable (info))
{
if (h == NULL)
if (to_type == R_X86_64_TLSGD
|| to_type == R_X86_64_GOTPC32_TLSDESC
+ || to_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
|| to_type == R_X86_64_TLSDESC_CALL)
{
if (tls_type == GOT_TLS_IE)
}
/* Return TRUE if there is no transition. */
- if (from_type == to_type)
+ if (from_type == to_type
+ || (from_type == R_X86_64_CODE_4_GOTTPOFF
+ && to_type == R_X86_64_GOTTPOFF)
+ || (from_type == R_X86_64_CODE_6_GOTTPOFF
+ && to_type == R_X86_64_GOTTPOFF))
return true;
/* Check if the transition can be performed. */
bfd_vma roff = irel->r_offset;
bfd_vma abs_relocation;
- if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))
+ if (roff < (r_type == R_X86_64_CODE_4_GOTPCRELX
+ ? 4 : (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)))
return true;
raddend = irel->r_addend;
htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
is_pic = bfd_link_pic (link_info);
- relocx = (r_type == R_X86_64_GOTPCRELX
- || r_type == R_X86_64_REX_GOTPCRELX);
+ if (r_type == R_X86_64_CODE_4_GOTPCRELX)
+ {
+ /* Skip if this isn't a REX2 instruction. */
+ opcode = bfd_get_8 (abfd, contents + roff - 4);
+ if (opcode != 0xd5)
+ return true;
+
+ relocx = true;
+ }
+ else
+ relocx = (r_type == R_X86_64_GOTPCRELX
+ || r_type == R_X86_64_REX_GOTPCRELX);
/* TRUE if --no-relax is used. */
no_overflow = link_info->disable_target_specific_optimizations > 1;
/* Convert mov to lea since it has been done for a while. */
if (opcode != 0x8b)
{
- /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX
- for call, jmp or one of adc, add, and, cmp, or, sbb, sub,
- test, xor instructions. */
+ /* Only convert R_X86_64_GOTPCRELX, R_X86_64_REX_GOTPCRELX
+ and R_X86_64_CODE_4_GOTPCRELX for call, jmp or one of adc,
+ add, and, cmp, or, sbb, sub, test, xor instructions. */
if (!relocx)
return true;
}
}
else
{
- unsigned int rex;
+ unsigned int rex = 0;
unsigned int rex_mask = REX_R;
+ unsigned int rex2 = 0;
+ unsigned int rex2_mask = REX_R | REX_R << 4;
+ bool rex_w = false;
- if (r_type == R_X86_64_REX_GOTPCRELX)
- rex = bfd_get_8 (abfd, contents + roff - 3);
- else
- rex = 0;
+ if (r_type == R_X86_64_CODE_4_GOTPCRELX)
+ {
+ rex2 = bfd_get_8 (abfd, contents + roff - 3);
+ rex_w = (rex2 & REX_W) != 0;
+ }
+ else if (r_type == R_X86_64_REX_GOTPCRELX)
+ {
+ rex = bfd_get_8 (abfd, contents + roff - 3);
+ rex_w = (rex & REX_W) != 0;
+ }
if (opcode == 0x8b)
{
opcode = 0xc7;
modrm = bfd_get_8 (abfd, contents + roff - 1);
modrm = 0xc0 | (modrm & 0x38) >> 3;
- if ((rex & REX_W) != 0
- && ABI_64_P (link_info->output_bfd))
+ if (rex_w && ABI_64_P (link_info->output_bfd))
{
/* Keep the REX_W bit in REX byte for LP64. */
r_type = R_X86_64_32S;
use R_X86_64_32 and clear the W bit to avoid
sign-extend imm32 to imm64. */
r_type = R_X86_64_32;
- /* Clear the W bit in REX byte. */
+ /* Clear the W bit in REX byte and REX2 payload. */
rex_mask |= REX_W;
+ rex2_mask |= REX_W;
goto rewrite_modrm_rex;
}
}
/* Use R_X86_64_32 with 32-bit operand to avoid relocation
overflow when sign-extending imm32 to imm64. */
- r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32;
+ r_type = rex_w ? R_X86_64_32S : R_X86_64_32;
rewrite_modrm_rex:
if (abs_relocation)
rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
bfd_put_8 (abfd, rex, contents + roff - 3);
}
+ else if (rex2)
+ {
+ /* Move the R bits to the B bits in REX2 payload byte. */
+ rex2 = ((rex2 & ~rex2_mask)
+ | (rex2 & (REX_R | REX_R << 4)) >> 2);
+ bfd_put_8 (abfd, rex2, contents + roff - 3);
+ }
/* No addend for R_X86_64_32/R_X86_64_32S relocations. */
irel->r_addend = 0;
/* Get the section contents. */
if (elf_section_data (sec)->this_hdr.contents != NULL)
contents = elf_section_data (sec)->this_hdr.contents;
- else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
+ else if (!_bfd_elf_mmap_section_contents (abfd, sec, &contents))
{
sec->check_relocs_failed = 1;
return false;
r_symndx = htab->r_sym (rel->r_info);
r_type = ELF32_R_TYPE (rel->r_info);
+ /* Don't check R_X86_64_NONE. */
+ if (r_type == R_X86_64_NONE)
+ continue;
+
if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
{
/* xgettext:c-format */
converted_reloc = false;
if ((r_type == R_X86_64_GOTPCREL
|| r_type == R_X86_64_GOTPCRELX
- || r_type == R_X86_64_REX_GOTPCRELX)
+ || r_type == R_X86_64_REX_GOTPCRELX
+ || r_type == R_X86_64_CODE_4_GOTPCRELX)
&& (h == NULL || h->type != STT_GNU_IFUNC))
{
Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
break;
case R_X86_64_GOTTPOFF:
+ case R_X86_64_CODE_4_GOTTPOFF:
+ case R_X86_64_CODE_6_GOTTPOFF:
if (!bfd_link_executable (info))
info->flags |= DF_STATIC_TLS;
/* Fall through */
case R_X86_64_GOTPCREL:
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
+ case R_X86_64_CODE_4_GOTPCRELX:
case R_X86_64_TLSGD:
case R_X86_64_GOT64:
case R_X86_64_GOTPCREL64:
case R_X86_64_GOTPLT64:
case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_CODE_4_GOTPC32_TLSDESC:
case R_X86_64_TLSDESC_CALL:
/* This symbol requires a global offset table entry. */
{
tls_type = GOT_TLS_GD;
break;
case R_X86_64_GOTTPOFF:
+ case R_X86_64_CODE_4_GOTTPOFF:
+ case R_X86_64_CODE_6_GOTTPOFF:
tls_type = GOT_TLS_IE;
break;
case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_CODE_4_GOTPC32_TLSDESC:
case R_X86_64_TLSDESC_CALL:
tls_type = GOT_TLS_GDESC;
break;
break;
case R_X86_64_PLT32:
- case R_X86_64_PLT32_BND:
/* This symbol requires a procedure linkage table entry. We
actually build the entry in adjust_dynamic_symbol,
because this might be a case of linking PIC code which is
case R_X86_64_PC8:
case R_X86_64_PC16:
case R_X86_64_PC32:
- case R_X86_64_PC32_BND:
case R_X86_64_PC64:
case R_X86_64_64:
pointer:
}
}
}
- else if (r_type != R_X86_64_PC32_BND
- && r_type != R_X86_64_PC64)
+ else if (r_type != R_X86_64_PC64)
{
/* At run-time, R_X86_64_64 can be resolved for both
x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
|| (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
h->plt.refcount = 1;
- if (h->pointer_equality_needed
+ if (htab->elf.target_os != is_solaris
+ && h->pointer_equality_needed
&& h->type == STT_FUNC
&& eh->def_protected
&& !SYMBOL_DEFINED_NON_SHARED_P (h)
}
break;
+ case R_X86_64_CODE_5_GOTPCRELX:
+ case R_X86_64_CODE_5_GOTTPOFF:
+ case R_X86_64_CODE_5_GOTPC32_TLSDESC:
+ case R_X86_64_CODE_6_GOTPCRELX:
+ case R_X86_64_CODE_6_GOTPC32_TLSDESC:
+ {
+	    /* These relocations are added only for completeness and
+	       aren't used.  */
+ if (h)
+ name = h->root.root.string;
+ else
+ name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
+ NULL);
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%pB: unsupported relocation %s against symbol `%s'"),
+ abfd, x86_64_elf_howto_table[r_type].name, name);
+ }
+ break;
+
/* This relocation describes the C++ object vtable hierarchy.
Reconstruct it for later use during GC. */
case R_X86_64_GNU_VTINHERIT:
if (elf_section_data (sec)->this_hdr.contents != contents)
{
- if (!converted && !_bfd_link_keep_memory (info))
- free (contents);
+ if (!converted)
+ _bfd_elf_munmap_section_contents (sec, contents);
else
{
/* Cache the section contents for elf_link_input_bfd if any
error_return:
if (elf_section_data (sec)->this_hdr.contents != contents)
- free (contents);
+ _bfd_elf_munmap_section_contents (sec, contents);
sec->check_relocs_failed = 1;
return false;
}
static bool
-elf_x86_64_always_size_sections (bfd *output_bfd,
- struct bfd_link_info *info)
+elf_x86_64_early_size_sections (bfd *output_bfd, struct bfd_link_info *info)
{
bfd *abfd;
elf_x86_64_scan_relocs))
return false;
- return _bfd_x86_elf_always_size_sections (output_bfd, info);
+ return _bfd_x86_elf_early_size_sections (output_bfd, info);
}
/* Return the relocation value for @tpoff relocation
case R_X86_64_GOTPCREL:
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
+ case R_X86_64_CODE_4_GOTPCRELX:
case R_X86_64_GOTPCREL64:
base_got = htab->elf.sgot;
off = h->got.offset;
}
/* FALLTHROUGH */
case R_X86_64_PC32:
- case R_X86_64_PC32_BND:
case R_X86_64_PC64:
case R_X86_64_PLT32:
- case R_X86_64_PLT32_BND:
goto do_relocation;
}
}
case R_X86_64_GOTPCREL:
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
+ case R_X86_64_CODE_4_GOTPCRELX:
case R_X86_64_GOTPCREL64:
/* Use global offset table entry as symbol value. */
case R_X86_64_GOTPLT64:
&& !(sym->st_shndx == SHN_ABS
&& (r_type == R_X86_64_GOTPCREL
|| r_type == R_X86_64_GOTPCRELX
- || r_type == R_X86_64_REX_GOTPCRELX)))
+ || r_type == R_X86_64_REX_GOTPCRELX
+ || r_type == R_X86_64_CODE_4_GOTPCRELX)))
relative_reloc = true;
}
}
if (r_type != R_X86_64_GOTPCREL
&& r_type != R_X86_64_GOTPCRELX
&& r_type != R_X86_64_REX_GOTPCRELX
+ && r_type != R_X86_64_CODE_4_GOTPCRELX
&& r_type != R_X86_64_GOTPCREL64)
relocation -= htab->elf.sgotplt->output_section->vma
- htab->elf.sgotplt->output_offset;
break;
case R_X86_64_PLT32:
- case R_X86_64_PLT32_BND:
/* Relocation is to the entry for this symbol in the
procedure linkage table. */
case R_X86_64_PC8:
case R_X86_64_PC16:
case R_X86_64_PC32:
- case R_X86_64_PC32_BND:
/* Don't complain about -fPIC if the symbol is undefined when
building executable unless it is unresolved weak symbol,
references a dynamic definition in PIE or -z nocopyreloc
case R_X86_64_TLSGD:
case R_X86_64_GOTPC32_TLSDESC:
+ case R_X86_64_CODE_4_GOTPC32_TLSDESC:
case R_X86_64_TLSDESC_CALL:
case R_X86_64_GOTTPOFF:
+ case R_X86_64_CODE_4_GOTTPOFF:
+ case R_X86_64_CODE_6_GOTTPOFF:
tls_type = GOT_UNKNOWN;
if (h == NULL && local_got_offsets)
tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
{
bfd_vma roff = rel->r_offset;
+ if (roff >= input_section->size)
+ goto corrupt_input;
+
BFD_ASSERT (! unresolved_reloc);
if (r_type == R_X86_64_TLSGD)
int largepic = 0;
if (ABI_64_P (output_bfd))
{
+ if (roff + 5 >= input_section->size)
+ goto corrupt_input;
if (contents[roff + 5] == 0xb8)
{
if (roff < 3
"\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
15);
}
+
+ if (roff + 8 + largepic >= input_section->size)
+ goto corrupt_input;
+
bfd_put_32 (output_bfd,
elf_x86_64_tpoff (info, relocation),
contents + roff + 8 + largepic);
contents + roff);
continue;
}
+ else if (r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
+ {
+ /* GDesc -> LE transition.
+ It's originally something like:
+ lea x@tlsdesc(%rip), %reg
+
+ Change it to:
+ mov $x@tpoff, %reg
+ where reg is one of r16 to r31. */
+
+ unsigned int val, rex2;
+ unsigned int rex2_mask = REX_R | REX_R << 4;
+
+ if (roff < 4)
+ goto corrupt_input;
+ rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
+ val = bfd_get_8 (input_bfd, contents + roff - 1);
+ /* Move the R bits to the B bits in REX2 payload
+ byte. */
+ bfd_put_8 (output_bfd,
+ ((rex2 & ~rex2_mask)
+ | (rex2 & rex2_mask) >> 2),
+ contents + roff - 3);
+ bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
+ bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
+ contents + roff - 1);
+ bfd_put_32 (output_bfd,
+ elf_x86_64_tpoff (info, relocation),
+ contents + roff);
+ continue;
+ }
else if (r_type == R_X86_64_TLSDESC_CALL)
{
/* GDesc -> LE transition.
}
if (prefix)
{
+ if (roff + 2 >= input_section->size)
+ goto corrupt_input;
+
bfd_put_8 (output_bfd, 0x0f, contents + roff);
bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
}
else
{
+ if (roff + 1 >= input_section->size)
+ goto corrupt_input;
+
bfd_put_8 (output_bfd, 0x66, contents + roff);
bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
}
contents + roff);
continue;
}
+ else if (r_type == R_X86_64_CODE_4_GOTTPOFF)
+ {
+ /* IE->LE transition:
+ Originally it can be one of:
+ mov foo@gottpoff(%rip), %reg
+ add foo@gottpoff(%rip), %reg
+ We change it into:
+ mov $foo@tpoff, %reg
+ add $foo@tpoff, %reg
+ where reg is one of r16 to r31. */
+
+ unsigned int rex2, type, reg;
+ unsigned int rex2_mask = REX_R | REX_R << 4;
+
+ if (roff < 4)
+ goto corrupt_input;
+
+ rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
+ type = bfd_get_8 (input_bfd, contents + roff - 2);
+ reg = bfd_get_8 (input_bfd, contents + roff - 1);
+ reg >>= 3;
+ /* Move the R bits to the B bits in REX2 payload
+ byte. */
+ if (type == 0x8b)
+ type = 0xc7;
+ else
+ type = 0x81;
+ bfd_put_8 (output_bfd,
+ ((rex2 & ~rex2_mask)
+ | (rex2 & rex2_mask) >> 2),
+ contents + roff - 3);
+ bfd_put_8 (output_bfd, type,
+ contents + roff - 2);
+ bfd_put_8 (output_bfd, 0xc0 | reg,
+ contents + roff - 1);
+ bfd_put_32 (output_bfd,
+ elf_x86_64_tpoff (info, relocation),
+ contents + roff);
+ continue;
+ }
+ else if (r_type == R_X86_64_CODE_6_GOTTPOFF)
+ {
+ /* IE->LE transition:
+ Originally it is
+ add %reg1, foo@gottpoff(%rip), %reg2
+ or
+ add foo@gottpoff(%rip), %reg1, %reg2
+ We change it into:
+ add $foo@tpoff, %reg1, %reg2
+ */
+ unsigned int reg, byte1;
+ unsigned int updated_byte1;
+
+ if (roff < 6)
+ goto corrupt_input;
+
+ /* Move the R bits to the B bits in EVEX payload
+ byte 1. */
+ byte1 = bfd_get_8 (input_bfd, contents + roff - 5);
+ updated_byte1 = byte1;
+
+		      /* Set the R bits since they are inverted.  */
+ updated_byte1 |= 1 << 7 | 1 << 4;
+
+ /* Update the B bits from the R bits. */
+ if ((byte1 & (1 << 7)) == 0)
+ updated_byte1 &= ~(1 << 5);
+ if ((byte1 & (1 << 4)) == 0)
+ updated_byte1 |= 1 << 3;
+
+ reg = bfd_get_8 (input_bfd, contents + roff - 1);
+ reg >>= 3;
+
+ bfd_put_8 (output_bfd, updated_byte1,
+ contents + roff - 5);
+ bfd_put_8 (output_bfd, 0x81,
+ contents + roff - 2);
+ bfd_put_8 (output_bfd, 0xc0 | reg,
+ contents + roff - 1);
+ bfd_put_32 (output_bfd,
+ elf_x86_64_tpoff (info, relocation),
+ contents + roff);
+ continue;
+ }
else
BFD_ASSERT (false);
}
if (r_type_tls == r_type)
{
if (r_type == R_X86_64_GOTPC32_TLSDESC
+ || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
|| r_type == R_X86_64_TLSDESC_CALL)
relocation = htab->elf.sgotplt->output_section->vma
+ htab->elf.sgotplt->output_offset
wrel++;
continue;
}
- else if (r_type == R_X86_64_GOTPC32_TLSDESC)
+ else if (r_type == R_X86_64_GOTPC32_TLSDESC
+ || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
{
/* GDesc -> IE transition.
It's originally something like:
bool local_undefweak;
htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
- if (htab == NULL)
- return false;
/* Use the second PLT section only if there is .plt section. */
use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
else
{
rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
- rela.r_addend = 0;
+ if (htab->params->mark_plt)
+ rela.r_addend = (resolved_plt->output_section->vma
+ + plt_offset
+ + htab->plt.plt_indirect_branch_offset);
+ else
+ rela.r_addend = 0;
plt_index = htab->next_jump_slot_index++;
}
const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
+ const struct elf_x86_lazy_plt_layout *x32_lazy_ibt_plt;
+ const struct elf_x86_non_lazy_plt_layout *x32_non_lazy_ibt_plt;
asection *plt;
enum elf_x86_plt_type plt_type;
struct elf_x86_plt plts[] =
{
lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
+ x32_lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
+ x32_non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
}
else
{
lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
+ x32_lazy_ibt_plt = NULL;
+ x32_non_lazy_ibt_plt = NULL;
}
count = 0;
for (j = 0; plts[j].name != NULL; j++)
{
plt = bfd_get_section_by_name (abfd, plts[j].name);
- if (plt == NULL || plt->size == 0)
+ if (plt == NULL
+ || plt->size == 0
+ || (plt->flags & SEC_HAS_CONTENTS) == 0)
continue;
/* Get the PLT section contents. */
- if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
+ if (!_bfd_elf_mmap_section_contents (abfd, plt, &plt_contents))
break;
/* Check what kind of PLT it is. */
lazy_plt->plt0_got1_offset) == 0)
&& (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
2) == 0))
- plt_type = plt_lazy;
+ {
+ if (x32_lazy_ibt_plt != NULL
+ && (memcmp (plt_contents
+ + x32_lazy_ibt_plt->plt_entry_size,
+ x32_lazy_ibt_plt->plt_entry,
+ x32_lazy_ibt_plt->plt_got_offset) == 0))
+ {
+		  /* The first entry in the x32 lazy IBT PLT is the same
+		     as the lazy PLT.  */
+ plt_type = plt_lazy | plt_second;
+ lazy_plt = x32_lazy_ibt_plt;
+ }
+ else
+ plt_type = plt_lazy;
+ }
else if (lazy_bnd_plt != NULL
&& (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
lazy_bnd_plt->plt0_got1_offset) == 0)
plt_type = plt_second;
non_lazy_plt = non_lazy_ibt_plt;
}
+ else if (x32_non_lazy_ibt_plt != NULL
+ && plt->size >= x32_non_lazy_ibt_plt->plt_entry_size
+ && (memcmp (plt_contents,
+ x32_non_lazy_ibt_plt->plt_entry,
+ x32_non_lazy_ibt_plt->plt_got_offset) == 0))
+ {
+ /* Match x32 IBT PLT. */
+ plt_type = plt_second;
+ non_lazy_plt = x32_non_lazy_ibt_plt;
+ }
}
if (plt_type == plt_unknown)
{
- free (plt_contents);
+ _bfd_elf_munmap_section_contents (plt, plt_contents);
continue;
}
return true;
}
+/* Mirror the ELF SHF_X86_64_LARGE section header flag into the BFD
+   section's SEC_ELF_LARGE flag.  */
+
+static bool
+elf_x86_64_section_flags (const Elf_Internal_Shdr *hdr)
+{
+  bool is_large = (hdr->sh_flags & SHF_X86_64_LARGE) != 0;
+
+  if (is_large)
+    hdr->bfd_section->flags |= SEC_ELF_LARGE;
+
+  return true;
+}
+
+/* Set SHF_X86_64_LARGE on the output section header when the BFD
+   section carries SEC_ELF_LARGE.  */
+
+static bool
+elf_x86_64_fake_sections (bfd *abfd ATTRIBUTE_UNUSED,
+			  Elf_Internal_Shdr *hdr, asection *sec)
+{
+  if ((sec->flags & SEC_ELF_LARGE) != 0)
+    hdr->sh_flags |= SHF_X86_64_LARGE;
+
+  return true;
+}
+
+/* Copy private section data from ISEC in IBFD to OSEC in OBFD,
+   stripping SHF_X86_64_LARGE when copying between different BFDs.  */
+
+static bool
+elf_x86_64_copy_private_section_data (bfd *ibfd, asection *isec,
+				      bfd *obfd, asection *osec)
+{
+  bool ok = _bfd_elf_copy_private_section_data (ibfd, isec, obfd, osec);
+
+  /* objcopy --set-section-flags without "large" drops SHF_X86_64_LARGE.  */
+  if (ok && ibfd != obfd)
+    elf_section_flags (osec) &= ~SHF_X86_64_LARGE;
+
+  return ok;
+}
+
static int
elf_x86_64_additional_program_headers (bfd *abfd,
struct bfd_link_info *info ATTRIBUTE_UNUSED)
htab = elf_x86_hash_table (info, bed->target_id);
if (!htab)
abort ();
- if (htab->params->bndplt)
- {
- init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt;
- init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt;
- }
- else
- {
- init_table.lazy_plt = &elf_x86_64_lazy_plt;
- init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
- }
+
+ init_table.lazy_plt = &elf_x86_64_lazy_plt;
+ init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
+
+ init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
+ init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
if (ABI_64_P (info->output_bfd))
{
- init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
- init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
+ init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt;
+ init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt;
+ init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_plt;
+ init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_plt;
}
else
{
- init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
- init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
+ /* SFrame is not supported for non AMD64. */
+ init_table.sframe_lazy_plt = NULL;
+ init_table.sframe_non_lazy_plt = NULL;
}
if (ABI_64_P (info->output_bfd))
return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
}
+/* Record the glibc version symbols the output will depend on:
+   GLIBC_ABI_DT_RELR when DT_RELR is enabled, and GLIBC_2.36 when
+   -z mark-plt is in effect.  */
+
+static void
+elf_x86_64_add_glibc_version_dependency
+  (struct elf_find_verdep_info *rinfo)
+{
+  const char *version[3] = { NULL, NULL, NULL };
+  unsigned int count = 0;
+  struct elf_x86_link_hash_table *htab
+    = elf_x86_hash_table (rinfo->info, X86_64_ELF_DATA);
+
+  if (rinfo->info->enable_dt_relr)
+    version[count++] = "GLIBC_ABI_DT_RELR";
+
+  if (htab != NULL && htab->params->mark_plt)
+    version[count++] = "GLIBC_2.36";
+
+  if (count > 0)
+    _bfd_elf_link_add_glibc_version_dependency (rinfo, version);
+}
+
static const struct bfd_elf_special_section
elf_x86_64_special_sections[]=
{
#define ELF_ARCH bfd_arch_i386
#define ELF_TARGET_ID X86_64_ELF_DATA
#define ELF_MACHINE_CODE EM_X86_64
-#if DEFAULT_LD_Z_SEPARATE_CODE
-# define ELF_MAXPAGESIZE 0x1000
-#else
-# define ELF_MAXPAGESIZE 0x200000
-#endif
+#define ELF_MAXPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
#define elf_backend_can_gc_sections 1
#define elf_info_to_howto elf_x86_64_info_to_howto
+#define bfd_elf64_bfd_copy_private_section_data \
+ elf_x86_64_copy_private_section_data
#define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
#define bfd_elf64_bfd_reloc_name_lookup \
elf_x86_64_reloc_name_lookup
#define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
-#define elf_backend_always_size_sections elf_x86_64_always_size_sections
+#define elf_backend_early_size_sections elf_x86_64_early_size_sections
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
elf_x86_64_merge_symbol
#define elf_backend_special_sections \
elf_x86_64_special_sections
+#define elf_backend_section_flags elf_x86_64_section_flags
+#define elf_backend_fake_sections elf_x86_64_fake_sections
#define elf_backend_additional_program_headers \
elf_x86_64_additional_program_headers
#define elf_backend_setup_gnu_properties \
elf_x86_64_link_setup_gnu_properties
#define elf_backend_hide_symbol \
_bfd_x86_elf_hide_symbol
+#define elf_backend_add_glibc_version_dependency \
+ elf_x86_64_add_glibc_version_dependency
#undef elf64_bed
#define elf64_bed elf64_x86_64_bed
#undef ELF_TARGET_OS
#undef ELF_OSABI
+#define bfd_elf32_bfd_copy_private_section_data \
+ elf_x86_64_copy_private_section_data
#define bfd_elf32_bfd_reloc_type_lookup \
elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \