#define BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC BFD_RELOC_AARCH64_TLSDESC_LD64_LO12
#endif
+#define MORELLO_R(NAME) R_MORELLO_ ## NAME
+#define MORELLO_R_STR(NAME) "R_MORELLO_" #NAME
+
#if ARCH_SIZE == 32
#define AARCH64_R(NAME) R_AARCH64_P32_ ## NAME
#define AARCH64_R_STR(NAME) "R_AARCH64_P32_" #NAME
#define IS_AARCH64_TLS_RELAX_RELOC(R_TYPE) \
((R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_CALL \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_LD128_LO12 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21 \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_CALL \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_CALL \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD64_LO12 \
+ || (R_TYPE) == BFD_RELOC_MORELLO_TLSDESC_LD128_LO12 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LDR \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_LD_PREL19 \
|| (R_TYPE) == BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC \
elf_aarch64_link_hash_entry. */
#define RELOC_SIZE(HTAB) (sizeof (ElfNN_External_Rela))
-/* GOT Entry size - 8 bytes in ELF64 and 4 bytes in ELF32. */
-#define GOT_ENTRY_SIZE (ARCH_SIZE / 8)
+/* GOT Entry size - 16 bytes in C64, 8 bytes in ELF64 and 4 bytes in ELF32. */
+#define GOT_ENTRY_SIZE(htab) (ARCH_SIZE >> (3 - htab->c64_rel))
+#define GOT_RESERVED_HEADER_SLOTS (3)
#define PLT_ENTRY_SIZE (32)
#define PLT_SMALL_ENTRY_SIZE (16)
#define PLT_TLSDESC_ENTRY_SIZE (32)
#define aarch64_compute_jump_table_size(htab) \
(((htab)->root.srelplt == NULL) ? 0 \
- : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE)
+ : (htab)->root.srelplt->reloc_count * GOT_ENTRY_SIZE (htab))
/* The first entry in a procedure linkage table looks like this
if the distance between the PLTGOT and the PLT is < 4GB use
0x1f, 0x20, 0x03, 0xd5, /* nop */
};
+/* The C64 PLT0. */
+static const bfd_byte elfNN_c64_small_plt0_entry[PLT_ENTRY_SIZE] =
+{
+ 0xf0, 0x7b, 0xbf, 0x62, /* stp c16, c30, [csp, #-32]! */
+ 0x10, 0x00, 0x80, 0x90, /* adrp c16, (GOT+16) */
+ 0x11, 0x0a, 0x40, 0xc2, /* ldr c17, [c16, #PLT_GOT+0x10] */
+ 0x10, 0x02, 0x00, 0x02, /* add c16, c16,#PLT_GOT+0x10 */
+ 0x20, 0x12, 0xc2, 0xc2, /* br c17 */
+ 0x1f, 0x20, 0x03, 0xd5, /* nop */
+ 0x1f, 0x20, 0x03, 0xd5, /* nop */
+ 0x1f, 0x20, 0x03, 0xd5, /* nop */
+};
+
/* Per function entry in a procedure linkage table looks like this
if the distance between the PLTGOT and the PLT is < 4GB use
these PLT entries. Use BTI versions of the PLTs when enabled. */
0x20, 0x02, 0x1f, 0xd6, /* br x17. */
};
+/* The C64 PLT. */
+static const bfd_byte elfNN_c64_small_plt_entry[PLT_SMALL_ENTRY_SIZE] =
+{
+ 0x10, 0x00, 0x80, 0x90, /* adrp c16, PLTGOT + offset */
+ 0x11, 0x02, 0x40, 0xc2, /* ldr c17, [c16, PLTGOT + offset] */
+ 0x10, 0x02, 0x00, 0x02, /* add c16, c16, :lo12:PLTGOT + offset */
+ 0x20, 0x12, 0xc2, 0xc2, /* br c17. */
+};
+
static const bfd_byte
elfNN_aarch64_small_plt_bti_entry[PLT_BTI_SMALL_ENTRY_SIZE] =
{
0x1f, 0x20, 0x03, 0xd5, /* nop */
};
+static const bfd_byte
+elfNN_aarch64_tlsdesc_small_plt_c64_entry[PLT_TLSDESC_ENTRY_SIZE] =
+{
+ 0xe2, 0x8f, 0xbf, 0x62, /* stp c2, c3, [sp, #-16]! */
+ 0x02, 0x00, 0x80, 0x90, /* adrp c2, 0 */
+ 0x03, 0x00, 0x80, 0x90, /* adrp c3, 0 */
+ 0x42, 0x00, 0x40, 0xc2, /* ldr c2, [c2, #0] */
+ 0x63, 0x00, 0x00, 0x02, /* add c3, c3, 0 */
+ 0x40, 0x10, 0xc2, 0xc2, /* br c2 */
+ 0x1f, 0x20, 0x03, 0xd5, /* nop */
+ 0x1f, 0x20, 0x03, 0xd5, /* nop */
+};
+
#define elf_info_to_howto elfNN_aarch64_info_to_howto
#define elf_info_to_howto_rel elfNN_aarch64_info_to_howto
/* Relocations to generate 19, 21 and 33 bit PC-relative load/store
addresses: PG(x) is (x & ~0xfff). */
+ /* LD-lit: ((S+A-P) >> 4) & 0x1ffff */
+ HOWTO64 (MORELLO_R (LD_PREL_LO17), /* type */
+ 4, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 17, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (LD_PREL_LO17), /* name */
+ FALSE, /* partial_inplace */
+ 0x1ffff, /* src_mask */
+ 0x1ffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
/* LD-lit: ((S+A-P) >> 2) & 0x7ffff */
HOWTO (AARCH64_R (LD_PREL_LO19), /* type */
2, /* rightshift */
0x7ffff, /* dst_mask */
TRUE), /* pcrel_offset */
+ /* C64 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0xfffff */
+ HOWTO64 (MORELLO_R (ADR_PREL_PG_HI20), /* type */
+ 12, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 20, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (ADR_PREL_PG_HI20), /* name */
+ FALSE, /* partial_inplace */
+ 0xfffff, /* src_mask */
+ 0xfffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
+ /* C64 ADRP: ((PG(S+A)-PG(P)) >> 12) & 0xfffff [no overflow check] */
+ HOWTO64 (MORELLO_R (ADR_PREL_PG_HI20_NC), /* type */
+ 12, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 20, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (ADR_PREL_PG_HI20_NC), /* name */
+ FALSE, /* partial_inplace */
+ 0xfffff, /* src_mask */
+ 0xfffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
/* ADR: (S+A-P) & 0x1fffff */
HOWTO (AARCH64_R (ADR_PREL_LO21), /* type */
0, /* rightshift */
0x3ffffff, /* dst_mask */
TRUE), /* pcrel_offset */
+ /* TBZ/NZ: ((S+A-P) >> 2) & 0x3fff */
+ HOWTO64 (MORELLO_R (TSTBR14), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 14, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (TSTBR14), /* name */
+ FALSE, /* partial_inplace */
+ 0x3fff, /* src_mask */
+ 0x3fff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
+ /* B.cond: ((S+A-P) >> 2) & 0x7ffff */
+ HOWTO64 (MORELLO_R (CONDBR19), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 19, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (CONDBR19), /* name */
+ FALSE, /* partial_inplace */
+ 0x7ffff, /* src_mask */
+ 0x7ffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
+ /* B: ((S+A-P) >> 2) & 0x3ffffff */
+ HOWTO64 (MORELLO_R (JUMP26), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 26, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (JUMP26), /* name */
+ FALSE, /* partial_inplace */
+ 0x3ffffff, /* src_mask */
+ 0x3ffffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
+ /* BL: ((S+A-P) >> 2) & 0x3ffffff */
+ HOWTO64 (MORELLO_R (CALL26), /* type */
+ 2, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 26, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_signed, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (CALL26), /* name */
+ FALSE, /* partial_inplace */
+ 0x3ffffff, /* src_mask */
+ 0x3ffffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
/* LD/ST16: (S+A) & 0xffe */
HOWTO (AARCH64_R (LDST16_ABS_LO12_NC), /* type */
1, /* rightshift */
0x1fffff, /* dst_mask */
TRUE), /* pcrel_offset */
+ /* Get to the page for the GOT entry for the symbol
+ (G(S) - P) using a C64 ADRP instruction. */
+ HOWTO64 (MORELLO_R (ADR_GOT_PAGE), /* type */
+ 12, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 20, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (ADR_GOT_PAGE), /* name */
+ FALSE, /* partial_inplace */
+ 0xfffff, /* src_mask */
+ 0xfffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
/* LD64: GOT offset G(S) & 0xff8 */
HOWTO64 (AARCH64_R (LD64_GOT_LO12_NC), /* type */
3, /* rightshift */
0xff8, /* dst_mask */
FALSE), /* pcrel_offset */
+ /* LD128: GOT offset G(S) & 0xff0 */
+ HOWTO64 (MORELLO_R (LD128_GOT_LO12_NC), /* type */
+ 4, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 12, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (LD128_GOT_LO12_NC), /* name */
+ FALSE, /* partial_inplace */
+ 0xff0, /* src_mask */
+ 0xff0, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
/* LD32: GOT offset G(S) & 0xffc */
HOWTO32 (AARCH64_R (LD32_GOT_LO12_NC), /* type */
2, /* rightshift */
0x0, /* dst_mask */
FALSE), /* pcrel_offset */
+ /* Get to the page for the GOT entry for the symbol
+ (G(S) - P) using an ADRP instruction. */
+ HOWTO64 (MORELLO_R (TLSDESC_ADR_PAGE20), /* type */
+ 12, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 20, /* bitsize */
+ TRUE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (TLSDESC_ADR_PAGE20), /* name */
+ FALSE, /* partial_inplace */
+ 0xfffff, /* src_mask */
+ 0xfffff, /* dst_mask */
+ TRUE), /* pcrel_offset */
+
+ /* LD128: GOT offset G(S) & 0xff0. */
+ HOWTO64 (MORELLO_R (TLSDESC_LD128_LO12), /* type */
+ 4, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 12, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (TLSDESC_LD128_LO12), /* name */
+ FALSE, /* partial_inplace */
+ 0xff0, /* src_mask */
+ 0xff0, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (TLSDESC_CALL), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (TLSDESC_CALL), /* name */
+ FALSE, /* partial_inplace */
+ 0x0, /* src_mask */
+ 0x0, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
HOWTO (AARCH64_R (COPY), /* type */
0, /* rightshift */
2, /* size (0 = byte, 1 = short, 2 = long) */
ALL_ONES, /* dst_mask */
FALSE), /* pcrel_offset */
+ HOWTO64 (MORELLO_R (CAPINIT), /* type */
+ 0, /* rightshift */
+ 4, /* size (0 = byte, 1 = short, 2 = long) */
+ 0, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (CAPINIT), /* name */
+ FALSE, /* partial_inplace */
+ ALL_ONES, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (GLOB_DAT),/* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (GLOB_DAT), /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (JUMP_SLOT), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (JUMP_SLOT), /* name */
+ TRUE, /* partial_inplace */
+ 0xffffffff, /* src_mask */
+ 0xffffffff, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (RELATIVE), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (RELATIVE), /* name */
+ TRUE, /* partial_inplace */
+ ALL_ONES, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (IRELATIVE), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_bitfield, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (IRELATIVE), /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
+ HOWTO64 (MORELLO_R (TLSDESC), /* type */
+ 0, /* rightshift */
+ 2, /* size (0 = byte, 1 = short, 2 = long) */
+ 64, /* bitsize */
+ FALSE, /* pc_relative */
+ 0, /* bitpos */
+ complain_overflow_dont, /* complain_on_overflow */
+ bfd_elf_generic_reloc, /* special_function */
+ MORELLO_R_STR (TLSDESC), /* name */
+ FALSE, /* partial_inplace */
+ 0, /* src_mask */
+ ALL_ONES, /* dst_mask */
+ FALSE), /* pcrel_offset */
+
EMPTY_HOWTO (0),
};
The entry_names are used to do simple name mangling on the stubs.
Given a function name, and its type, the stub can be found. The
name can be changed. The only requirement is the %s be present. */
-#define STUB_ENTRY_NAME "__%s_veneer"
+#define STUB_ENTRY_NAME "__%s%s_veneer"
/* The name of the dynamic interpreter. This is put in the .interp
section. */
#define AARCH64_MAX_ADRP_IMM ((1 << 20) - 1)
#define AARCH64_MIN_ADRP_IMM (-(1 << 20))
+#define C64_MAX_ADRP_IMM ((1 << 19) - 1)
+#define C64_MIN_ADRP_IMM (-(1 << 19))
+
+static bfd_boolean
+aarch64_branch_reloc_p (unsigned int r_type)
+{
+ switch (r_type)
+ {
+ case MORELLO_R (JUMP26):
+ case MORELLO_R (CALL26):
+ case AARCH64_R (JUMP26):
+ case AARCH64_R (CALL26):
+ return TRUE;
+
+ default: break;
+ }
+
+ return FALSE;
+}
+
static int
aarch64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
{
return offset <= AARCH64_MAX_ADRP_IMM && offset >= AARCH64_MIN_ADRP_IMM;
}
+static bfd_boolean
+c64_valid_for_adrp_p (bfd_vma value, bfd_vma place)
+{
+ bfd_signed_vma offset = (bfd_signed_vma) (PG (value) - PG (place)) >> 12;
+ return offset <= C64_MAX_ADRP_IMM && offset >= C64_MIN_ADRP_IMM;
+}
+
static int
aarch64_valid_branch_p (bfd_vma value, bfd_vma place)
{
0x14000000, /* b <label> */
};
+static const uint32_t aarch64_c64_branch_stub [] =
+{
+ 0xc2c273e0, /* bx #4 */
+ 0x90800010, /* adrp c16, X */
+ /* R_MORELLO_ADR_HI20_PCREL(X) */
+ 0x02000210, /* add c16, c16, :lo12:X */
+ /* R_AARCH64_ADD_ABS_LO12_NC(X) */
+ 0xc2c21200, /* br c16 */
+};
+
+static const uint32_t c64_aarch64_branch_stub [] =
+{
+ 0x90800010, /* adrp c16, X */
+ /* R_MORELLO_ADR_HI20_PCREL(X) */
+ 0x02000210, /* add c16, c16, :lo12:X */
+ /* R_AARCH64_ADD_ABS_LO12_NC(X) */
+ 0xc2c21200, /* br c16 */
+};
+
/* Section name for stubs is the associated section name plus this
string. */
#define STUB_SUFFIX ".stub"
aarch64_stub_long_branch,
aarch64_stub_erratum_835769_veneer,
aarch64_stub_erratum_843419_veneer,
+ aarch64_stub_branch_c64,
+ c64_stub_branch_aarch64,
+ c64_stub_branch_c64,
};
struct elf_aarch64_stub_hash_entry
unsigned int mapcount;
unsigned int mapsize;
elf_aarch64_section_map *map;
+ bfd_boolean sorted;
}
_aarch64_elf_section_data;
#define elf_aarch64_section_data(sec) \
((_aarch64_elf_section_data *) elf_section_data (sec))
-/* The size of the thread control block which is defined to be two pointers. */
-#define TCB_SIZE (ARCH_SIZE/8)*2
+/* Used to order a list of mapping symbols by address. */
-struct elf_aarch64_local_symbol
+static int
+elf_aarch64_compare_mapping (const void *a, const void *b)
{
- unsigned int got_type;
- bfd_signed_vma got_refcount;
+ const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
+ const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
+
+ if (amap->vma > bmap->vma)
+ return 1;
+ else if (amap->vma < bmap->vma)
+ return -1;
+ else if (amap->type > bmap->type)
+ /* Ensure results do not depend on the host qsort for objects with
+ multiple mapping symbols at the same address by sorting on type
+ after vma. */
+ return 1;
+ else if (amap->type < bmap->type)
+ return -1;
+ else
+ return 0;
+}
+
+static _aarch64_elf_section_data *
+elf_aarch64_section_data_get (asection *sec)
+{
+ _aarch64_elf_section_data *sec_data = elf_aarch64_section_data(sec);
+
+ /* A section that does not have aarch64 section data, so it does not have any
+ map information. Assume A64. */
+ if (sec_data == NULL || !sec_data->elf.is_target_section_data)
+ return NULL;
+
+ if (sec_data->sorted)
+ goto done;
+
+ qsort (sec_data->map, sec_data->mapcount, sizeof (elf_aarch64_section_map),
+ elf_aarch64_compare_mapping);
+
+ sec_data->sorted = TRUE;
+
+done:
+ return sec_data;
+}
+
+/* Returns TRUE if the label with st_value as VALUE is within a C64 code
+ section or not. */
+
+static bfd_boolean
+c64_value_p (asection *section, unsigned int value)
+{
+ struct _aarch64_elf_section_data *sec_data =
+ elf_aarch64_section_data_get (section);
+
+ if (sec_data == NULL)
+ return FALSE;
+
+ unsigned int span;
+
+ for (span = 0; span < sec_data->mapcount; span++)
+ {
+ unsigned int span_start = sec_data->map[span].vma;
+ unsigned int span_end = ((span == sec_data->mapcount - 1)
+ ? sec_data->map[0].vma + section->size
+ : sec_data->map[span + 1].vma);
+ char span_type = sec_data->map[span].type;
+
+ if (span_start <= value && value < span_end && span_type == 'c')
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/* The size of the thread control block which is defined to be two pointers. */
+#define TCB_SIZE (ARCH_SIZE/8)*2
+
+struct elf_aarch64_local_symbol
+{
+ unsigned int got_type;
+ bfd_signed_vma got_refcount;
bfd_vma got_offset;
/* Offset of the GOTPLT entry reserved for the TLS descriptor. The
/* PLT type based on security. */
aarch64_plt_type plt_type;
+
+ /* Flag to check if section maps have been initialised for all sections in
+ this object. */
+ bfd_boolean secmaps_initialised;
};
#define elf_aarch64_tdata(bfd) \
#define GOT_TLS_GD 2
#define GOT_TLS_IE 4
#define GOT_TLSDESC_GD 8
+#define GOT_CAP 16
#define GOT_TLS_GD_ANY_P(type) ((type & GOT_TLS_GD) || (type & GOT_TLSDESC_GD))
{
struct elf_link_hash_entry root;
- /* Track dynamic relocs copied for this symbol. */
- struct elf_dyn_relocs *dyn_relocs;
-
/* Since PLT entries have variable size, we need to record the
index into .got.plt instead of recomputing it from the PLT
offset. */
/* The bytes of the subsequent PLT entry. */
const bfd_byte *plt_entry;
- /* Small local sym cache. */
- struct sym_cache sym_cache;
-
/* For convenience in allocate_dynrelocs. */
bfd *obfd;
/* JUMP_SLOT relocs for variant PCS symbols may be present. */
int variant_pcs;
- /* The offset into splt of the PLT entry for the TLS descriptor
- resolver. Special values are 0, if not necessary (or not found
- to be necessary yet), and -1 if needed but not determined
- yet. */
- bfd_vma tlsdesc_plt;
-
/* The number of bytes in the PLT enty for the TLS descriptor. */
bfd_size_type tlsdesc_plt_entry_size;
- /* The GOT offset for the lazy trampoline. Communicated to the
- loader via DT_TLSDESC_GOT. The magic value (bfd_vma) -1
- indicates an offset is not allocated. */
- bfd_vma dt_tlsdesc_got;
-
/* Used by local STT_GNU_IFUNC symbols. */
htab_t loc_hash_table;
void * loc_hash_memory;
+
+ /* Used for capability relocations. */
+ asection *srelcaps;
+ int c64_rel;
+ bfd_boolean c64_output;
};
/* Create an entry in an AArch64 ELF linker hash table. */
table, string));
if (ret != NULL)
{
- ret->dyn_relocs = NULL;
ret->got_type = GOT_UNKNOWN;
ret->plt_got_offset = (bfd_vma) - 1;
ret->stub_cache = NULL;
edir = (struct elf_aarch64_link_hash_entry *) dir;
eind = (struct elf_aarch64_link_hash_entry *) ind;
- if (eind->dyn_relocs != NULL)
- {
- if (edir->dyn_relocs != NULL)
- {
- struct elf_dyn_relocs **pp;
- struct elf_dyn_relocs *p;
-
- /* Add reloc counts against the indirect sym to the direct sym
- list. Merge any entries against the same section. */
- for (pp = &eind->dyn_relocs; (p = *pp) != NULL;)
- {
- struct elf_dyn_relocs *q;
-
- for (q = edir->dyn_relocs; q != NULL; q = q->next)
- if (q->sec == p->sec)
- {
- q->pc_count += p->pc_count;
- q->count += p->count;
- *pp = p->next;
- break;
- }
- if (q == NULL)
- pp = &p->next;
- }
- *pp = edir->dyn_relocs;
- }
-
- edir->dyn_relocs = eind->dyn_relocs;
- eind->dyn_relocs = NULL;
- }
-
if (ind->root.type == bfd_link_hash_indirect)
{
/* Copy over PLT info. */
ret->plt_entry = elfNN_aarch64_small_plt_entry;
ret->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
ret->obfd = abfd;
- ret->dt_tlsdesc_got = (bfd_vma) - 1;
+ ret->root.tlsdesc_got = (bfd_vma) - 1;
if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
sizeof (struct elf_aarch64_stub_hash_entry)))
howto, value) == bfd_reloc_ok;
}
+/* Return interworking stub for a relocation. */
+
+static enum elf_aarch64_stub_type
+aarch64_interwork_stub (unsigned int r_type,
+ bfd_boolean branch_to_c64)
+{
+ switch (r_type)
+ {
+ case MORELLO_R (JUMP26):
+ case MORELLO_R (CALL26):
+ if (!branch_to_c64)
+ return c64_stub_branch_aarch64;
+ break;
+ case AARCH64_R (JUMP26):
+ case AARCH64_R (CALL26):
+ if (branch_to_c64)
+ return aarch64_stub_branch_c64;
+ break;
+ default:
+ break;
+ }
+
+ return aarch64_stub_none;
+}
+
static enum elf_aarch64_stub_type
aarch64_select_branch_stub (bfd_vma value, bfd_vma place)
{
{
bfd_vma location;
bfd_signed_vma branch_offset;
- unsigned int r_type;
+ unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
if (st_type != STT_FUNC
branch_offset = (bfd_signed_vma) (destination - location);
- r_type = ELFNN_R_TYPE (rel->r_info);
-
- /* We don't want to redirect any old unconditional jump in this way,
- only one which is being used for a sibcall, where it is
- acceptable for the IP0 and IP1 registers to be clobbered. */
- if ((r_type == AARCH64_R (CALL26) || r_type == AARCH64_R (JUMP26))
- && (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
- || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET))
+ /* For A64 <-> C64 branches we only come here for jumps to PLT. Treat them
+ as regular branches and leave the interworking to PLT. */
+ if (branch_offset > AARCH64_MAX_FWD_BRANCH_OFFSET
+ || branch_offset < AARCH64_MAX_BWD_BRANCH_OFFSET)
{
- stub_type = aarch64_stub_long_branch;
+ switch (r_type)
+ {
+ /* We don't want to redirect any old unconditional jump in this way,
+ only one which is being used for a sibcall, where it is
+ acceptable for the IP0 and IP1 registers to be clobbered. */
+ case AARCH64_R (CALL26):
+ case AARCH64_R (JUMP26):
+ return aarch64_stub_long_branch;
+ case MORELLO_R (CALL26):
+ case MORELLO_R (JUMP26):
+ return c64_stub_branch_c64;
+ default:
+ break;
+ }
}
- return stub_type;
+ return aarch64_stub_none;
+}
+
+/* Return a string to add as suffix to a veneer name. */
+
+static const char *
+aarch64_lookup_stub_type_suffix (enum elf_aarch64_stub_type stub_type)
+{
+ switch (stub_type)
+ {
+ case aarch64_stub_branch_c64:
+ return "_a64c64";
+ case c64_stub_branch_aarch64:
+ return "_c64a64";
+ break;
+ default:
+ return "";
+ }
}
/* Build a name for an entry in the stub hash table. */
elfNN_aarch64_stub_name (const asection *input_section,
const asection *sym_sec,
const struct elf_aarch64_link_hash_entry *hash,
- const Elf_Internal_Rela *rel)
+ const Elf_Internal_Rela *rel,
+ enum elf_aarch64_stub_type stub_type)
{
char *stub_name;
bfd_size_type len;
+ const char *suffix = aarch64_lookup_stub_type_suffix (stub_type);;
if (hash)
{
len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 16 + 1;
stub_name = bfd_malloc (len);
if (stub_name != NULL)
- snprintf (stub_name, len, "%08x_%s+%" BFD_VMA_FMT "x",
+ snprintf (stub_name, len, "%08x_%s%s+%" BFD_VMA_FMT "x",
(unsigned int) input_section->id,
hash->root.root.root.string,
- rel->r_addend);
+ suffix, rel->r_addend);
}
else
{
len = 8 + 1 + 8 + 1 + 8 + 1 + 16 + 1;
stub_name = bfd_malloc (len);
if (stub_name != NULL)
- snprintf (stub_name, len, "%08x_%x:%x+%" BFD_VMA_FMT "x",
+ snprintf (stub_name, len, "%08x_%x:%x%s+%" BFD_VMA_FMT "x",
(unsigned int) input_section->id,
(unsigned int) sym_sec->id,
(unsigned int) ELFNN_R_SYM (rel->r_info),
- rel->r_addend);
+ suffix, rel->r_addend);
}
return stub_name;
return _bfd_elf_hash_symbol (h);
}
-
/* Look up an entry in the stub hash. Stub entries are cached because
creating the stub name takes a bit of time. */
const asection *sym_sec,
struct elf_link_hash_entry *hash,
const Elf_Internal_Rela *rel,
- struct elf_aarch64_link_hash_table *htab)
+ struct elf_aarch64_link_hash_table *htab,
+ enum elf_aarch64_stub_type stub_type)
{
struct elf_aarch64_stub_hash_entry *stub_entry;
struct elf_aarch64_link_hash_entry *h =
{
char *stub_name;
- stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel);
+ stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, h, rel, stub_type);
if (stub_name == NULL)
return NULL;
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
+ bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
+ + stub_sec->output_offset);
+
if (stub_entry->stub_type == aarch64_stub_long_branch)
{
- bfd_vma place = (stub_entry->stub_offset + stub_sec->output_section->vma
- + stub_sec->output_offset);
-
/* See if we can relax the stub. */
if (aarch64_valid_for_adrp_p (sym_value, place))
stub_entry->stub_type = aarch64_select_branch_stub (sym_value, place);
}
+ if ((stub_entry->stub_type == aarch64_stub_branch_c64
+ || stub_entry->stub_type == c64_stub_branch_aarch64
+ || stub_entry->stub_type == c64_stub_branch_c64)
+ && !c64_valid_for_adrp_p (sym_value, place))
+ {
+ _bfd_error_handler
+ (_("%s: stub target out of range for %s branch"),
+ stub_entry->output_name,
+ (stub_entry->stub_type == aarch64_stub_branch_c64
+ ? "A64 to C64" : "C64 to A64"));
+ bfd_set_error (bfd_error_bad_value);
+ return FALSE;
+ }
+
switch (stub_entry->stub_type)
{
case aarch64_stub_adrp_branch:
template = aarch64_erratum_843419_stub;
template_size = sizeof (aarch64_erratum_843419_stub);
break;
+ case aarch64_stub_branch_c64:
+ template = aarch64_c64_branch_stub;
+ template_size = sizeof (aarch64_c64_branch_stub);
+ break;
+ case c64_stub_branch_aarch64:
+ case c64_stub_branch_c64:
+ template = c64_aarch64_branch_stub;
+ template_size = sizeof (c64_aarch64_branch_stub);
+ break;
default:
abort ();
}
template_size = (template_size + 7) & ~7;
stub_sec->size += template_size;
+ bfd_vma stub_offset = stub_entry->stub_offset;
+
switch (stub_entry->stub_type)
{
case aarch64_stub_adrp_branch:
BFD_FAIL ();
break;
+ case aarch64_stub_branch_c64:
+ stub_offset += 4;
+ /* Fall through. */
+ case c64_stub_branch_aarch64:
+ case c64_stub_branch_c64:
+ if (!aarch64_relocate (R_MORELLO_ADR_PREL_PG_HI20, stub_bfd, stub_sec,
+ stub_offset, sym_value))
+ /* We fail early if offset is out of range. */
+ BFD_FAIL ();
+
+ if (!aarch64_relocate (AARCH64_R (ADD_ABS_LO12_NC), stub_bfd, stub_sec,
+ stub_offset + 4, sym_value))
+ BFD_FAIL ();
+ break;
+
default:
abort ();
}
size = sizeof (aarch64_erratum_843419_stub);
}
break;
+ case aarch64_stub_branch_c64:
+ size = sizeof (aarch64_c64_branch_stub);
+ break;
+ case c64_stub_branch_aarch64:
+ case c64_stub_branch_c64:
+ size = sizeof (c64_aarch64_branch_stub);
+ break;
default:
abort ();
}
return FALSE;
}
-/* Used to order a list of mapping symbols by address. */
-
-static int
-elf_aarch64_compare_mapping (const void *a, const void *b)
-{
- const elf_aarch64_section_map *amap = (const elf_aarch64_section_map *) a;
- const elf_aarch64_section_map *bmap = (const elf_aarch64_section_map *) b;
-
- if (amap->vma > bmap->vma)
- return 1;
- else if (amap->vma < bmap->vma)
- return -1;
- else if (amap->type > bmap->type)
- /* Ensure results do not depend on the host qsort for objects with
- multiple mapping symbols at the same address by sorting on type
- after vma. */
- return 1;
- else if (amap->type < bmap->type)
- return -1;
- else
- return 0;
-}
-
static char *
_bfd_aarch64_erratum_835769_stub_name (unsigned num_fixes)
sec_data = elf_aarch64_section_data (section);
- qsort (sec_data->map, sec_data->mapcount,
- sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
+ if (sec_data->mapcount)
+ qsort (sec_data->map, sec_data->mapcount,
+ sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
for (span = 0; span < sec_data->mapcount; span++)
{
sec_data = elf_aarch64_section_data (section);
- qsort (sec_data->map, sec_data->mapcount,
- sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
+ if (sec_data->mapcount)
+ qsort (sec_data->map, sec_data->mapcount,
+ sizeof (elf_aarch64_section_map), elf_aarch64_compare_mapping);
for (span = 0; span < sec_data->mapcount; span++)
{
return TRUE;
}
+static bfd_boolean
+section_start_symbol (bfd *abfd ATTRIBUTE_UNUSED, asection *section,
+ void *valp)
+{
+ return section->vma == *(bfd_vma *)valp;
+}
+
+/* Capability format functions. */
+
+static unsigned
+exponent (uint64_t len)
+{
+#define CAP_MAX_EXPONENT 50
+ /* Size is a 65 bit value, so there's an implicit 0 MSB. */
+ unsigned zeroes = __builtin_clzl (len) + 1;
+
+ /* All bits up to and including CAP_MW - 2 are zero. */
+ if (CAP_MAX_EXPONENT < zeroes)
+ return (unsigned) -1;
+ else
+ return CAP_MAX_EXPONENT - zeroes;
+#undef CAP_MAX_EXPONENT
+}
+
+#define ONES(x) ((1ULL << (x)) - 1)
+#define ALIGN_UP(x, a) (((x) + ONES (a)) & (~ONES (a)))
+
+static bfd_boolean
+c64_valid_cap_range (bfd_vma *basep, bfd_vma *limitp)
+{
+ bfd_vma base = *basep, size = *limitp - *basep;
+
+ unsigned e, old_e;
+
+ if ((e = exponent (size)) == (unsigned) -1)
+ return TRUE;
+
+ size = ALIGN_UP (size, e + 3);
+ old_e = e;
+ e = exponent (size);
+ if (old_e != e)
+ size = ALIGN_UP (size, e + 3);
+
+ base = ALIGN_UP (base, e + 3);
+
+ if (base == *basep && *limitp == base + size)
+ return TRUE;
+
+ *basep = base;
+ *limitp = base + size;
+ return FALSE;
+}
+
+struct sec_change_queue
+{
+ asection *sec;
+ struct sec_change_queue *next;
+};
+
+/* Queue up the change, sorted in order of the output section vma. */
+
+static void
+queue_section_padding (struct sec_change_queue **queue, asection *sec)
+{
+ struct sec_change_queue *q = *queue, *last_q = NULL, *n;
+
+ while (q != NULL)
+ {
+ if (q->sec->vma > sec->vma)
+ break;
+ last_q = q;
+ q = q->next;
+ }
+
+ n = bfd_zmalloc (sizeof (struct sec_change_queue));
+
+ if (last_q == NULL)
+ *queue = n;
+ else
+ {
+ n->next = q;
+ last_q->next = n;
+ }
+
+ n->sec = sec;
+}
+
+/* Check if the bounds covering all sections between LOW_SEC and HIGH_SEC will
+ get rounded off in the Morello capability format and if it does, queue up a
+ change to fix up the section layout. */
+static inline void
+record_section_change (asection *sec, struct sec_change_queue **queue)
+{
+ bfd_vma low = sec->vma;
+ bfd_vma high = sec->vma + sec->size;
+
+ if (!c64_valid_cap_range (&low, &high))
+ queue_section_padding (queue, sec);
+}
+
+/* Make sure that all capabilities that refer to sections have bounds that
+ won't overlap with neighbouring sections. This is needed in two specific
+ cases. The first case is that of PCC, which needs to span across all
+ executable sections as well as the GOT and PLT sections in the output
+ binary. The second case is that of linker and ldscript defined symbols that
+ indicate start and/or end of sections.
+
+ In both cases, overlap of capability bounds are avoided by aligning the base
+ of the section and if necessary, adding a pad at the end of the section so
+ that the section following it starts only after the pad. */
+
+static bfd_vma pcc_low;
+static bfd_vma pcc_high;
+void
+elfNN_c64_resize_sections (bfd *output_bfd, struct bfd_link_info *info,
+ void (*c64_pad_section) (asection *, bfd_vma),
+ void (*layout_sections_again) (void))
+{
+ asection *sec, *pcc_low_sec = NULL, *pcc_high_sec = NULL;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+ bfd_vma low = (bfd_vma) -1, high = 0;
+ bfd *input_bfd;
+
+ htab->layout_sections_again = layout_sections_again;
+
+ if (!htab->c64_output)
+ return;
+
+ struct sec_change_queue *queue = NULL;
+
+ /* First, walk through all the relocations to find those referring to linker
+ defined and ldscript defined symbols since we set their range to their
+ output sections. */
+ for (input_bfd = info->input_bfds;
+ htab->c64_rel && input_bfd != NULL; input_bfd = input_bfd->link.next)
+ {
+ Elf_Internal_Shdr *symtab_hdr;
+
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+ if (symtab_hdr->sh_info == 0)
+ continue;
+
+ for (sec = input_bfd->sections; sec != NULL; sec = sec->next)
+ {
+ Elf_Internal_Rela *irelaend, *irela;
+
+ /* If there aren't any relocs, then there's nothing more to do. */
+ if ((sec->flags & SEC_RELOC) == 0 || sec->reloc_count == 0)
+ continue;
+
+ irela = _bfd_elf_link_read_relocs (input_bfd, sec, NULL, NULL,
+ info->keep_memory);
+ if (irela == NULL)
+ continue;
+
+ /* Now examine each relocation. */
+ irelaend = irela + sec->reloc_count;
+ for (; irela < irelaend; irela++)
+ {
+ unsigned int r_indx;
+ struct elf_link_hash_entry *h;
+ int e_indx;
+ asection *os;
+
+ r_indx = ELFNN_R_SYM (irela->r_info);
+
+ /* Linker defined or linker script defined symbols are always in
+ the symbol hash. */
+ if (r_indx < symtab_hdr->sh_info)
+ continue;
+
+ e_indx = r_indx - symtab_hdr->sh_info;
+ h = elf_sym_hashes (input_bfd)[e_indx];
+
+ /* XXX Does this ever happen? */
+ if (h == NULL)
+ continue;
+
+ os = h->root.u.def.section->output_section;
+
+ if (h->root.linker_def)
+ record_section_change (os, &queue);
+ else if (h->root.ldscript_def)
+ {
+ const char *name = h->root.root.string;
+ size_t len = strlen (name);
+
+ if (len > 8 && name[0] == '_' && name[1] == '_'
+ && (!strncmp (name + 2, "start_", 6)
+ || !strcmp (name + len - 6, "_start")))
+
+ {
+ bfd_vma value = os->vma + os->size;
+
+ os = bfd_sections_find_if (info->output_bfd,
+ section_start_symbol, &value);
+
+ if (os != NULL)
+ record_section_change (os, &queue);
+ }
+ /* XXX We're overfitting here because the offset of H within
+ the output section is not yet resolved and ldscript
+ defined symbols do not have input section information. */
+ else
+ record_section_change (os, &queue);
+ }
+ }
+ }
+ }
+
+ /* Next, walk through output sections to find the PCC span and add a padding
+ at the end to ensure that PCC bounds don't bleed into neighbouring
+ sections. For now PCC needs to encompass all code sections, .got, .plt
+ and .got.plt. */
+ for (sec = output_bfd->sections; sec != NULL; sec = sec->next)
+ {
+ /* XXX This is a good place to figure out if there are any readable or
+ writable sections in the PCC range that are not in the list of
+ sections we want the PCC to span and then warn the user of it. */
+
+#define NOT_OP_SECTION(s) ((s) == NULL || (s)->output_section != sec)
+
+ if ((sec->flags & SEC_CODE) == 0
+ && NOT_OP_SECTION (htab->root.sgotplt)
+ && NOT_OP_SECTION (htab->root.igotplt)
+ && NOT_OP_SECTION (htab->root.sgot)
+ && NOT_OP_SECTION (htab->root.splt)
+ && NOT_OP_SECTION (htab->root.iplt))
+ continue;
+
+ if (sec->vma < low)
+ {
+ low = sec->vma;
+ pcc_low_sec = sec;
+ }
+ if (sec->vma + sec->size > high)
+ {
+ high = sec->vma + sec->size;
+ pcc_high_sec = sec;
+ }
+
+#undef NOT_OP_SECTION
+ }
+
+ /* Sequentially add alignment and padding as required. We also need to
+ account for the PCC-related alignment and padding here since its
+ requirements could change based on the range of sections it encompasses
+ and whether they need to be padded or aligned. */
+ while (queue)
+ {
+ unsigned align = 0;
+ bfd_vma padding = 0;
+
+ low = queue->sec->vma;
+ high = queue->sec->vma + queue->sec->size;
+
+ if (!c64_valid_cap_range (&low, &high))
+ {
+ align = __builtin_ctzl (low);
+
+ if (queue->sec->alignment_power < align)
+ queue->sec->alignment_power = align;
+
+ padding = high - queue->sec->vma - queue->sec->size;
+
+ if (queue->sec != pcc_high_sec)
+ {
+ c64_pad_section (queue->sec, padding);
+ padding = 0;
+ }
+ }
+
+ /* If we have crossed all sections within the PCC range, set up alignment
+ and padding for the PCC range. */
+ if (pcc_high_sec != NULL && pcc_low_sec != NULL
+ && (queue->next == NULL
+ || queue->next->sec->vma > pcc_high_sec->vma))
+ {
+ /* Layout sections since it affects the final range of PCC. */
+ (*htab->layout_sections_again) ();
+
+ pcc_low = pcc_low_sec->vma;
+ pcc_high = pcc_high_sec->vma + pcc_high_sec->size + padding;
+
+ if (!c64_valid_cap_range (&pcc_low, &pcc_high))
+ {
+ align = __builtin_ctzl (pcc_low);
+ if (pcc_low_sec->alignment_power < align)
+ pcc_low_sec->alignment_power = align;
+
+ padding = pcc_high - pcc_high_sec->vma - pcc_high_sec->size;
+ c64_pad_section (pcc_high_sec, padding);
+ }
+ }
+
+ (*htab->layout_sections_again) ();
+
+ struct sec_change_queue *queue_free = queue;
+
+ queue = queue->next;
+ free (queue_free);
+ }
+
+ if (pcc_low_sec)
+ {
+ if (!pcc_high_sec)
+ abort ();
+ pcc_low = pcc_low_sec->vma;
+ pcc_high = pcc_high_sec->vma + pcc_high_sec->size;
+ }
+}
/* Determine and set the size of the stub section for a final link.
The basic idea here is to examine all the relocations looking for
- PC-relative calls to a target that is unreachable with a "bl"
- instruction. */
+ PC-relative calls to a target that either needs a PE state change (A64 to
+ C64 or vice versa) or in case of unconditional branches (B/BL), is
+ unreachable. */
bfd_boolean
elfNN_aarch64_size_stubs (bfd *output_bfd,
{
Elf_Internal_Shdr *symtab_hdr;
asection *section;
- Elf_Internal_Sym *local_syms = NULL;
if (!is_aarch64_elf (input_bfd)
|| (input_bfd->flags & BFD_LINKER_CREATED) != 0)
for (; irela < irelaend; irela++)
{
unsigned int r_type, r_indx;
- enum elf_aarch64_stub_type stub_type;
+ enum elf_aarch64_stub_type stub_type = aarch64_stub_none;
struct elf_aarch64_stub_hash_entry *stub_entry;
asection *sym_sec;
bfd_vma sym_value;
const asection *id_sec;
unsigned char st_type;
bfd_size_type len;
+ unsigned branch_to_c64 = FALSE;
+ const char *suffix;
r_type = ELFNN_R_TYPE (irela->r_info);
r_indx = ELFNN_R_SYM (irela->r_info);
/* Only look for stubs on unconditional branch and
branch and link instructions. */
- if (r_type != (unsigned int) AARCH64_R (CALL26)
- && r_type != (unsigned int) AARCH64_R (JUMP26))
+ if (!aarch64_branch_reloc_p (r_type))
continue;
/* Now determine the call target, its name, value,
if (r_indx < symtab_hdr->sh_info)
{
/* It's a local symbol. */
- Elf_Internal_Sym *sym;
- Elf_Internal_Shdr *hdr;
+ Elf_Internal_Sym *sym =
+ bfd_sym_from_r_symndx (&htab->root.sym_cache,
+ input_bfd, r_indx);
+ if (sym == NULL)
+ goto error_ret_free_internal;
- if (local_syms == NULL)
- {
- local_syms
- = (Elf_Internal_Sym *) symtab_hdr->contents;
- if (local_syms == NULL)
- local_syms
- = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
- symtab_hdr->sh_info, 0,
- NULL, NULL, NULL);
- if (local_syms == NULL)
- goto error_ret_free_internal;
- }
+ branch_to_c64 |= (sym->st_target_internal
+ & ST_BRANCH_TO_C64);
+
+ Elf_Internal_Shdr *hdr =
+ elf_elfsections (input_bfd)[sym->st_shndx];
- sym = local_syms + r_indx;
- hdr = elf_elfsections (input_bfd)[sym->st_shndx];
sym_sec = hdr->bfd_section;
if (!sym_sec)
/* This is an undefined symbol. It can never
= bfd_elf_string_from_elf_section (input_bfd,
symtab_hdr->sh_link,
sym->st_name);
+
+ /* Get the interworking stub if needed. */
+ stub_type = aarch64_interwork_stub (r_type,
+ branch_to_c64);
}
else
{
int e_indx;
+ struct elf_aarch64_link_hash_table *globals =
+ elf_aarch64_hash_table (info);
e_indx = r_indx - symtab_hdr->sh_info;
hash = ((struct elf_aarch64_link_hash_entry *)
hash = ((struct elf_aarch64_link_hash_entry *)
hash->root.root.u.i.link);
+ /* Static executable. */
+ if (globals->root.splt == NULL || hash == NULL
+ || hash->root.plt.offset == (bfd_vma) - 1)
+ {
+ branch_to_c64 |= (hash->root.target_internal
+ & ST_BRANCH_TO_C64);
+ stub_type = aarch64_interwork_stub (r_type,
+ branch_to_c64);
+ }
+
if (hash->root.root.type == bfd_link_hash_defined
|| hash->root.root.type == bfd_link_hash_defweak)
{
- struct elf_aarch64_link_hash_table *globals =
- elf_aarch64_hash_table (info);
sym_sec = hash->root.root.u.def.section;
sym_value = hash->root.root.u.def.value;
/* For a destination in a shared library,
target address to decide whether a long
branch stub is needed.
For absolute code, they cannot be handled. */
- struct elf_aarch64_link_hash_table *globals =
- elf_aarch64_hash_table (info);
if (globals->root.splt != NULL && hash != NULL
&& hash->root.plt.offset != (bfd_vma) - 1)
}
/* Determine what (if any) linker stub is needed. */
- stub_type = aarch64_type_of_stub (section, irela, sym_sec,
- st_type, destination);
+ if (stub_type == aarch64_stub_none)
+ stub_type = aarch64_type_of_stub (section, irela, sym_sec,
+ st_type, destination);
+
if (stub_type == aarch64_stub_none)
continue;
/* Get the name of this stub. */
stub_name = elfNN_aarch64_stub_name (id_sec, sym_sec, hash,
- irela);
+ irela, stub_type);
if (!stub_name)
goto error_ret_free_internal;
/* Always update this stub's target since it may have
changed after layout. */
stub_entry->target_value = sym_value + irela->r_addend;
+
+ /* Set LSB for A64 to C64 branch. */
+ if (branch_to_c64)
+ stub_entry->target_value |= 1;
+
continue;
}
}
stub_entry->target_value = sym_value + irela->r_addend;
+ /* Set LSB for A64 to C64 branch. */
+ if (branch_to_c64)
+ stub_entry->target_value |= 1;
+
stub_entry->target_section = sym_sec;
stub_entry->stub_type = stub_type;
stub_entry->h = hash;
stub_entry->st_type = st_type;
+ suffix = aarch64_lookup_stub_type_suffix (stub_type);
+
if (sym_name == NULL)
sym_name = "unnamed";
- len = sizeof (STUB_ENTRY_NAME) + strlen (sym_name);
+ len = (sizeof (STUB_ENTRY_NAME) + strlen (sym_name)
+ + strlen (suffix));
stub_entry->output_name = bfd_alloc (htab->stub_bfd, len);
if (stub_entry->output_name == NULL)
{
}
snprintf (stub_entry->output_name, len, STUB_ENTRY_NAME,
- sym_name);
+ sym_name, suffix);
stub_changed = TRUE;
}
/* Build the stubs as directed by the stub hash table. */
table = &htab->stub_hash_table;
+
+ bfd_error_type save_error = bfd_get_error ();
+ bfd_set_error (bfd_error_no_error);
bfd_hash_traverse (table, aarch64_build_one_stub, info);
+ if (bfd_get_error () != bfd_error_no_error)
+ return FALSE;
+
+ bfd_set_error (save_error);
+
return TRUE;
}
/* Add an entry to the code/data map for section SEC. */
static void
-elfNN_aarch64_section_map_add (asection *sec, char type, bfd_vma vma)
+elfNN_aarch64_section_map_add (bfd *abfd, asection *sec, char type,
+ bfd_vma vma)
{
struct _aarch64_elf_section_data *sec_data =
elf_aarch64_section_data (sec);
unsigned int newidx;
+ /* The aarch64 section hook was not called for this section. */
+ if (!sec_data->elf.is_target_section_data)
+ {
+ struct _aarch64_elf_section_data *newdata =
+ bfd_zalloc (abfd, sizeof (*newdata));
+
+ if (newdata == NULL)
+ return;
+
+ newdata->elf = sec_data->elf;
+ newdata->elf.is_target_section_data = TRUE;
+ free (sec_data);
+ sec->used_by_bfd = sec_data = newdata;
+ }
+
if (sec_data->map == NULL)
{
sec_data->map = bfd_malloc (sizeof (elf_aarch64_section_map));
/* Initialise maps of insn/data for input BFDs. */
void
-bfd_elfNN_aarch64_init_maps (bfd *abfd)
+bfd_elfNN_aarch64_init_maps (bfd *abfd, struct bfd_link_info *info)
{
Elf_Internal_Sym *isymbuf;
Elf_Internal_Shdr *hdr;
if (!is_aarch64_elf (abfd))
return;
+ if (elf_aarch64_tdata (abfd)->secmaps_initialised)
+ return;
+
if ((abfd->flags & DYNAMIC) != 0)
return;
if (isymbuf == NULL)
return;
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table ((info));
+
for (i = 0; i < localsyms; i++)
{
Elf_Internal_Sym *isym = &isymbuf[i];
if (bfd_is_aarch64_special_symbol_name
(name, BFD_AARCH64_SPECIAL_SYM_TYPE_MAP))
- elfNN_aarch64_section_map_add (sec, name[1], isym->st_value);
+ {
+ elfNN_aarch64_section_map_add (abfd, sec, name[1],
+ isym->st_value);
+ if (!htab->c64_output && name[1] == 'c')
+ htab->c64_output = TRUE;
+ }
}
}
+ elf_aarch64_tdata (abfd)->secmaps_initialised = TRUE;
}
static void
struct elf_aarch64_link_hash_table *globals;
globals = elf_aarch64_hash_table (link_info);
+ /* Set up plt stubs in case we need C64 PLT. Override BTI/PAC since they're
+ not compatible. PLT stub sizes are the same as the default ones. */
+ if (globals->c64_rel)
+ {
+ if (plt_type != PLT_NORMAL)
+ _bfd_error_handler
+ (_("ignoring C64-incompatible extensions: %s"),
+ (plt_type == PLT_BTI_PAC ? "BTI, PAC"
+ : plt_type == PLT_BTI ? "BTI" : "PAC"));
+
+ globals->plt0_entry = elfNN_c64_small_plt0_entry;
+ globals->plt_entry = elfNN_c64_small_plt_entry;
+ return;
+ }
+
if (plt_type == PLT_BTI_PAC)
{
globals->plt0_entry = elfNN_aarch64_small_plt0_bti_entry;
workaround. */
globals->fix_erratum_843419 = fix_erratum_843419;
globals->no_apply_dynamic_relocs = no_apply_dynamic_relocs;
+ globals->c64_rel = 0;
BFD_ASSERT (is_aarch64_elf (output_bfd));
elf_aarch64_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
break;
}
elf_aarch64_tdata (output_bfd)->plt_type = bp_info.plt_type;
- setup_plt_values (link_info, bp_info.plt_type);
+ elf_aarch64_tdata (output_bfd)->secmaps_initialised = FALSE;
}
static bfd_vma
static bfd_reloc_code_real_type
aarch64_tls_transition_without_check (bfd_reloc_code_real_type r_type,
- struct elf_link_hash_entry *h)
+ struct bfd_link_info *info,
+ struct elf_link_hash_entry *h,
+ bfd_boolean morello_reloc)
{
bfd_boolean is_local = h == NULL;
switch (r_type)
{
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
+ return (is_local || !bfd_link_pic (info)
+ ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1
+ : r_type);
+
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
return (is_local
? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2
: BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1);
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
+ return ((is_local || !bfd_link_pie (info)
+ ? BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC : r_type));
+
case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
return (is_local
? BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12
: BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19);
- case BFD_RELOC_AARCH64_TLSDESC_ADD:
+ case BFD_RELOC_MORELLO_TLSDESC_CALL:
+ return ((is_local || !bfd_link_pie (info))
+ ? BFD_RELOC_AARCH64_NONE : r_type);
+
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
+ if (morello_reloc && !is_local && bfd_link_pie (info))
+ return r_type;
+ /* Fall through. */
+ case BFD_RELOC_AARCH64_TLSDESC_ADD:
case BFD_RELOC_AARCH64_TLSDESC_CALL:
- /* Instructions with these relocations will become NOPs. */
+ /* Instructions with these relocations will be fully resolved during the
+ transition into either a NOP in the A64 case or movk and add in
+ C64. */
return BFD_RELOC_AARCH64_NONE;
case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
return GOT_NORMAL;
+ case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
+ case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
+ return GOT_CAP;
+
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
return GOT_TLS_GD;
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
+ case BFD_RELOC_MORELLO_TLSDESC_CALL:
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
+ return GOT_TLSDESC_GD | GOT_CAP;
+
case BFD_RELOC_AARCH64_TLSDESC_ADD:
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
static bfd_boolean
aarch64_can_relax_tls (bfd *input_bfd,
struct bfd_link_info *info,
- bfd_reloc_code_real_type r_type,
+ const Elf_Internal_Rela *rel,
struct elf_link_hash_entry *h,
unsigned long r_symndx)
{
unsigned int symbol_got_type;
unsigned int reloc_got_type;
- if (! IS_AARCH64_TLS_RELAX_RELOC (r_type))
+ bfd_reloc_code_real_type bfd_r_type
+ = elfNN_aarch64_bfd_reloc_from_type (input_bfd,
+ ELFNN_R_TYPE (rel->r_info));
+
+ if (! IS_AARCH64_TLS_RELAX_RELOC (bfd_r_type))
return FALSE;
symbol_got_type = elfNN_aarch64_symbol_got_type (h, input_bfd, r_symndx);
- reloc_got_type = aarch64_reloc_got_type (r_type);
+ reloc_got_type = aarch64_reloc_got_type (bfd_r_type);
if (symbol_got_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (reloc_got_type))
return TRUE;
static bfd_reloc_code_real_type
aarch64_tls_transition (bfd *input_bfd,
struct bfd_link_info *info,
- unsigned int r_type,
+ const Elf_Internal_Rela *rel,
struct elf_link_hash_entry *h,
unsigned long r_symndx)
{
bfd_reloc_code_real_type bfd_r_type
- = elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type);
+ = elfNN_aarch64_bfd_reloc_from_type (input_bfd,
+ ELFNN_R_TYPE (rel->r_info));
+
+ if (! aarch64_can_relax_tls (input_bfd, info, rel, h, r_symndx))
+ return bfd_r_type;
+
+ bfd_boolean morello_reloc = (bfd_r_type == BFD_RELOC_AARCH64_TLSDESC_ADD_LO12
+ && (ELFNN_R_TYPE (rel[1].r_info)
+ == MORELLO_R (TLSDESC_CALL)));
- if (! aarch64_can_relax_tls (input_bfd, info, bfd_r_type, h, r_symndx))
+ /* GD -> IE is not supported for Morello TLSDESC yet. We do however allow
+ lowering of GD -> LE for static non-pie executables. XXX It ought to be
+ safe to do this for A64 as well but it is not implemented yet. */
+ if (h != NULL && morello_reloc && bfd_link_pie (info))
return bfd_r_type;
- return aarch64_tls_transition_without_check (bfd_r_type, h);
+ return aarch64_tls_transition_without_check (bfd_r_type, info, h,
+ morello_reloc);
}
/* Return the base VMA address which should be subtracted from real addresses
if ((htab->fix_erratum_843419 & ERRAT_ADR)
&& (imm >= AARCH64_MIN_ADRP_IMM && imm <= AARCH64_MAX_ADRP_IMM))
{
- insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm)
+ insn = (_bfd_aarch64_reencode_adr_imm (AARCH64_ADR_OP, imm, 0)
| AARCH64_RT (insn));
bfd_putl32 (insn, contents + stub_entry->adrp_offset);
/* Stub is not needed, don't map it out. */
|| reloc == BFD_RELOC_AARCH64_MOVW_GOTOFF_G1);
}
+/* Build capability meta data, i.e. size and permissions for a capability. */
+
+static bfd_vma
+cap_meta (size_t size, const asection *sec)
+{
+
+ if (size >= (1ULL << 56))
+ return (bfd_vma) -1;
+
+ /* N.b. We are only ever using this function for Morello.
+ Morello is little-endian.
+ We are returning a 64bit sized integer.
+ The format this metadata is supposed to fit is
+ | 56 bit length | 8 bit permissions |
+ This means that (in little endian layout) we need to put the 56 bit size
+ in the *lower* bits of the uint64_t. */
+ uint64_t flags = 0;
+ if (sec->flags & SEC_CODE)
+ flags = 4;
+ else if (sec->flags & SEC_READONLY
+ || sec->flags & SEC_ROM)
+ flags = 1;
+ else if (sec->flags & SEC_ALLOC)
+ flags = 2;
+
+ /* We should always be able to derive a valid set of permissions
+ from the section flags. */
+ if (flags == 0)
+ abort ();
+ return size | (flags << 56);
+}
+
+enum c64_section_perm_type {
+ C64_SYM_UNKNOWN = 0,
+ C64_SYM_STANDARD,
+ C64_SYM_LINKER_DEF,
+ C64_SYM_LDSCRIPT_DEF,
+ C64_SYM_LDSCRIPT_START,
+};
+
+static enum c64_section_perm_type
+c64_symbol_section_adjustment (struct elf_link_hash_entry *h, bfd_vma value,
+ asection *sym_sec, asection **ret_sec,
+ struct bfd_link_info *info)
+{
+ if (!sym_sec)
+ return C64_SYM_UNKNOWN;
+
+ *ret_sec = sym_sec;
+ if (!h)
+ return C64_SYM_STANDARD;
+
+ /* Linker defined symbols are always at the start of the section they
+ track. */
+ if (h->root.linker_def)
+ return C64_SYM_LINKER_DEF;
+ else if (h->root.ldscript_def)
+ {
+ const char *name = h->root.root.string;
+ size_t len = strlen (name);
+
+ bfd_vma size = sym_sec->size - (value - sym_sec->vma);
+ /* The special case: the symbol is at the end of the section.
+ This could either mean that it is an end symbol or it is the
+ start of the output section following the symbol. We try to
+ guess if it is a start of the next section by reading its
+ name. This is a compatibility hack, ideally linker scripts
+ should be written such that start symbols are defined within
+ the output section it intends to track. */
+ if (size == 0
+ && (len > 8 && name[0] == '_' && name[1] == '_'
+ && (!strncmp (name + 2, "start_", 6)
+ || !strcmp (name + len - 6, "_start"))))
+ {
+ asection *s = bfd_sections_find_if (info->output_bfd,
+ section_start_symbol,
+ &value);
+ if (s != NULL)
+ {
+ *ret_sec = s;
+ return C64_SYM_LDSCRIPT_START;
+ }
+ }
+ return C64_SYM_LDSCRIPT_DEF;
+ }
+ return C64_SYM_STANDARD;
+}
+
+static bfd_reloc_status_type
+c64_fixup_frag (bfd *input_bfd, struct bfd_link_info *info,
+ Elf_Internal_Sym *sym, struct elf_link_hash_entry *h,
+ asection *sym_sec, bfd_byte *frag_loc, bfd_vma value,
+ bfd_signed_vma addend)
+{
+ BFD_ASSERT (h || sym);
+ bfd_vma size = sym ? sym->st_size : h->size;
+ asection *perm_sec = sym_sec;
+ bfd_boolean bounds_ok = FALSE;
+
+ if (size == 0 && sym_sec)
+ {
+ bounds_ok = TRUE;
+ enum c64_section_perm_type type
+ = c64_symbol_section_adjustment (h, value, sym_sec, &perm_sec, info);
+
+ switch (type)
+ {
+ case C64_SYM_STANDARD:
+ break;
+ case C64_SYM_LINKER_DEF:
+ size = perm_sec->output_section->size;
+ break;
+ case C64_SYM_LDSCRIPT_DEF:
+ size = perm_sec->size - (value - perm_sec->vma);
+ break;
+ case C64_SYM_LDSCRIPT_START:
+ size = perm_sec->size;
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ /* Negative addends are not allowed for capability symbols. */
+ if (addend < 0 || (bfd_vma) addend > size)
+ return bfd_reloc_outofrange;
+
+ bfd_vma base = value, limit = value + size;
+
+ if (!bounds_ok && !c64_valid_cap_range (&base, &limit))
+ {
+ /* xgettext:c-format */
+ _bfd_error_handler (_("%pB: capability range may exceed object bounds"),
+ input_bfd);
+ bfd_set_error (bfd_error_bad_value);
+ return bfd_reloc_notsupported;
+ }
+
+ if (perm_sec && perm_sec->flags & SEC_CODE)
+ {
+ /* Any symbol pointing into an executable section gets bounds according
+ to PCC. In this case the relocation is set up so that the value is
+ the base of the PCC, the addend is the offset from the PCC base to the
+ VA that we want, and the size is the length of the PCC range.
+ In this function we only use `value` to check the bounds make sense,
+ which is somewhat superfluous when we're using pcc_high and pcc_low
+ since we already enforced that in elfNN_c64_resize_sections. No harm
+ in instead checking that the bounds on the object that were requested
+ made sense even if they were overridden because this symbol points
+ into an executable section.
+
+ `size` on the other hand is part of the fragment that we output to and
+ we need to change it in order to have functions that can access global
+ data or jump to other functions. */
+ size = pcc_high - pcc_low;
+ }
+
+ if (perm_sec != NULL)
+ {
+ bfd_vma frag = cap_meta (size, perm_sec);
+
+ if (frag == (bfd_vma) -1)
+ return bfd_reloc_outofrange;
+
+ bfd_put_64 (input_bfd, frag, frag_loc);
+ }
+
+ return bfd_reloc_continue;
+}
+
+/* Given either a local symbol SYM or global symbol H, do we need to adjust
+ capability relocations against the symbol due to the fact that it points to
+ a code section? */
+static bfd_boolean
+c64_symbol_adjust (struct elf_link_hash_entry *h,
+ bfd_vma value, asection *sym_sec, struct bfd_link_info *info,
+ bfd_vma *adjust_addr)
+{
+ asection *tmp_sec;
+ enum c64_section_perm_type type
+ = c64_symbol_section_adjustment (h, value, sym_sec, &tmp_sec, info);
+
+ if (type == C64_SYM_UNKNOWN)
+ return FALSE;
+
+ if (tmp_sec->flags & SEC_CODE)
+ {
+ *adjust_addr = pcc_low;
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
/* Perform a relocation as part of a final link. The input relocation type
should be TLS relaxed. */
bfd_vma orig_value = value;
bfd_boolean resolved_to_zero;
bfd_boolean abs_symbol_p;
- bfd_boolean via_plt_p;
+ Elf_Internal_Sym *isym = NULL;
+ bfd_boolean c64_rtype = FALSE;
+ bfd_boolean to_c64 = FALSE;
globals = elf_aarch64_hash_table (info);
: bfd_is_und_section (sym_sec));
abs_symbol_p = h != NULL && bfd_is_abs_symbol (&h->root);
- via_plt_p = (globals->root.splt != NULL && h != NULL
- && h->plt.offset != (bfd_vma) - 1);
+ if (sym)
+ {
+ isym = bfd_sym_from_r_symndx (&globals->root.sym_cache, input_bfd,
+ r_symndx);
+ BFD_ASSERT (isym != NULL);
+ to_c64 = (isym->st_target_internal & ST_BRANCH_TO_C64) != 0;
+ }
+ else
+ to_c64 = (h->target_internal & ST_BRANCH_TO_C64) != 0;
+
/* Since STT_GNU_IFUNC symbol must go through PLT, we handle
it here if it is defined in a non-shared object. */
|| bfd_link_executable (info))
{
/* This symbol is resolved locally. */
- outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
+ outrel.r_info = (elf_aarch64_hash_entry (h)->got_type
+ == GOT_CAP
+ ? ELFNN_R_INFO (0, MORELLO_R (IRELATIVE))
+ : ELFNN_R_INFO (0, AARCH64_R (IRELATIVE)));
outrel.r_addend = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
return bfd_reloc_ok;
}
/* FALLTHROUGH */
+ case BFD_RELOC_MORELLO_CALL26:
+ case BFD_RELOC_MORELLO_JUMP26:
case BFD_RELOC_AARCH64_CALL26:
case BFD_RELOC_AARCH64_JUMP26:
value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type,
howto, value);
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
base_got = globals->root.sgot;
off = h->got.offset;
{
plt_index = ((h->plt.offset - globals->plt_header_size) /
globals->plt_entry_size);
- off = (plt_index + 3) * GOT_ENTRY_SIZE;
+ off = (plt_index + 3) * GOT_ENTRY_SIZE (globals);
base_got = globals->root.sgotplt;
}
else
{
plt_index = h->plt.offset / globals->plt_entry_size;
- off = plt_index * GOT_ENTRY_SIZE;
+ off = plt_index * GOT_ENTRY_SIZE (globals);
base_got = globals->root.igotplt;
}
return _bfd_aarch64_elf_put_addend (input_bfd, hit_data, bfd_r_type, howto, value);
case BFD_RELOC_AARCH64_ADD_LO12:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
break;
}
}
case BFD_RELOC_AARCH64_TLSDESC_ADD:
case BFD_RELOC_AARCH64_TLSDESC_CALL:
case BFD_RELOC_AARCH64_TLSDESC_LDR:
+ case BFD_RELOC_MORELLO_TLSDESC_CALL:
*unresolved_reloc_p = FALSE;
return bfd_reloc_ok;
value += signed_addend;
break;
- case BFD_RELOC_AARCH64_BRANCH19:
- case BFD_RELOC_AARCH64_TSTBR14:
- /* A conditional branch to an undefined weak symbol is converted to a
- branch to itself. */
- if (weak_undef_p && !via_plt_p)
- {
- value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
- place, value,
- signed_addend,
- weak_undef_p);
- break;
- }
- /* Fall through. */
+ case BFD_RELOC_MORELLO_CALL26:
+ case BFD_RELOC_MORELLO_JUMP26:
case BFD_RELOC_AARCH64_CALL26:
case BFD_RELOC_AARCH64_JUMP26:
{
asection *splt = globals->root.splt;
+ bfd_boolean via_plt_p =
+ splt != NULL && h != NULL && h->plt.offset != (bfd_vma) - 1;
/* A call to an undefined weak symbol is converted to a jump to
the next instruction unless a PLT entry will be created.
is too far away. */
struct elf_aarch64_stub_hash_entry *stub_entry = NULL;
+ enum elf_aarch64_stub_type c64_stub = aarch64_stub_none;
+
+ /* Figure out if we need an interworking stub and if yes, what
+ kind. */
+ if (!via_plt_p)
+ c64_stub = aarch64_interwork_stub (r_type, to_c64);
+
/* If the branch destination is directed to plt stub, "value" will be
the final destination, otherwise we should plus signed_addend, it may
contain non-zero value, for example call to local function symbol
which are turned into "sec_sym + sec_off", and sec_off is kept in
signed_addend. */
- if (! aarch64_valid_branch_p (via_plt_p ? value : value + signed_addend,
- place))
- /* The target is out of reach, so redirect the branch to
- the local stub for this function. */
- stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec, h,
- rel, globals);
+ if (c64_stub != aarch64_stub_none
+ || (aarch64_branch_reloc_p (r_type)
+ && !aarch64_valid_branch_p ((via_plt_p ? value
+ : value + signed_addend), place)))
+ {
+ /* The target is out of reach, so redirect the branch to
+ the local stub for this function. */
+ stub_entry = elfNN_aarch64_get_stub_entry (input_section, sym_sec,
+ h, rel, globals,
+ c64_stub);
+ }
+
if (stub_entry != NULL)
{
value = (stub_entry->stub_offset
case BFD_RELOC_AARCH64_64_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_NC_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
case BFD_RELOC_AARCH64_LD_LO19_PCREL:
+ case BFD_RELOC_MORELLO_LD_LO17_PCREL:
case BFD_RELOC_AARCH64_MOVW_PREL_G0:
case BFD_RELOC_AARCH64_MOVW_PREL_G0_NC:
case BFD_RELOC_AARCH64_MOVW_PREL_G1:
bfd_set_error (bfd_error_bad_value);
return bfd_reloc_notsupported;
}
+ value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
+ place, value,
+ signed_addend,
+ weak_undef_p);
+
+ if (bfd_r_type == BFD_RELOC_AARCH64_ADR_LO21_PCREL && isym != NULL
+ && isym->st_target_internal & ST_BRANCH_TO_C64)
+ value |= 1;
+ break;
+
+ case BFD_RELOC_MORELLO_BRANCH19:
+ case BFD_RELOC_MORELLO_TSTBR14:
+ c64_rtype = TRUE;
+ /* Fall through. */
+ case BFD_RELOC_AARCH64_BRANCH19:
+ case BFD_RELOC_AARCH64_TSTBR14:
+ if (h && h->root.type == bfd_link_hash_undefined)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%pB: conditional branch to undefined symbol `%s' "
+ "not allowed"), input_bfd, h->root.root.string);
+ bfd_set_error (bfd_error_bad_value);
+ return bfd_reloc_notsupported;
+ }
+ {
+ int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
+
+ if ((c64_rtype && !to_c64) || (!c64_rtype && to_c64))
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%pB: interworking not supported on relocation %s"),
+ input_bfd, elfNN_aarch64_howto_table[howto_index].name);
+ return bfd_reloc_notsupported;
+ }
+ }
/* Fall through. */
case BFD_RELOC_AARCH64_16:
value = _bfd_aarch64_elf_resolve_relocation (input_bfd, bfd_r_type,
place, value,
signed_addend, weak_undef_p);
+ if (bfd_r_type == BFD_RELOC_AARCH64_ADD_LO12 && isym != NULL
+ && isym->st_target_internal & ST_BRANCH_TO_C64)
+ value |= 1;
+
break;
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
- if (globals->root.sgot == NULL)
+ off = symbol_got_offset (input_bfd, h, r_symndx);
+ base_got = globals->root.sgot;
+
+ bfd_boolean c64_reloc =
+ (bfd_r_type == BFD_RELOC_MORELLO_LD128_GOT_LO12_NC
+ || bfd_r_type == BFD_RELOC_MORELLO_ADR_GOT_PAGE);
+
+ if (signed_addend != 0)
+ {
+ int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%pB: symbol plus addend can not be placed into the GOT "
+ "for relocation %s"),
+ input_bfd, elfNN_aarch64_howto_table[howto_index].name);
+ abort ();
+ }
+
+ if (base_got == NULL)
BFD_ASSERT (h != NULL);
relative_reloc = FALSE;
if (h != NULL)
{
bfd_vma addend = 0;
+ bfd_vma frag_value;
/* If a symbol is not dynamic and is not undefined weak, bind it
locally and generate a RELATIVE relocation under PIC mode.
NOTE: one symbol may be referenced by several relocations, we
should only generate one RELATIVE relocation for that symbol.
- Therefore, check GOT offset mark first. */
- if (h->dynindx == -1
- && !h->forced_local
- && h->root.type != bfd_link_hash_undefweak
- && bfd_link_pic (info)
+ Therefore, check GOT offset mark first.
+
+ NOTE2: Symbol references via GOT in C64 static binaries without
+ PIC should always have relative relocations, so we do that here
+ early. */
+ if (((h->dynindx == -1
+ && !h->forced_local
+ && h->root.type != bfd_link_hash_undefweak
+ && bfd_link_pic (info))
+ || (!bfd_link_pic (info) && bfd_link_executable (info)
+ && c64_reloc))
&& !symbol_got_offset_mark_p (input_bfd, h, r_symndx))
relative_reloc = TRUE;
- value = aarch64_calculate_got_entry_vma (h, globals, info, value,
+ if (c64_reloc
+ && c64_symbol_adjust (h, value, sym_sec, info, &frag_value))
+ signed_addend = (value | h->target_internal) - frag_value;
+ else
+ frag_value = value | h->target_internal;
+
+ value = aarch64_calculate_got_entry_vma (h, globals, info,
+ frag_value,
output_bfd,
unresolved_reloc_p);
/* Record the GOT entry address which will be used when generating
abort ();
}
- off = symbol_got_offset (input_bfd, h, r_symndx);
- base_got = globals->root.sgot;
got_entry_addr = (base_got->output_section->vma
+ base_got->output_offset + off);
if (!symbol_got_offset_mark_p (input_bfd, h, r_symndx))
{
- bfd_put_64 (output_bfd, value, base_got->contents + off);
+ bfd_vma frag_value;
+
+ if (c64_reloc
+ && c64_symbol_adjust (h, value, sym_sec, info, &frag_value))
+ signed_addend = (value | sym->st_target_internal) - frag_value;
+ else
+ frag_value = value | sym->st_target_internal;
+
+ bfd_put_64 (output_bfd, frag_value, base_got->contents + off);
/* For local symbol, we have done absolute relocation in static
linking stage. While for shared library, we need to update the
content of GOT entry according to the shared object's runtime
base address. So, we need to generate a R_AARCH64_RELATIVE reloc
for dynamic linker. */
- if (bfd_link_pic (info))
+ if (bfd_link_pic (info)
+ || (!bfd_link_pic (info) && bfd_link_executable (info)
+ && c64_reloc))
relative_reloc = TRUE;
symbol_got_offset_mark (input_bfd, h, r_symndx);
asection *s;
Elf_Internal_Rela outrel;
+ enum elf_aarch64_reloc_type rtype = AARCH64_R (RELATIVE);
+
s = globals->root.srelgot;
+
+ /* For a C64 relative relocation, also add size and permissions into
+ the frag. */
+ if (c64_reloc)
+ {
+ bfd_reloc_status_type ret;
+
+ ret = c64_fixup_frag (input_bfd, info, sym, h,
+ sym_sec, base_got->contents + off + 8,
+ orig_value, 0);
+
+ if (ret != bfd_reloc_continue)
+ return ret;
+
+ rtype = MORELLO_R (RELATIVE);
+
+ if (bfd_link_executable (info) && !bfd_link_pic (info))
+ s = globals->srelcaps;
+
+ outrel.r_addend = signed_addend;
+ }
+ else
+ outrel.r_addend = orig_value;
+
if (s == NULL)
abort ();
outrel.r_offset = got_entry_addr;
- outrel.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
- outrel.r_addend = orig_value;
+ outrel.r_info = ELFNN_R_INFO (0, rtype);
elf_append_rela (output_bfd, s, &outrel);
}
break;
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12:
case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
if (globals->root.sgot == NULL)
return bfd_reloc_notsupported;
value = (symbol_tlsdesc_got_offset (input_bfd, h, r_symndx)
*unresolved_reloc_p = FALSE;
break;
+ case BFD_RELOC_MORELLO_CAPINIT:
+ {
+ Elf_Internal_Rela outrel;
+
+ if (input_section->flags & SEC_READONLY)
+ {
+ _bfd_error_handler
+ /* xgettext:c-format */
+ (_("%pB: capability relocation section must be writable"),
+ input_bfd);
+ bfd_set_error (bfd_error_bad_value);
+ return bfd_reloc_notsupported;
+ }
+
+ outrel.r_offset = _bfd_elf_section_offset (output_bfd, info,
+ input_section,
+ rel->r_offset);
+
+ outrel.r_offset += (input_section->output_section->vma
+ + input_section->output_offset);
+
+ /* Capability-aligned. */
+ if (outrel.r_offset & 0xf)
+ return bfd_reloc_overflow;
+
+ bfd_reloc_status_type ret;
+
+ ret = c64_fixup_frag (input_bfd, info, sym, h, sym_sec,
+ hit_data + 8, value, signed_addend);
+
+ if (ret != bfd_reloc_continue)
+ return ret;
+
+ outrel.r_addend = signed_addend;
+ value |= (h != NULL ? h->target_internal : sym->st_target_internal);
+
+ /* Emit a dynamic relocation if we are building PIC. */
+ if (h != NULL
+ && h->dynindx != -1
+ && bfd_link_pic (info)
+ && !SYMBOL_REFERENCES_LOCAL (info, h))
+ outrel.r_info = ELFNN_R_INFO (h->dynindx, r_type);
+ else
+ outrel.r_info = ELFNN_R_INFO (0, MORELLO_R (RELATIVE));
+
+ /* Symbols without size information get bounds to the
+ whole section: adjust the base of the capability to the
+ start of the section and set the addend to obtain the
+ correct address for the symbol. */
+ bfd_vma new_value;
+ if (c64_symbol_adjust (h, value, sym_sec, info, &new_value))
+ {
+ outrel.r_addend += (value - new_value);
+ value = new_value;
+ }
+
+ asection *s = globals->srelcaps;
+
+ elf_append_rela (output_bfd, s, &outrel);
+ *unresolved_reloc_p = FALSE;
+ }
+ break;
+
default:
return bfd_reloc_notsupported;
}
}
}
+#define BUILD_MOVZ(_reg, _imm) (movz_R0 \
+ | ((((_imm) >> 16) & 0xffff) << 5) \
+ | (_reg))
+#define BUILD_MOVK(_reg, _imm) (movk_R0 | (((_imm) & 0xffff) << 5) | (_reg))
+
/* Handle TLS relaxations. Relaxing is possible for symbols that use
R_AARCH64_TLSDESC_ADR_{PAGE, LD64_LO12_NC, ADD_LO12_NC} during a static
link.
case of error. */
static bfd_reloc_status_type
-elfNN_aarch64_tls_relax (struct elf_aarch64_link_hash_table *globals,
- bfd *input_bfd, asection *input_section,
+elfNN_aarch64_tls_relax (bfd *input_bfd, struct bfd_link_info *info,
+ asection *input_section,
bfd_byte *contents, Elf_Internal_Rela *rel,
- struct elf_link_hash_entry *h)
+ struct elf_link_hash_entry *h, unsigned long r_symndx)
{
bfd_boolean is_local = h == NULL;
+
unsigned int r_type = ELFNN_R_TYPE (rel->r_info);
unsigned long insn;
+ bfd_vma sym_size = 0;
+ struct elf_aarch64_link_hash_table *globals = elf_aarch64_hash_table (info);
BFD_ASSERT (globals && input_bfd && contents && rel);
+ if (is_local)
+ {
+ if (h != NULL)
+ sym_size = h->size;
+ else
+ {
+ Elf_Internal_Sym *sym;
+
+ sym = bfd_sym_from_r_symndx (&globals->root.sym_cache, input_bfd,
+ r_symndx);
+ BFD_ASSERT (sym != NULL);
+ sym_size = sym->st_size;
+ }
+ }
+
switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
{
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
+ if (is_local || !bfd_link_pic (info))
+ {
+ /* GD->LE relaxation:
+ nop => movz x1, objsize_hi16
+ adrp x0, :tlsdesc:var => movz x0, :tprel_g1:var */
+ bfd_putl32 (BUILD_MOVZ(1, sym_size), contents + rel->r_offset - 4);
+ bfd_putl32 (movz_R0, contents + rel->r_offset);
+
+ /* We have relaxed the adrp into a mov, we may have to clear any
+ pending erratum fixes. */
+ clear_erratum_843419_entry (globals, rel->r_offset, input_section);
+ return bfd_reloc_continue;
+ }
+ else
+ {
+ /* GD->IE relaxation: Not implemented. */
+ return bfd_reloc_continue;
+ }
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
if (is_local)
case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
return bfd_reloc_continue;
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
+ if (is_local || !bfd_link_pic (info))
+ {
+ /* GD->LE relaxation:
+ ldr xd, [x0, #:tlsdesc_lo12:var] => movk x0, :tprel_g0_nc:var */
+ bfd_putl32 (movk_R0, contents + rel->r_offset);
+ return bfd_reloc_continue;
+ }
+ else
+ {
+ /* GD->IE relaxation: not implemented. */
+ return bfd_reloc_continue;
+ }
case BFD_RELOC_AARCH64_TLSDESC_LDNN_LO12_NC:
if (is_local)
{
return bfd_reloc_continue;
}
- case BFD_RELOC_AARCH64_TLSDESC_ADD:
+ case BFD_RELOC_MORELLO_TLSDESC_CALL:
+ /* GD->LE relaxation:
+ blr cd => add c0, c2, x0 */
+ if (is_local || !bfd_link_pic (info))
+ {
+ bfd_putl32 (0xc2a06040, contents + rel->r_offset);
+ return bfd_reloc_ok;
+ }
+ else
+ goto set_nop;
+
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
+ /* GD->LE relaxation:
+ ldr cd, [c0, #:tlsdesc_lo12:var] => movk x1, objsize_lo16 */
+ if ((is_local || !bfd_link_pic (info))
+ && ELFNN_R_TYPE (rel[1].r_info) == MORELLO_R (TLSDESC_CALL))
+ {
+ bfd_putl32 (BUILD_MOVK(1, sym_size), contents + rel->r_offset);
+ return bfd_reloc_continue;
+ }
+
+ /* Fall through. */
+ case BFD_RELOC_AARCH64_TLSDESC_ADD:
case BFD_RELOC_AARCH64_TLSDESC_CALL:
/* GD->IE/LE relaxation:
add x0, x0, #:tlsdesc_lo12:var => nop
blr xd => nop
*/
+set_nop:
bfd_putl32 (INSN_NOP, contents + rel->r_offset);
return bfd_reloc_ok;
input_section, (uint64_t) rel->r_offset, howto->name, name);
}
+ if (r_symndx
+ && h
+ && IS_AARCH64_TLS_RELOC (bfd_r_type)
+ && h->root.type == bfd_link_hash_undefweak)
+ /* We have already warned about these in aarch64_check_relocs,
+ so just skip over them. */
+ continue;
+
/* We relax only if we can see that there can be a valid transition
from a reloc type to another.
We call elfNN_aarch64_final_link_relocate unless we're completely
done, i.e., the relaxation produced the final output we want. */
- relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, r_type,
+ relaxed_bfd_r_type = aarch64_tls_transition (input_bfd, info, rel,
h, r_symndx);
if (relaxed_bfd_r_type != bfd_r_type)
{
howto = elfNN_aarch64_howto_from_bfd_reloc (bfd_r_type);
BFD_ASSERT (howto != NULL);
r_type = howto->type;
- r = elfNN_aarch64_tls_relax (globals, input_bfd, input_section,
- contents, rel, h);
+ r = elfNN_aarch64_tls_relax (input_bfd, info, input_section,
+ contents, rel, h, r_symndx);
unresolved_reloc = 0;
}
else
h, &unresolved_reloc,
save_addend, &addend, sym);
+ bfd_boolean c64_rtype = FALSE;
+
switch (elfNN_aarch64_bfd_reloc_from_type (input_bfd, r_type))
{
case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
base address when invoke runtime TLS resolver. */
bfd_put_NN (output_bfd, 0,
globals->root.sgot->contents + off
- + GOT_ENTRY_SIZE);
+ + GOT_ENTRY_SIZE (globals));
}
else if (indx == 0)
{
bfd_put_NN (output_bfd,
relocation - dtpoff_base (info),
globals->root.sgot->contents + off
- + GOT_ENTRY_SIZE);
+ + GOT_ENTRY_SIZE (globals));
}
else
{
rela.r_offset =
(globals->root.sgot->output_section->vma
+ globals->root.sgot->output_offset + off
- + GOT_ENTRY_SIZE);
+ + GOT_ENTRY_SIZE (globals));
loc = globals->root.srelgot->contents;
loc += globals->root.srelgot->reloc_count++
bfd_elfNN_swap_reloca_out (output_bfd, &rela, loc);
bfd_put_NN (output_bfd, (bfd_vma) 0,
globals->root.sgot->contents + off
- + GOT_ENTRY_SIZE);
+ + GOT_ENTRY_SIZE (globals));
}
}
else
bfd_put_NN (output_bfd,
relocation - dtpoff_base (info),
globals->root.sgot->contents + off
- + GOT_ENTRY_SIZE);
+ + GOT_ENTRY_SIZE (globals));
}
symbol_got_offset_mark (input_bfd, h, r_symndx);
}
break;
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
+ c64_rtype = TRUE;
+ /* Fall through. */
+
case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
{
bfd_byte *loc;
Elf_Internal_Rela rela;
- rela.r_info = ELFNN_R_INFO (indx, AARCH64_R (TLSDESC));
+
+ rela.r_info = ELFNN_R_INFO (indx,
+ (c64_rtype ? MORELLO_R (TLSDESC)
+ : AARCH64_R (TLSDESC)));
rela.r_addend = 0;
rela.r_offset = (globals->root.sgotplt->output_section->vma
bfd_put_NN (output_bfd, (bfd_vma) 0,
globals->root.sgotplt->contents + off +
globals->sgotplt_jump_table_size +
- GOT_ENTRY_SIZE);
+ GOT_ENTRY_SIZE (globals));
}
symbol_tlsdesc_got_offset_mark (input_bfd, h, r_symndx);
Try to catch this situation here and provide a more helpful
error message to the user. */
- if (addend & ((1 << howto->rightshift) - 1)
+ if (addend & (((bfd_vma) 1 << howto->rightshift) - 1)
/* FIXME: Are we testing all of the appropriate reloc
types here ? */
&& (real_r_type == BFD_RELOC_AARCH64_LD_LO19_PCREL
alignment than was declared where it was defined"),
name, input_bfd, input_section, rel->r_offset);
}
+
+ if (real_r_type == BFD_RELOC_MORELLO_CAPINIT)
+ info->callbacks->warning
+ (info, _("relocation offset must be capability aligned"),
+ name, input_bfd, input_section, rel->r_offset);
break;
case bfd_reloc_undefined:
fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
if (flags)
- fprintf (file, _("<Unrecognised flag bits set>"));
-
- fputc ('\n', file);
-
- return TRUE;
-}
-
-/* Find dynamic relocs for H that apply to read-only sections. */
-
-static asection *
-readonly_dynrelocs (struct elf_link_hash_entry *h)
-{
- struct elf_dyn_relocs *p;
+ fprintf (file, _("<Unrecognised flag bits set>"));
- for (p = elf_aarch64_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
- {
- asection *s = p->sec->output_section;
+ fputc ('\n', file);
- if (s != NULL && (s->flags & SEC_READONLY) != 0)
- return p->sec;
- }
- return NULL;
+ return TRUE;
}
/* Return true if we need copy relocation against EH. */
struct elf_dyn_relocs *p;
asection *s;
- for (p = eh->dyn_relocs; p != NULL; p = p->next)
+ for (p = eh->root.dyn_relocs; p != NULL; p = p->next)
{
/* If there is any pc-relative reference, we need to keep copy relocation
to avoid propagating the relocation into runtime that current glibc
return TRUE;
}
+/* Initialise the .got section to hold the global offset table. */
+
+static void
+aarch64_elf_init_got_section (bfd *abfd, struct bfd_link_info *info)
+{
+ const struct elf_backend_data *bed = get_elf_backend_data (abfd);
+ asection *s;
+ struct elf_aarch64_link_hash_table *globals = elf_aarch64_hash_table (info);
+ unsigned int align = bed->s->log_file_align + globals->c64_rel;
+
+ if (globals->root.sgot != NULL)
+ {
+ bfd_set_section_alignment (globals->root.srelgot,
+ bed->s->log_file_align);
+ bfd_set_section_alignment (globals->root.sgot, align);
+ globals->root.sgot->size += GOT_ENTRY_SIZE (globals);
+ }
+
+ /* Track capability initialisation for static non-PIE binaries. */
+ if (bfd_link_executable (info) && !bfd_link_pic (info)
+ && globals->srelcaps == NULL)
+ globals->srelcaps = globals->root.srelgot;
+
+ if (globals->root.igotplt != NULL)
+ bfd_set_section_alignment (globals->root.igotplt, align);
+
+ s = globals->root.sgot;
+
+ if (globals->root.sgotplt != NULL)
+ {
+ bfd_set_section_alignment (globals->root.sgotplt, align);
+ s = globals->root.sgotplt;
+ }
+
+ /* The first bit of the global offset table is the header. */
+ if (s != NULL)
+ s->size += bed->got_header_size (info);
+}
+
/* Create the .got section to hold the global offset table. */
static bfd_boolean
? ".rela.got" : ".rel.got"),
(bed->dynamic_sec_flags
| SEC_READONLY));
- if (s == NULL
- || !bfd_set_section_alignment (s, bed->s->log_file_align))
+ if (s == NULL)
return FALSE;
htab->srelgot = s;
s = bfd_make_section_anyway_with_flags (abfd, ".got", flags);
- if (s == NULL
- || !bfd_set_section_alignment (s, bed->s->log_file_align))
+ if (s == NULL)
return FALSE;
htab->sgot = s;
- htab->sgot->size += GOT_ENTRY_SIZE;
if (bed->want_got_sym)
{
if (bed->want_got_plt)
{
s = bfd_make_section_anyway_with_flags (abfd, ".got.plt", flags);
- if (s == NULL
- || !bfd_set_section_alignment (s, bed->s->log_file_align))
+ if (s == NULL)
return FALSE;
htab->sgotplt = s;
}
- /* The first bit of the global offset table is the header. */
- s->size += bed->got_header_size;
-
return TRUE;
}
symtab_hdr = &elf_symtab_hdr (abfd);
sym_hashes = elf_sym_hashes (abfd);
+ bfd_elfNN_aarch64_init_maps (abfd, info);
+
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
struct elf_link_hash_entry *h;
- unsigned int r_symndx;
- unsigned int r_type;
+ unsigned int r_symndx, r_type;
bfd_reloc_code_real_type bfd_r_type;
Elf_Internal_Sym *isym;
r_symndx = ELFNN_R_SYM (rel->r_info);
r_type = ELFNN_R_TYPE (rel->r_info);
+ bfd_r_type = elfNN_aarch64_bfd_reloc_from_type (abfd, r_type);
if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
{
if (r_symndx < symtab_hdr->sh_info)
{
/* A local symbol. */
- isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+ isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
abfd, r_symndx);
if (isym == NULL)
return FALSE;
h = (struct elf_link_hash_entry *) h->root.u.i.link;
}
+ /* Ignore TLS relocations against weak undef symbols and warn about them.
+ The behaviour of weak TLS variables is not well defined. Since making
+ these well behaved is not a priority for Morello, we simply ignore
+ TLS relocations against such symbols here to avoid the linker crashing
+ on these and to enable making progress in other areas. */
+ if (r_symndx
+ && h
+ && IS_AARCH64_TLS_RELOC (bfd_r_type)
+ && h->root.type == bfd_link_hash_undefweak)
+ {
+ int howto_index = bfd_r_type - BFD_RELOC_AARCH64_RELOC_START;
+ _bfd_error_handler (_("%pB(%pA+%#" PRIx64 "): ignoring TLS relocation "
+ "%s against undef weak symbol %s"),
+ abfd, sec,
+ (uint64_t) rel->r_offset,
+ elfNN_aarch64_howto_table[howto_index].name,
+ h->root.root.string);
+ continue;
+ }
+
/* Could be done earlier, if h were already available. */
- bfd_r_type = aarch64_tls_transition (abfd, info, r_type, h, r_symndx);
+ bfd_r_type = aarch64_tls_transition (abfd, info, rel, h, r_symndx);
if (h != NULL)
{
default:
break;
+ case BFD_RELOC_MORELLO_CALL26:
+ case BFD_RELOC_MORELLO_JUMP26:
+ /* For dynamic symbols record caller information so that we can
+ decide what kind of PLT stubs to emit. */
+ if (h != NULL)
+ elf_aarch64_hash_entry (h)->got_type = GOT_CAP;
+ /* Fall through. */
+
case BFD_RELOC_AARCH64_ADD_LO12:
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
+ case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
case BFD_RELOC_AARCH64_CALL26:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_JUMP26:
case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
+ case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
case BFD_RELOC_AARCH64_NN:
case BFD_RELOC_AARCH64_ADD_LO12:
case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_NC_PCREL:
+ case BFD_RELOC_MORELLO_ADR_HI20_PCREL:
case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
case BFD_RELOC_AARCH64_LDST128_LO12:
case BFD_RELOC_AARCH64_LDST16_LO12:
case BFD_RELOC_AARCH64_LDST64_LO12:
case BFD_RELOC_AARCH64_LDST8_LO12:
case BFD_RELOC_AARCH64_LD_LO19_PCREL:
+ case BFD_RELOC_MORELLO_LD_LO17_PCREL:
if (h == NULL || bfd_link_pic (info))
break;
/* Fall through. */
relocations we need for this symbol. */
if (h != NULL)
{
- struct elf_aarch64_link_hash_entry *eh;
- eh = (struct elf_aarch64_link_hash_entry *) h;
- head = &eh->dyn_relocs;
+ head = &h->dyn_relocs;
}
else
{
asection *s;
void **vpp;
- isym = bfd_sym_from_r_symndx (&htab->sym_cache,
+ isym = bfd_sym_from_r_symndx (&htab->root.sym_cache,
abfd, r_symndx);
if (isym == NULL)
return FALSE;
/* RR: We probably want to keep a consistency check that
there are no dangling GOT_PAGE relocs. */
+ case BFD_RELOC_MORELLO_ADR_GOT_PAGE:
+ case BFD_RELOC_MORELLO_LD128_GOT_LO12_NC:
+ case BFD_RELOC_MORELLO_TLSDESC_ADR_PAGE20:
+ case BFD_RELOC_MORELLO_TLSDESC_LD128_LO12:
+ htab->c64_rel = 1;
+ /* Fall through. */
+
case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
case BFD_RELOC_AARCH64_GOT_LD_PREL19:
case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
is a TLS/non-TLS mismatch, based on the symbol type.
So just combine any TLS types needed. */
if (old_got_type != GOT_UNKNOWN && old_got_type != GOT_NORMAL
- && got_type != GOT_NORMAL)
+ && got_type != GOT_NORMAL && old_got_type != GOT_CAP
+ && got_type != GOT_CAP)
got_type |= old_got_type;
/* If the symbol is accessed by both IE and GD methods, we
if ((got_type & GOT_TLS_IE) && GOT_TLS_GD_ANY_P (got_type))
got_type &= ~ (GOT_TLSDESC_GD | GOT_TLS_GD);
+ /* Prefer the capability reference. */
+ if ((old_got_type & GOT_CAP) && (got_type & GOT_NORMAL))
+ {
+ got_type &= ~GOT_NORMAL;
+ got_type |= GOT_CAP;
+ }
+
if (old_got_type != got_type)
{
if (h != NULL)
break;
}
- case BFD_RELOC_AARCH64_BRANCH19:
- case BFD_RELOC_AARCH64_TSTBR14:
+ case BFD_RELOC_MORELLO_CALL26:
+ case BFD_RELOC_MORELLO_JUMP26:
+ htab->c64_rel = 1;
+ if (h != NULL)
+ elf_aarch64_hash_entry (h)->got_type = GOT_CAP;
+
+ /* Fall through. */
case BFD_RELOC_AARCH64_CALL26:
case BFD_RELOC_AARCH64_JUMP26:
- /* If this is a local symbol then we resolve it
- directly without creating a PLT entry. */
if (h == NULL)
- continue;
+ {
+ isym = bfd_sym_from_r_symndx (&htab->root.sym_cache, abfd,
+ r_symndx);
+ if (isym == NULL)
+ return FALSE;
+
+ asection *s = bfd_section_from_elf_index (abfd, isym->st_shndx);
+
+ if (s == NULL)
+ s = sec;
+
+ if (c64_value_p (s, isym->st_value))
+ isym->st_target_internal |= ST_BRANCH_TO_C64;
+
+ /* If this is a local symbol then we resolve it
+ directly without creating a PLT entry. */
+ continue;
+ }
+
+ if (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak)
+ {
+ asection *sym_sec = h->root.u.def.section;
+ bfd_vma sym_value = h->root.u.def.value;
+
+ if (sym_sec != NULL && c64_value_p (sym_sec, sym_value))
+ h->target_internal |= ST_BRANCH_TO_C64;
+ }
h->needs_plt = 1;
if (h->plt.refcount <= 0)
h->plt.refcount += 1;
break;
+ case BFD_RELOC_MORELLO_CAPINIT:
+ if (htab->srelcaps == NULL)
+ {
+ if (htab->root.dynobj == NULL)
+ htab->root.dynobj = abfd;
+
+ sreloc = _bfd_elf_make_dynamic_reloc_section
+ (sec, htab->root.dynobj, LOG_FILE_ALIGN,
+ abfd, /*rela? */ TRUE);
+
+ if (sreloc == NULL)
+ return FALSE;
+
+ htab->srelcaps = sreloc;
+ }
+ htab->srelcaps->size += RELOC_SIZE (htab);
+
+ break;
+
default:
break;
}
switch ((int) ELFNN_R_TYPE (rela->r_info))
{
case AARCH64_R (IRELATIVE):
+ case MORELLO_R (IRELATIVE):
return reloc_class_ifunc;
case AARCH64_R (RELATIVE):
+ case MORELLO_R (RELATIVE):
return reloc_class_relative;
case AARCH64_R (JUMP_SLOT):
+ case MORELLO_R (JUMP_SLOT):
return reloc_class_plt;
case AARCH64_R (COPY):
return reloc_class_copy;
return TRUE;
}
-/* A structure used to record a list of sections, independently
- of the next and prev fields in the asection structure. */
-typedef struct section_list
-{
- asection *sec;
- struct section_list *next;
- struct section_list *prev;
-}
-section_list;
-
-/* Unfortunately we need to keep a list of sections for which
- an _aarch64_elf_section_data structure has been allocated. This
- is because it is possible for functions like elfNN_aarch64_write_section
- to be called on a section which has had an elf_data_structure
- allocated for it (and so the used_by_bfd field is valid) but
- for which the AArch64 extended version of this structure - the
- _aarch64_elf_section_data structure - has not been allocated. */
-static section_list *sections_with_aarch64_elf_section_data = NULL;
-
-static void
-record_section_with_aarch64_elf_section_data (asection *sec)
-{
- struct section_list *entry;
-
- entry = bfd_malloc (sizeof (*entry));
- if (entry == NULL)
- return;
- entry->sec = sec;
- entry->next = sections_with_aarch64_elf_section_data;
- entry->prev = NULL;
- if (entry->next != NULL)
- entry->next->prev = entry;
- sections_with_aarch64_elf_section_data = entry;
-}
-
-static struct section_list *
-find_aarch64_elf_section_entry (asection *sec)
-{
- struct section_list *entry;
- static struct section_list *last_entry = NULL;
-
- /* This is a short cut for the typical case where the sections are added
- to the sections_with_aarch64_elf_section_data list in forward order and
- then looked up here in backwards order. This makes a real difference
- to the ld-srec/sec64k.exp linker test. */
- entry = sections_with_aarch64_elf_section_data;
- if (last_entry != NULL)
- {
- if (last_entry->sec == sec)
- entry = last_entry;
- else if (last_entry->next != NULL && last_entry->next->sec == sec)
- entry = last_entry->next;
- }
-
- for (; entry; entry = entry->next)
- if (entry->sec == sec)
- break;
-
- if (entry)
- /* Record the entry prior to this one - it is the entry we are
- most likely to want to locate next time. Also this way if we
- have been called from
- unrecord_section_with_aarch64_elf_section_data () we will not
- be caching a pointer that is about to be freed. */
- last_entry = entry->prev;
-
- return entry;
-}
-
-static void
-unrecord_section_with_aarch64_elf_section_data (asection *sec)
-{
- struct section_list *entry;
-
- entry = find_aarch64_elf_section_entry (sec);
-
- if (entry)
- {
- if (entry->prev != NULL)
- entry->prev->next = entry->next;
- if (entry->next != NULL)
- entry->next->prev = entry->prev;
- if (entry == sections_with_aarch64_elf_section_data)
- sections_with_aarch64_elf_section_data = entry->next;
- free (entry);
- }
-}
-
-
typedef struct
{
void *finfo;
enum map_symbol_type
{
AARCH64_MAP_INSN,
- AARCH64_MAP_DATA
+ AARCH64_MAP_DATA,
+ AARCH64_MAP_C64,
};
elfNN_aarch64_output_map_sym (output_arch_syminfo *osi,
enum map_symbol_type type, bfd_vma offset)
{
- static const char *names[2] = { "$x", "$d" };
+ static const char *names[3] = { "$x", "$d", "$c" };
Elf_Internal_Sym sym;
sym.st_value = (osi->sec->output_section->vma
sym.st_other = 0;
sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
sym.st_shndx = osi->sec_shndx;
+ sym.st_target_internal = 0;
return osi->func (osi->finfo, names[type], &sym, osi->sec, NULL) == 1;
}
sym.st_other = 0;
sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
sym.st_shndx = osi->sec_shndx;
+ sym.st_target_internal = 0;
return osi->func (osi->finfo, name, &sym, osi->sec, NULL) == 1;
}
if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_INSN, addr))
return FALSE;
break;
+ case aarch64_stub_branch_c64:
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
+ sizeof (aarch64_c64_branch_stub)))
+ return FALSE;
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_C64, addr))
+ return FALSE;
+ break;
+ case c64_stub_branch_aarch64:
+ case c64_stub_branch_c64:
+ if (!elfNN_aarch64_output_stub_sym (osi, stub_name, addr,
+ sizeof (c64_aarch64_branch_stub)))
+ return FALSE;
+ if (!elfNN_aarch64_output_map_sym (osi, AARCH64_MAP_C64, addr))
+ return FALSE;
+ break;
case aarch64_stub_none:
break;
(output_bfd, htab->root.splt->output_section);
osi.sec = htab->root.splt;
- elfNN_aarch64_output_map_sym (&osi, AARCH64_MAP_INSN, 0);
+ elfNN_aarch64_output_map_sym (&osi, (htab->c64_rel ? AARCH64_MAP_C64
+ : AARCH64_MAP_INSN), 0);
return TRUE;
sdata = bfd_zalloc (abfd, amt);
if (sdata == NULL)
return FALSE;
+ sdata->elf.is_target_section_data = TRUE;
sec->used_by_bfd = sdata;
}
- record_section_with_aarch64_elf_section_data (sec);
-
return _bfd_elf_new_section_hook (abfd, sec);
}
-static void
-unrecord_section_via_map_over_sections (bfd *abfd ATTRIBUTE_UNUSED,
- asection *sec,
- void *ignore ATTRIBUTE_UNUSED)
-{
- unrecord_section_with_aarch64_elf_section_data (sec);
-}
-
-static bfd_boolean
-elfNN_aarch64_close_and_cleanup (bfd *abfd)
-{
- if (abfd->sections)
- bfd_map_over_sections (abfd,
- unrecord_section_via_map_over_sections, NULL);
-
- return _bfd_elf_close_and_cleanup (abfd);
-}
-
-static bfd_boolean
-elfNN_aarch64_bfd_free_cached_info (bfd *abfd)
-{
- if (abfd->sections)
- bfd_map_over_sections (abfd,
- unrecord_section_via_map_over_sections, NULL);
-
- return _bfd_free_cached_info (abfd);
-}
-
/* Create dynamic sections. This is different from the ARM backend in that
the got, plt, gotplt and their relocation sections are all created in the
standard part of the bfd elf backend. */
/* We also need to make an entry in the .got.plt section, which
will be placed in the .got section by the linker script. */
- htab->root.sgotplt->size += GOT_ENTRY_SIZE;
+ htab->root.sgotplt->size += GOT_ENTRY_SIZE (htab);
/* We also need to make an entry in the .rela.plt section. */
htab->root.srelplt->size += RELOC_SIZE (htab);
if (got_type == GOT_UNKNOWN)
{
}
- else if (got_type == GOT_NORMAL)
+ else if (got_type == GOT_NORMAL
+ || got_type == GOT_CAP)
{
h->got.offset = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab);
if ((ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
|| h->root.type != bfd_link_hash_undefweak)
&& (bfd_link_pic (info)
{
htab->root.srelgot->size += RELOC_SIZE (htab);
}
+ else if (bfd_link_executable (info) && !bfd_link_pic (info))
+ htab->srelcaps->size += RELOC_SIZE (htab);
}
else
{
eh->tlsdesc_got_jump_table_offset =
(htab->root.sgotplt->size
- aarch64_compute_jump_table_size (htab));
- htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
+ htab->root.sgotplt->size += GOT_ENTRY_SIZE (htab) * 2;
h->got.offset = (bfd_vma) - 2;
}
if (got_type & GOT_TLS_GD)
{
h->got.offset = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab) * 2;
}
if (got_type & GOT_TLS_IE)
{
h->got.offset = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab);
}
indx = h && h->dynindx != -1 ? h->dynindx : 0;
|| h->root.type != bfd_link_hash_undefweak)
&& (!bfd_link_executable (info)
|| indx != 0
- || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)))
+ || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h)
+ /* On Morello support only TLSDESC_GD to TLSLE relaxation;
+ for everything else we must emit a dynamic relocation. */
+ || got_type & GOT_CAP))
{
if (got_type & GOT_TLSDESC_GD)
{
type. */
/* TLSDESC PLT is now needed, but not yet determined. */
- htab->tlsdesc_plt = (bfd_vma) - 1;
+ htab->root.tlsdesc_plt = (bfd_vma) - 1;
}
if (got_type & GOT_TLS_GD)
h->got.offset = (bfd_vma) - 1;
}
- if (eh->dyn_relocs == NULL)
+ if (h->dyn_relocs == NULL)
return TRUE;
/* In the shared -Bsymbolic case, discard space allocated for
{
struct elf_dyn_relocs **pp;
- for (pp = &eh->dyn_relocs; (p = *pp) != NULL;)
+ for (pp = &h->dyn_relocs; (p = *pp) != NULL;)
{
p->count -= p->pc_count;
p->pc_count = 0;
/* Also discard relocs on undefined weak syms with non-default
visibility. */
- if (eh->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
+ if (h->dyn_relocs != NULL && h->root.type == bfd_link_hash_undefweak)
{
if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
|| UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
- eh->dyn_relocs = NULL;
+ h->dyn_relocs = NULL;
/* Make sure undefined weak symbols are output as a dynamic
symbol in PIEs. */
goto keep;
}
- eh->dyn_relocs = NULL;
+ h->dyn_relocs = NULL;
keep:;
}
/* Finally, allocate space. */
- for (p = eh->dyn_relocs; p != NULL; p = p->next)
+ for (p = h->dyn_relocs; p != NULL; p = p->next)
{
asection *sreloc;
{
struct bfd_link_info *info;
struct elf_aarch64_link_hash_table *htab;
- struct elf_aarch64_link_hash_entry *eh;
/* An example of a bfd_link_hash_indirect symbol is versioned
symbol. For example: __gxx_personality_v0(bfd_link_hash_indirect)
info = (struct bfd_link_info *) inf;
htab = elf_aarch64_hash_table (info);
- eh = (struct elf_aarch64_link_hash_entry *) h;
-
/* Since STT_GNU_IFUNC symbol must go through PLT, we handle it
here if it is defined and referenced in a non-shared object. */
if (h->type == STT_GNU_IFUNC
&& h->def_regular)
return _bfd_elf_allocate_ifunc_dyn_relocs (info, h,
- &eh->dyn_relocs,
- NULL,
+ &h->dyn_relocs,
htab->plt_entry_size,
htab->plt_header_size,
- GOT_ENTRY_SIZE,
+ GOT_ENTRY_SIZE (htab),
FALSE);
return TRUE;
}
return elfNN_aarch64_allocate_ifunc_dynrelocs (h, inf);
}
-/* Set DF_TEXTREL if we find any dynamic relocs that apply to
- read-only sections. */
-
-static bfd_boolean
-maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
-{
- asection *sec;
-
- if (h->root.type == bfd_link_hash_indirect)
- return TRUE;
-
- sec = readonly_dynrelocs (h);
- if (sec != NULL)
- {
- struct bfd_link_info *info = (struct bfd_link_info *) info_p;
-
- info->flags |= DF_TEXTREL;
- info->callbacks->minfo
- (_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
- sec->owner, h->root.root.string, sec);
-
- /* Not an error, just cut short the traversal. */
- return FALSE;
- }
- return TRUE;
-}
-
/* This is the most important function of all . Innocuosly named
though ! */
static bfd_boolean
-elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
+elfNN_aarch64_size_dynamic_sections (bfd *output_bfd,
struct bfd_link_info *info)
{
struct elf_aarch64_link_hash_table *htab;
}
}
+ aarch64_elf_init_got_section (output_bfd, info);
+
+ setup_plt_values (info, elf_aarch64_tdata (output_bfd)->plt_type);
+
/* Set up .got offsets for local syms, and space for local dynamic
relocs. */
for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
locals[i].tlsdesc_got_jump_table_offset =
(htab->root.sgotplt->size
- aarch64_compute_jump_table_size (htab));
- htab->root.sgotplt->size += GOT_ENTRY_SIZE * 2;
+ htab->root.sgotplt->size += GOT_ENTRY_SIZE (htab) * 2;
locals[i].got_offset = (bfd_vma) - 2;
}
if (got_type & GOT_TLS_GD)
{
locals[i].got_offset = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE * 2;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab) * 2;
}
if (got_type & GOT_TLS_IE
- || got_type & GOT_NORMAL)
+ || got_type & GOT_NORMAL
+ || got_type & GOT_CAP)
{
locals[i].got_offset = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab);
}
if (got_type == GOT_UNKNOWN)
{
htab->root.srelplt->size += RELOC_SIZE (htab);
/* Note RELOC_COUNT not incremented here! */
- htab->tlsdesc_plt = (bfd_vma) - 1;
+ htab->root.tlsdesc_plt = (bfd_vma) - 1;
}
if (got_type & GOT_TLS_GD)
htab->root.srelgot->size += RELOC_SIZE (htab) * 2;
if (got_type & GOT_TLS_IE
- || got_type & GOT_NORMAL)
+ || got_type & GOT_NORMAL
+ || got_type & GOT_CAP)
htab->root.srelgot->size += RELOC_SIZE (htab);
}
+ /* Static binary; put relocs into srelcaps. */
+ else if (bfd_link_executable (info) && (got_type & GOT_CAP))
+ htab->srelcaps->size += RELOC_SIZE (htab);
}
else
{
elfNN_aarch64_allocate_local_ifunc_dynrelocs,
info);
+ if (bfd_link_executable (info)
+ && !bfd_link_pic (info)
+ && htab->srelcaps
+ && htab->srelcaps->size > 0)
+ {
+ struct elf_link_hash_entry *h;
+
+ h = _bfd_elf_define_linkage_sym (output_bfd, info,
+ htab->srelcaps,
+ "__rela_dyn_start");
+ h = _bfd_elf_define_linkage_sym (output_bfd, info,
+ htab->srelcaps,
+ "__rela_dyn_end");
+
+ h->root.u.def.value = htab->srelcaps->vma + htab->srelcaps->size;
+ }
+
/* For every jump slot reserved in the sgotplt, reloc_count is
incremented. However, when we reserve space for TLS descriptors,
it's not incremented, so in order to compute the space reserved
if (htab->root.srelplt)
htab->sgotplt_jump_table_size = aarch64_compute_jump_table_size (htab);
- if (htab->tlsdesc_plt)
+ if (htab->root.tlsdesc_plt)
{
if (htab->root.splt->size == 0)
htab->root.splt->size += htab->plt_header_size;
/* If we're not using lazy TLS relocations, don't generate the
GOT and PLT entry required. */
- if (!(info->flags & DF_BIND_NOW))
+ if ((info->flags & DF_BIND_NOW))
+ htab->root.tlsdesc_plt = 0;
+ else
{
- htab->tlsdesc_plt = htab->root.splt->size;
+ htab->root.tlsdesc_plt = htab->root.splt->size;
htab->root.splt->size += htab->tlsdesc_plt_entry_size;
- htab->dt_tlsdesc_got = htab->root.sgot->size;
- htab->root.sgot->size += GOT_ENTRY_SIZE;
+ htab->root.tlsdesc_got = htab->root.sgot->size;
+ htab->root.sgot->size += GOT_ENTRY_SIZE (htab);
}
}
{
if (!is_aarch64_elf (ibfd))
continue;
- bfd_elfNN_aarch64_init_maps (ibfd);
+ bfd_elfNN_aarch64_init_maps (ibfd, info);
}
/* We now have determined the sizes of the various dynamic sections.
#define add_dynamic_entry(TAG, VAL) \
_bfd_elf_add_dynamic_entry (info, TAG, VAL)
- if (bfd_link_executable (info))
- {
- if (!add_dynamic_entry (DT_DEBUG, 0))
- return FALSE;
- }
+ if (!_bfd_elf_add_dynamic_tags (output_bfd, info, relocs))
+ return FALSE;
if (htab->root.splt->size != 0)
{
- if (!add_dynamic_entry (DT_PLTGOT, 0)
- || !add_dynamic_entry (DT_PLTRELSZ, 0)
- || !add_dynamic_entry (DT_PLTREL, DT_RELA)
- || !add_dynamic_entry (DT_JMPREL, 0))
- return FALSE;
-
if (htab->variant_pcs
&& !add_dynamic_entry (DT_AARCH64_VARIANT_PCS, 0))
return FALSE;
- if (htab->tlsdesc_plt
- && !(info->flags & DF_BIND_NOW)
- && (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
- || !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
- return FALSE;
-
if ((elf_aarch64_tdata (output_bfd)->plt_type == PLT_BTI_PAC)
&& (!add_dynamic_entry (DT_AARCH64_BTI_PLT, 0)
|| !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0)))
&& !add_dynamic_entry (DT_AARCH64_PAC_PLT, 0))
return FALSE;
}
-
- if (relocs)
- {
- if (!add_dynamic_entry (DT_RELA, 0)
- || !add_dynamic_entry (DT_RELASZ, 0)
- || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
- return FALSE;
-
- /* If any dynamic relocs apply to a read-only section,
- then we need a DT_TEXTREL entry. */
- if ((info->flags & DF_TEXTREL) == 0)
- elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
-
- if ((info->flags & DF_TEXTREL) != 0)
- {
- if (!add_dynamic_entry (DT_TEXTREL, 0))
- return FALSE;
- }
- }
}
#undef add_dynamic_entry
(void) _bfd_aarch64_elf_put_addend (output_bfd, plt_entry, r_type, howto, value);
}
+static void
+aarch64_update_c64_plt_entry (bfd *output_bfd, bfd_byte *plt_entry,
+ bfd_vma plt_base, bfd_vma plt_got_ent)
+{
+ /* Fill in the top 20 bits for this: ADRP c16, PLT_GOT + n * 16.
+ ADRP: ((PG(S+A)-PG(P)) >> 12) & 0xfffff */
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_MORELLO_ADR_HI20_PCREL,
+ plt_entry,
+ PG (plt_got_ent) - PG (plt_base));
+
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_LDST128_LO12,
+ plt_entry + 4,
+ PG_OFFSET (plt_got_ent));
+
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
+ plt_entry + 8,
+ PG_OFFSET (plt_got_ent));
+}
+
static void
elfNN_aarch64_create_small_pltn_entry (struct elf_link_hash_entry *h,
struct elf_aarch64_link_hash_table
if (plt == htab->root.splt)
{
plt_index = (h->plt.offset - htab->plt_header_size) / htab->plt_entry_size;
- got_offset = (plt_index + 3) * GOT_ENTRY_SIZE;
+ got_offset = (plt_index + 3) * GOT_ENTRY_SIZE (htab);
}
else
{
plt_index = h->plt.offset / htab->plt_entry_size;
- got_offset = plt_index * GOT_ENTRY_SIZE;
+ got_offset = plt_index * GOT_ENTRY_SIZE (htab);
}
plt_entry = plt->contents + h->plt.offset;
/* Copy in the boiler-plate for the PLTn entry. */
memcpy (plt_entry, htab->plt_entry, htab->plt_entry_size);
- /* First instruction in BTI enabled PLT stub is a BTI
- instruction so skip it. */
- if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
- && elf_elfheader (output_bfd)->e_type == ET_EXEC)
- plt_entry = plt_entry + 4;
+ if (htab->c64_rel)
+ aarch64_update_c64_plt_entry (output_bfd, plt_entry, plt_entry_address,
+ gotplt_entry_address);
+ else
+ {
- /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
- ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
- elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADR_HI21_PCREL,
- plt_entry,
- PG (gotplt_entry_address) -
- PG (plt_entry_address));
+ /* First instruction in BTI enabled PLT stub is a BTI
+ instruction so skip it. */
+ if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI
+ && elf_elfheader (output_bfd)->e_type == ET_EXEC)
+ plt_entry = plt_entry + 4;
- /* Fill in the lo12 bits for the load from the pltgot. */
- elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
- plt_entry + 4,
- PG_OFFSET (gotplt_entry_address));
+ /* Fill in the top 21 bits for this: ADRP x16, PLT_GOT + n * 8.
+ ADRP: ((PG(S+A)-PG(P)) >> 12) & 0x1fffff */
+ elf_aarch64_update_plt_entry (output_bfd,
+ BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ plt_entry,
+ PG (gotplt_entry_address) -
+ PG (plt_entry_address));
- /* Fill in the lo12 bits for the add from the pltgot entry. */
- elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
- plt_entry + 8,
- PG_OFFSET (gotplt_entry_address));
+ /* Fill in the lo12 bits for the load from the pltgot. */
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_LDSTNN_LO12,
+ plt_entry + 4,
+ PG_OFFSET (gotplt_entry_address));
- /* All the GOTPLT Entries are essentially initialized to PLT0. */
- bfd_put_NN (output_bfd,
- plt->output_section->vma + plt->output_offset,
- gotplt->contents + got_offset);
+ /* Fill in the lo12 bits for the add from the pltgot entry. */
+ elf_aarch64_update_plt_entry (output_bfd, BFD_RELOC_AARCH64_ADD_LO12,
+ plt_entry + 8,
+ PG_OFFSET (gotplt_entry_address));
+ }
+
+ /* All the GOTPLT Entries are essentially initialized to PLT0. Set LSB if
+ the PLT is C64. */
+ bfd_vma plt0 = ((plt->output_section->vma + plt->output_offset)
+ | htab->c64_rel);
+ bfd_put_NN (output_bfd, plt0, gotplt->contents + got_offset);
rela.r_offset = gotplt_entry_address;
{
/* If an STT_GNU_IFUNC symbol is locally defined, generate
R_AARCH64_IRELATIVE instead of R_AARCH64_JUMP_SLOT. */
- rela.r_info = ELFNN_R_INFO (0, AARCH64_R (IRELATIVE));
+ rela.r_info = (elf_aarch64_hash_entry (h)->got_type == GOT_CAP
+ ? ELFNN_R_INFO (0, MORELLO_R (IRELATIVE))
+ : ELFNN_R_INFO (0, AARCH64_R (IRELATIVE)));
rela.r_addend = (h->root.u.def.value
+ h->root.u.def.section->output_section->vma
+ h->root.u.def.section->output_offset);
else
{
/* Fill in the entry in the .rela.plt section. */
- rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT));
+ rela.r_info = (elf_aarch64_hash_entry (h)->got_type == GOT_CAP
+ ? ELFNN_R_INFO (h->dynindx, MORELLO_R (JUMP_SLOT))
+ : ELFNN_R_INFO (h->dynindx, AARCH64_R (JUMP_SLOT)));
rela.r_addend = 0;
}
}
}
+ bfd_boolean is_c64 = elf_aarch64_hash_entry (h)->got_type == GOT_CAP;
+
if (h->got.offset != (bfd_vma) - 1
- && elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
+ && (elf_aarch64_hash_entry (h)->got_type == GOT_NORMAL
+ || elf_aarch64_hash_entry (h)->got_type == GOT_CAP)
/* Undefined weak symbol in static PIE resolves to 0 without
any dynamic relocations. */
&& !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
return FALSE;
BFD_ASSERT ((h->got.offset & 1) != 0);
- rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
- rela.r_addend = (h->root.u.def.value
- + h->root.u.def.section->output_section->vma
- + h->root.u.def.section->output_offset);
+ if (is_c64)
+ {
+ rela.r_info = ELFNN_R_INFO (0, MORELLO_R (RELATIVE));
+ rela.r_addend = 0;
+ }
+ else
+ {
+ rela.r_info = ELFNN_R_INFO (0, AARCH64_R (RELATIVE));
+ rela.r_addend = (h->root.u.def.value
+ + h->root.u.def.section->output_section->vma
+ + h->root.u.def.section->output_offset);
+ }
}
else
{
BFD_ASSERT ((h->got.offset & 1) == 0);
bfd_put_NN (output_bfd, (bfd_vma) 0,
htab->root.sgot->contents + h->got.offset);
- rela.r_info = ELFNN_R_INFO (h->dynindx, AARCH64_R (GLOB_DAT));
+ rela.r_info = ELFNN_R_INFO (h->dynindx,
+ (is_c64 ? MORELLO_R (GLOB_DAT)
+ : AARCH64_R (GLOB_DAT)));
rela.r_addend = 0;
}
memcpy (htab->root.splt->contents, htab->plt0_entry,
htab->plt_header_size);
- elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize =
- htab->plt_header_size;
+
+ /* PR 26312: Explicitly set the sh_entsize to 0 so that
+ consumers do not think that the section contains fixed
+ sized objects. */
+ elf_section_data (htab->root.splt->output_section)->this_hdr.sh_entsize = 0;
plt_got_2nd_ent = (htab->root.sgotplt->output_section->vma
+ htab->root.sgotplt->output_offset
- + GOT_ENTRY_SIZE * 2);
+ + GOT_ENTRY_SIZE (htab) * 2);
plt_base = htab->root.splt->output_section->vma +
htab->root.splt->output_offset;
+ bfd_byte *plt0_entry = htab->root.splt->contents;
+
+ if (htab->c64_rel)
+ {
+ aarch64_update_c64_plt_entry (output_bfd, plt0_entry + 4,
+ plt_base + 4, plt_got_2nd_ent);
+ return;
+ }
+
/* First instruction in BTI enabled PLT stub is a BTI
instruction so skip it. */
- bfd_byte *plt0_entry = htab->root.splt->contents;
if (elf_aarch64_tdata (output_bfd)->plt_type & PLT_BTI)
plt0_entry = plt0_entry + 4;
case DT_TLSDESC_PLT:
s = htab->root.splt;
dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
- + htab->tlsdesc_plt;
+ + htab->root.tlsdesc_plt;
break;
case DT_TLSDESC_GOT:
s = htab->root.sgot;
- BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
+ BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
- + htab->dt_tlsdesc_got;
+ + htab->root.tlsdesc_got;
break;
}
{
elfNN_aarch64_init_small_plt0_entry (output_bfd, htab);
- elf_section_data (htab->root.splt->output_section)->
- this_hdr.sh_entsize = htab->plt_entry_size;
-
-
- if (htab->tlsdesc_plt && !(info->flags & DF_BIND_NOW))
+ if (htab->root.tlsdesc_plt && !(info->flags & DF_BIND_NOW))
{
- BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
+ BFD_ASSERT (htab->root.tlsdesc_got != (bfd_vma)-1);
bfd_put_NN (output_bfd, (bfd_vma) 0,
- htab->root.sgot->contents + htab->dt_tlsdesc_got);
+ htab->root.sgot->contents + htab->root.tlsdesc_got);
const bfd_byte *entry = elfNN_aarch64_tlsdesc_small_plt_entry;
htab->tlsdesc_plt_entry_size = PLT_TLSDESC_ENTRY_SIZE;
+ unsigned adrp_rtype = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
+ unsigned ldr_rtype = BFD_RELOC_AARCH64_LDSTNN_LO12;
+
aarch64_plt_type type = elf_aarch64_tdata (output_bfd)->plt_type;
- if (type == PLT_BTI || type == PLT_BTI_PAC)
+ if (htab->c64_rel)
+ {
+ entry = elfNN_aarch64_tlsdesc_small_plt_c64_entry;
+ adrp_rtype = BFD_RELOC_MORELLO_ADR_HI20_PCREL;
+ ldr_rtype = BFD_RELOC_AARCH64_LDST128_LO12;
+ }
+ else if (type == PLT_BTI || type == PLT_BTI_PAC)
{
entry = elfNN_aarch64_tlsdesc_small_plt_bti_entry;
}
- memcpy (htab->root.splt->contents + htab->tlsdesc_plt,
+ memcpy (htab->root.splt->contents + htab->root.tlsdesc_plt,
entry, htab->tlsdesc_plt_entry_size);
{
bfd_vma adrp1_addr =
htab->root.splt->output_section->vma
- + htab->root.splt->output_offset + htab->tlsdesc_plt + 4;
+ + htab->root.splt->output_offset
+ + htab->root.tlsdesc_plt + 4;
bfd_vma adrp2_addr = adrp1_addr + 4;
htab->root.sgotplt->output_section->vma
+ htab->root.sgotplt->output_offset;
- bfd_vma dt_tlsdesc_got = got_addr + htab->dt_tlsdesc_got;
+ bfd_vma dt_tlsdesc_got = got_addr + htab->root.tlsdesc_got;
bfd_byte *plt_entry =
- htab->root.splt->contents + htab->tlsdesc_plt;
+ htab->root.splt->contents + htab->root.tlsdesc_plt;
/* First instruction in BTI enabled PLT stub is a BTI
instruction so skip it. */
/* adrp x2, DT_TLSDESC_GOT */
elf_aarch64_update_plt_entry (output_bfd,
- BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ adrp_rtype,
plt_entry + 4,
(PG (dt_tlsdesc_got)
- PG (adrp1_addr)));
/* adrp x3, 0 */
elf_aarch64_update_plt_entry (output_bfd,
- BFD_RELOC_AARCH64_ADR_HI21_PCREL,
+ adrp_rtype,
plt_entry + 8,
(PG (pltgot_addr)
- PG (adrp2_addr)));
/* ldr x2, [x2, #0] */
elf_aarch64_update_plt_entry (output_bfd,
- BFD_RELOC_AARCH64_LDSTNN_LO12,
+ ldr_rtype,
plt_entry + 12,
PG_OFFSET (dt_tlsdesc_got));
/* Write GOT[1] and GOT[2], needed for the dynamic linker. */
bfd_put_NN (output_bfd,
(bfd_vma) 0,
- htab->root.sgotplt->contents + GOT_ENTRY_SIZE);
+ htab->root.sgotplt->contents + GOT_ENTRY_SIZE (htab));
bfd_put_NN (output_bfd,
(bfd_vma) 0,
- htab->root.sgotplt->contents + GOT_ENTRY_SIZE * 2);
+ (htab->root.sgotplt->contents
+ + GOT_ENTRY_SIZE (htab) * 2));
}
if (htab->root.sgot)
}
elf_section_data (htab->root.sgotplt->output_section)->
- this_hdr.sh_entsize = GOT_ENTRY_SIZE;
+ this_hdr.sh_entsize = GOT_ENTRY_SIZE (htab);
}
if (htab->root.sgot && htab->root.sgot->size > 0)
elf_section_data (htab->root.sgot->output_section)->this_hdr.sh_entsize
- = GOT_ENTRY_SIZE;
+ = GOT_ENTRY_SIZE (htab);
/* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
htab_traverse (htab->loc_hash_table,
the mapping symbols could have acquired a prefix.
We do not support this here, since such symbols no
longer conform to the ARM ELF ABI. */
- && (name[1] == 'd' || name[1] == 'x')
+ && (name[1] == 'd' || name[1] == 'x' || name[1] == 'c')
&& (name[2] == 0 || name[2] == '.');
/* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
any characters that follow the period are legal characters for the body
elf_aarch64_tdata (info->output_bfd)->gnu_and_prop = prop;
elf_aarch64_tdata (info->output_bfd)->plt_type
|= (prop & GNU_PROPERTY_AARCH64_FEATURE_1_BTI) ? PLT_BTI : 0;
- setup_plt_values (info, elf_aarch64_tdata (info->output_bfd)->plt_type);
return pbfd;
}
bprop, prop);
}
+/* Demangle c64 function symbols as we read them in. */
+
+static bfd_boolean
+aarch64_elfNN_swap_symbol_in (bfd * abfd,
+ const void *psrc,
+ const void *pshn,
+ Elf_Internal_Sym *dst)
+{
+ if (!bfd_elfNN_swap_symbol_in (abfd, psrc, pshn, dst))
+ return FALSE;
+
+ dst->st_target_internal = 0;
+
+ if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
+ || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
+ {
+ dst->st_target_internal = dst->st_value & ST_BRANCH_TO_C64;
+ dst->st_value &= ~(bfd_vma) ST_BRANCH_TO_C64;
+ }
+
+ return TRUE;
+}
+
+
+/* Mangle c64 function symbols as we write them out. */
+
+static void
+aarch64_elfNN_swap_symbol_out (bfd *abfd,
+ const Elf_Internal_Sym *src,
+ void *cdst,
+ void *shndx)
+{
+ Elf_Internal_Sym newsym = *src;
+
+ if ((ELF_ST_TYPE (newsym.st_info) == STT_FUNC
+ || ELF_ST_TYPE (newsym.st_info) == STT_GNU_IFUNC)
+ && newsym.st_shndx != SHN_UNDEF)
+ newsym.st_value |= newsym.st_target_internal;
+
+ bfd_elfNN_swap_symbol_out (abfd, &newsym, cdst, shndx);
+}
+
+/* Define the size of a GOT element for the generic mid-end. */
+
+static bfd_vma
+elfNN_aarch64_got_elt_size (bfd *abfd ATTRIBUTE_UNUSED,
+ struct bfd_link_info *info,
+ struct elf_link_hash_entry *h ATTRIBUTE_UNUSED,
+ bfd *ibfd ATTRIBUTE_UNUSED,
+ unsigned long symndx ATTRIBUTE_UNUSED)
+{
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+
+ return GOT_ENTRY_SIZE (htab);
+}
+
+/* Define the size of a GOT header, which is the minimum size of the GOT section
+ when one is needed. */
+
+static bfd_vma
+elfNN_aarch64_got_header_size (struct bfd_link_info *info)
+{
+ struct elf_aarch64_link_hash_table *htab = elf_aarch64_hash_table (info);
+
+ return GOT_ENTRY_SIZE (htab) * GOT_RESERVED_HEADER_SLOTS;
+}
+
+/* Identify the 'C' in the CIE augmentation string. */
+
+static bfd_boolean
+elf64_aarch64_eh_frame_augmentation_char (const char aug)
+{
+ return aug == 'C';
+}
+
/* We use this so we can override certain functions
(though currently we don't). */
bfd_elfNN_write_shdrs_and_ehdr,
bfd_elfNN_checksum_contents,
bfd_elfNN_write_relocs,
- bfd_elfNN_swap_symbol_in,
- bfd_elfNN_swap_symbol_out,
+ aarch64_elfNN_swap_symbol_in,
+ aarch64_elfNN_swap_symbol_out,
bfd_elfNN_slurp_reloc_table,
bfd_elfNN_slurp_symbol_table,
bfd_elfNN_swap_dyn_in,
#define ELF_MINPAGESIZE 0x1000
#define ELF_COMMONPAGESIZE 0x1000
-#define bfd_elfNN_close_and_cleanup \
- elfNN_aarch64_close_and_cleanup
-
-#define bfd_elfNN_bfd_free_cached_info \
- elfNN_aarch64_bfd_free_cached_info
-
#define bfd_elfNN_bfd_is_target_special_symbol \
elfNN_aarch64_is_target_special_symbol
#define elf_backend_merge_gnu_properties \
elfNN_aarch64_merge_gnu_properties
+#define elf_backend_got_header_size \
+ elfNN_aarch64_got_header_size
+
+#define elf_backend_got_elt_size \
+ elfNN_aarch64_got_elt_size
+
+#define elf_backend_eh_frame_augmentation_char \
+ elf64_aarch64_eh_frame_augmentation_char
+
#define elf_backend_can_refcount 1
#define elf_backend_can_gc_sections 1
#define elf_backend_plt_readonly 1
#define elf_backend_default_use_rela_p 1
#define elf_backend_rela_normal 1
#define elf_backend_dtrel_excludes_plt 1
-#define elf_backend_got_header_size (GOT_ENTRY_SIZE * 3)
#define elf_backend_default_execstack 0
#define elf_backend_extern_protected_data 1
#define elf_backend_hash_symbol elf_aarch64_hash_symbol