diff --git a/gold/aarch64.cc b/gold/aarch64.cc
index 8dfd933966ba869fa0da22ec59a3d27647f75fe4..a45d3fd6671985332d7b77ab02846e8761435684 100644
@@ -1,6 +1,6 @@
 // aarch64.cc -- aarch64 target support for gold.
 
-// Copyright (C) 2014-2015 Free Software Foundation, Inc.
+// Copyright (C) 2014-2019 Free Software Foundation, Inc.
 // Written by Jing Yu <jingyu@google.com> and Han Shen <shenhan@google.com>.
 
 // This file is part of gold.
@@ -77,7 +77,10 @@ class AArch64_insn_utilities
 public:
   typedef typename elfcpp::Swap<32, big_endian>::Valtype Insntype;
 
-  static const int BYTES_PER_INSN = 4;
+  static const int BYTES_PER_INSN;
+
+  // Zero register encoding - 31.
+  static const unsigned int AARCH64_ZR;
 
   static unsigned int
   aarch64_bit(Insntype insn, int pos)
@@ -87,10 +90,30 @@ public:
   aarch64_bits(Insntype insn, int pos, int l)
   { return (insn >> pos) & ((1 << l) - 1); }
 
+  // Get the encoding field "op31" of 3-source data processing insns. "op31" is
+  // the name defined in armv8 insn manual C3.5.9.
+  static unsigned int
+  aarch64_op31(Insntype insn)
+  { return aarch64_bits(insn, 21, 3); }
+
+  // Get the encoding field "ra" of 3-source data processing insns. "ra" is the
+  // third source register. See armv8 insn manual C3.5.9.
+  static unsigned int
+  aarch64_ra(Insntype insn)
+  { return aarch64_bits(insn, 10, 5); }
+
+  static bool
+  is_adr(const Insntype insn)
+  { return (insn & 0x9F000000) == 0x10000000; }
+
   static bool
   is_adrp(const Insntype insn)
   { return (insn & 0x9F000000) == 0x90000000; }
 
+  static bool
+  is_mrs_tpidr_el0(const Insntype insn)
+  { return (insn & 0xFFFFFFE0) == 0xd53bd040; }
+
   static unsigned int
   aarch64_rm(const Insntype insn)
   { return aarch64_bits(insn, 16, 5); }
@@ -111,6 +134,39 @@ public:
   aarch64_rt2(const Insntype insn)
   { return aarch64_bits(insn, 10, 5); }
 
+  // Encode imm21 into adr. Signed imm21 is in the range of [-1M, 1M).
+  static Insntype
+  aarch64_adr_encode_imm(Insntype adr, int imm21)
+  {
+    gold_assert(is_adr(adr));
+    gold_assert(-(1 << 20) <= imm21 && imm21 < (1 << 20));
+    const int mask19 = (1 << 19) - 1;
+    const int mask2 = 3;
+    adr &= ~((mask19 << 5) | (mask2 << 29));
+    adr |= ((imm21 & mask2) << 29) | (((imm21 >> 2) & mask19) << 5);
+    return adr;
+  }
+
+  // Retrieve the encoded adrp 33-bit signed imm value. This value is the
+  // 21-bit signed imm encoded in the insn, multiplied by 4k (the page size)
+  // and sign-extended to 64 bits, resulting in [-4G, 4G) with the 12 LSBs
+  // being 0.
+  static int64_t
+  aarch64_adrp_decode_imm(const Insntype adrp)
+  {
+    const int mask19 = (1 << 19) - 1;
+    const int mask2 = 3;
+    gold_assert(is_adrp(adrp));
+    // 21-bit imm encoded in adrp.
+    uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
+    // Retrieve msb of 21-bit-signed imm for sign extension.
+    uint64_t msbt = (imm >> 20) & 1;
+    // Real value is imm multiplied by 4k. Value now has 33-bit information.
+    int64_t value = imm << 12;
+    // Sign-extend to 64 bits by repeating msbt 31 (64-33) times and merging
+    // it with value.
+    return ((((uint64_t)(1) << 32) - msbt) << 33) | value;
+  }
+
   static bool
   aarch64_b(const Insntype insn)
   { return (insn & 0xFC000000) == 0x14000000; }
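
For reference, here is a minimal standalone sketch (not part of gold) of how the 21-bit ADRP immediate decoded above expands to a page-scaled, sign-extended 64-bit displacement. The sample encoding of "adrp x0, <page - 4096>" in main() is an assumption used only for the check.

#include <cassert>
#include <cstdint>

// Mirror of aarch64_adrp_decode_imm: decode the imm21 field of an ADRP insn
// into its page-scaled, sign-extended 64-bit displacement.
static int64_t decode_adrp_imm(uint32_t adrp)
{
  const uint32_t mask19 = (1 << 19) - 1;
  const uint32_t mask2 = 3;
  // imm21 = immhi:immlo, immlo at bits [30:29], immhi at bits [23:5].
  uint64_t imm = ((adrp >> 29) & mask2) | (((adrp >> 5) & mask19) << 2);
  uint64_t msbt = (imm >> 20) & 1;          // sign bit of imm21
  int64_t value = imm << 12;                // scale by the 4K page size
  // Replicate the sign bit into bits [63:33].
  return ((((uint64_t)1 << 32) - msbt) << 33) | value;
}

int main()
{
  // Hypothetical encoding with imm21 = -1, i.e. immlo = 3, immhi = 0x7ffff.
  uint32_t adrp = 0x90000000u | (3u << 29) | (0x7ffffu << 5);
  assert(decode_adrp_imm(adrp) == -4096);
  return 0;
}
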
@@ -214,7 +270,7 @@ public:
     uint32_t v = 0;
     uint32_t opc_v = 0;
 
-    /* Bail out quickly if INSN doesn't fall into the the load-store
+    /* Bail out quickly if INSN doesn't fall into the load-store
        encoding space.  */
     if (!aarch64_ldst (insn))
       return false;
@@ -330,8 +386,42 @@ public:
        return true;
       }
     return false;
+  }  // End of "aarch64_mem_op_p".
+
+  // Return true if INSN is a mac (multiply-accumulate) insn.
+  static bool
+  aarch64_mac(Insntype insn)
+  { return (insn & 0xff000000) == 0x9b000000; }
+
+  // Return true if INSN is a multiply-accumulate insn.
+  // (This is similar to the implementation in elfnn-aarch64.c.)
+  static bool
+  aarch64_mlxl(Insntype insn)
+  {
+    uint32_t op31 = aarch64_op31(insn);
+    if (aarch64_mac(insn)
+       && (op31 == 0 || op31 == 1 || op31 == 5)
+       /* Exclude MUL instructions which are encoded as a multiply-accumulate
+          with RA = XZR.  */
+       && aarch64_ra(insn) != AARCH64_ZR)
+      {
+       return true;
+      }
+    return false;
   }
-};
+};  // End of "AArch64_insn_utilities".
+
+
+// Insn length in bytes.
+
+template<bool big_endian>
+const int AArch64_insn_utilities<big_endian>::BYTES_PER_INSN = 4;
+
+
+// Zero register encoding - 31.
+
+template<bool big_endian>
+const unsigned int AArch64_insn_utilities<big_endian>::AARCH64_ZR = 0x1f;
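
As a cross-check of the 835769 predicate above, a small self-contained sketch (not gold code; the sample encodings are assumptions) distinguishing MADD from its MUL alias, which is a multiply-accumulate with Ra = XZR:

#include <cassert>
#include <cstdint>

// Mirror of aarch64_mlxl: a 3-source data-processing insn with op31 in
// {0, 1, 5} and Ra != XZR (31).  MUL assembles to MADD with Ra = XZR and is
// therefore excluded.
static bool is_mlxl(uint32_t insn)
{
  bool is_mac = (insn & 0xff000000) == 0x9b000000;
  uint32_t op31 = (insn >> 21) & 7;
  uint32_t ra = (insn >> 10) & 0x1f;
  return is_mac && (op31 == 0 || op31 == 1 || op31 == 5) && ra != 31;
}

int main()
{
  assert(is_mlxl(0x9b020c20));   // madd x0, x1, x2, x3
  assert(!is_mlxl(0x9b027c20));  // mul  x0, x1, x2 (madd with Ra = xzr)
  return 0;
}
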
 
 
 // Output_data_got_aarch64 class.
@@ -603,8 +693,11 @@ enum
   // Stub for erratum 843419 handling.
   ST_E_843419 = 4,
 
+  // Stub for erratum 835769 handling.
+  ST_E_835769 = 5,
+
   // Number of total stub types.
-  ST_NUMBER = 5
+  ST_NUMBER = 6
 };
 
 
@@ -695,6 +788,15 @@ Stub_template_repertoire<big_endian>::Stub_template_repertoire()
       0x14000000,    /* b <label> */
     };
 
+  // ST_E_835769 has the same stub template as ST_E_843419
+  // but we reproduce the array here so that the sizeof
+  // expressions in install_insn_template will work.
+  const static Insntype ST_E_835769_INSNS[] =
+    {
+      0x00000000,    /* Placeholder for erratum insn. */
+      0x14000000,    /* b <label> */
+    };
+
 #define install_insn_template(T) \
   const static Stub_template<big_endian> template_##T = {  \
     T##_INSNS, sizeof(T##_INSNS) / sizeof(T##_INSNS[0]) }; \
@@ -705,6 +807,7 @@ Stub_template_repertoire<big_endian>::Stub_template_repertoire()
   install_insn_template(ST_LONG_BRANCH_ABS);
   install_insn_template(ST_LONG_BRANCH_PCREL);
   install_insn_template(ST_E_843419);
+  install_insn_template(ST_E_835769);
 
 #undef install_insn_template
 }
@@ -827,7 +930,7 @@ private:
 
 // Erratum stub class. An erratum stub differs from a reloc stub in that for
 // each erratum occurrence, we generate an erratum stub. We never share erratum
-// stubs, whereas for reloc stubs, different branches insns share a single reloc
+// stubs, whereas for reloc stubs, different branch insns share a single reloc
 // stub as long as the branch targets are the same. (More to the point, reloc
 // stubs can be shared because they're used to reach a specific target, whereas
 // erratum stubs branch back to the original control flow.)
@@ -838,6 +941,7 @@ class Erratum_stub : public Stub_base<size, big_endian>
 public:
   typedef AArch64_relobj<size, big_endian> The_aarch64_relobj;
   typedef typename elfcpp::Elf_types<size>::Elf_Addr AArch64_address;
+  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
   typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
 
   static const int STUB_ADDR_ALIGN;
@@ -882,6 +986,38 @@ public:
   set_erratum_insn(Insntype insn)
   { this->erratum_insn_ = insn; }
 
+  // For 843419, the erratum insn is "ld/st Xt, [Xn, #uimm]", which may be a
+  // relocation spot. In that case, the erratum_insn_ recorded at scanning
+  // phase is no longer the one we want to write out to the stub, so update
+  // erratum_insn_ with the relocated version. Also note that in this case Xn
+  // must not be "PC", so it is safe to move the erratum insn from its original
+  // place to the stub. For 835769, the erratum insn is a multiply-accumulate
+  // insn, which cannot be a relocation spot (an assertion is added regardless).
+  void
+  update_erratum_insn(Insntype insn)
+  {
+    gold_assert(this->erratum_insn_ != this->invalid_insn);
+    switch (this->type())
+      {
+      case ST_E_843419:
+       gold_assert(Insn_utilities::aarch64_ldst_uimm(insn));
+       gold_assert(Insn_utilities::aarch64_ldst_uimm(this->erratum_insn()));
+       gold_assert(Insn_utilities::aarch64_rd(insn) ==
+                   Insn_utilities::aarch64_rd(this->erratum_insn()));
+       gold_assert(Insn_utilities::aarch64_rn(insn) ==
+                   Insn_utilities::aarch64_rn(this->erratum_insn()));
+       // Update plain ld/st insn with relocated insn.
+       this->erratum_insn_ = insn;
+       break;
+      case ST_E_835769:
+       gold_assert(insn == this->erratum_insn());
+       break;
+      default:
+       gold_unreachable();
+      }
+  }
+
+
   // Return the address where an erratum must be done.
   AArch64_address
   erratum_address() const
@@ -895,8 +1031,20 @@ public:
   set_erratum_address(AArch64_address addr)
   { this->erratum_address_ = addr; }
 
+  // Later relaxation passes may alter the recorded erratum and destination
+  // addresses. Given an up-to-date output section address of shndx_ in
+  // relobj_, we can derive the erratum address and destination address.
+  void
+  update_erratum_address(AArch64_address output_section_addr)
+  {
+    const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
+    AArch64_address updated_addr = output_section_addr + this->sh_offset_;
+    this->set_erratum_address(updated_addr);
+    this->set_destination_address(updated_addr + BPI);
+  }
+
   // Comparator used to group Erratum_stubs in a set by (obj, shndx,
-  // sh_offset). We do not include 'type' in the calculation, becuase there is
+  // sh_offset). We do not include 'type' in the calculation, because there is
   // at most one stub type at (obj, shndx, sh_offset).
   bool
   operator<(const Erratum_stub<size, big_endian>& k) const
@@ -913,6 +1061,17 @@ public:
     return this->sh_offset_ < k.sh_offset_;
   }
 
+  void
+  invalidate_erratum_stub()
+  {
+     gold_assert(this->erratum_insn_ != invalid_insn);
+     this->erratum_insn_ = invalid_insn;
+  }
+
+  bool
+  is_invalidated_erratum_stub()
+  { return this->erratum_insn_ == invalid_insn; }
+
 protected:
   virtual void
   do_write(unsigned char*, section_size_type);
@@ -930,6 +1089,35 @@ private:
   AArch64_address erratum_address_;
 };  // End of "Erratum_stub".
 
+
+// Erratum stub sub class wrapping the additional info needed by 843419.  In
+// fixing this erratum, we may choose to replace 'adrp' with 'adr'; in that
+// case, we need adrp's code position (two or three insns before the erratum
+// insn itself).
+
+template<int size, bool big_endian>
+class E843419_stub : public Erratum_stub<size, big_endian>
+{
+public:
+  typedef typename AArch64_insn_utilities<big_endian>::Insntype Insntype;
+
+  E843419_stub(AArch64_relobj<size, big_endian>* relobj,
+                     unsigned int shndx, unsigned int sh_offset,
+                     unsigned int adrp_sh_offset)
+    : Erratum_stub<size, big_endian>(relobj, ST_E_843419, shndx, sh_offset),
+      adrp_sh_offset_(adrp_sh_offset)
+  {}
+
+  unsigned int
+  adrp_sh_offset() const
+  { return this->adrp_sh_offset_; }
+
+private:
+  // Section offset of "adrp". (We do not need an "adrp_shndx_" field, because
+  // we can obtain it from its parent.)
+  const unsigned int adrp_sh_offset_;
+};
+
+
 template<int size, bool big_endian>
 const int Erratum_stub<size, big_endian>::STUB_ADDR_ALIGN = 4;
 
@@ -953,9 +1141,9 @@ Erratum_stub<size, big_endian>::do_write(unsigned char* view, section_size_type)
   const Insntype* insns = this->insns();
   uint32_t num_insns = this->insn_num();
   Insntype* ip = reinterpret_cast<Insntype*>(view);
-  // For current implemnted erratum 843419, (and 835769 which is to be
-  // implemented soon), the first insn in the stub is always a copy of the
-  // problematic insn (in 843419, the mem access insn), followed by a jump-back.
+  // For current implemented erratum 843419 and 835769, the first insn in the
+  // stub is always a copy of the problematic insn (in 843419, the mem access
+  // insn, in 835769, the mac insn), followed by a jump-back.
   elfcpp::Swap<32, big_endian>::writeval(ip, this->erratum_insn());
   for (uint32_t i = 1; i < num_insns; ++i)
     elfcpp::Swap<32, big_endian>::writeval(ip + i, insns[i]);
@@ -1172,14 +1360,17 @@ Reloc_stub<size, big_endian>::stub_type_for_reloc(
   if (aarch64_valid_for_adrp_p(location, dest))
     return ST_ADRP_BRANCH;
 
-  if (parameters->options().output_is_position_independent()
-      && parameters->options().output_is_executable())
+  // Always use PC-relative addressing in case of -shared or -pie.
+  if (parameters->options().output_is_position_independent())
     return ST_LONG_BRANCH_PCREL;
 
+  // This saves 2 insns per stub, compared to ST_LONG_BRANCH_PCREL.
+  // But it is only applicable to non-shared, non-pie links.
   return ST_LONG_BRANCH_ABS;
 }
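
The selection logic above can be summarized by the following standalone sketch (a simplification, not gold's actual helpers; the +/-4GB page-offset test stands in for aarch64_valid_for_adrp_p):

#include <cstdint>

enum Stub_type { ST_ADRP_BRANCH, ST_LONG_BRANCH_PCREL, ST_LONG_BRANCH_ABS };

static Stub_type
choose_stub_type(int64_t location, int64_t dest, bool position_independent)
{
  // adrp+add+br can reach any target whose page is within +/-4GB.
  int64_t page_off = (dest & ~INT64_C(0xfff)) - (location & ~INT64_C(0xfff));
  if (page_off >= -(INT64_C(1) << 32) && page_off < (INT64_C(1) << 32))
    return ST_ADRP_BRANCH;
  // -shared/-pie output must stay PC-relative.
  if (position_independent)
    return ST_LONG_BRANCH_PCREL;
  // Otherwise an absolute-address stub is shorter (2 fewer insns).
  return ST_LONG_BRANCH_ABS;
}

int main()
{
  return choose_stub_type(0x400000, INT64_C(0x500000000), true)
             == ST_LONG_BRANCH_PCREL ? 0 : 1;
}
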
 
-// A class to hold stubs for the ARM target.
+// A class to hold stubs for the AArch64 target. This contains two different
+// types of stubs: reloc stubs and erratum stubs.
 
 template<int size, bool big_endian>
 class Stub_table : public Output_data
@@ -1271,14 +1462,18 @@ class Stub_table : public Output_data
     return (p != this->reloc_stubs_.end()) ? p->second : NULL;
   }
 
-  // Relocate stubs in this stub table.
+  // Relocate reloc stubs in this stub table. This does not relocate erratum
+  // stubs.
   void
-  relocate_stubs(const The_relocate_info*,
-                The_target_aarch64*,
-                Output_section*,
-                unsigned char*,
-                AArch64_address,
-                section_size_type);
+  relocate_reloc_stubs(const The_relocate_info*,
+                       The_target_aarch64*,
+                       Output_section*,
+                       unsigned char*,
+                       AArch64_address,
+                       section_size_type);
+
+  // Relocate an erratum stub.
+  void
+  relocate_erratum_stub(The_erratum_stub*, unsigned char*);
 
   // Update data size at the end of a relaxation pass.  Return true if data size
   // is different from that of the previous relaxation pass.
@@ -1318,15 +1513,15 @@ class Stub_table : public Output_data
   { this->set_data_size(this->current_data_size()); }
 
  private:
-  // Relocate one stub.
+  // Relocate one reloc stub.
   void
-  relocate_stub(The_reloc_stub*,
-               const The_relocate_info*,
-               The_target_aarch64*,
-               Output_section*,
-               unsigned char*,
-               AArch64_address,
-               section_size_type);
+  relocate_reloc_stub(The_reloc_stub*,
+                      const The_relocate_info*,
+                      The_target_aarch64*,
+                      Output_section*,
+                      unsigned char*,
+                      AArch64_address,
+                      section_size_type);
 
  private:
   // Owner of this stub table.
@@ -1361,7 +1556,7 @@ Stub_table<size, big_endian>::add_erratum_stub(The_erratum_stub* stub)
 }
 
 
-// Find if such erratum exists for givein (obj, shndx, sh_offset).
+// Find if such erratum exists for given (obj, shndx, sh_offset).
 
 template<int size, bool big_endian>
 Erratum_stub<size, big_endian>*
@@ -1426,69 +1621,85 @@ Stub_table<size, big_endian>::add_reloc_stub(
 }
 
 
-// Relocate all stubs in this stub table.
+// Relocate an erratum stub.
 
 template<int size, bool big_endian>
 void
 Stub_table<size, big_endian>::
-relocate_stubs(const The_relocate_info* relinfo,
-              The_target_aarch64* target_aarch64,
-              Output_section* output_section,
-              unsigned char* view,
-              AArch64_address address,
-              section_size_type view_size)
+relocate_erratum_stub(The_erratum_stub* estub,
+                      unsigned char* view)
 {
-  // "view_size" is the total size of the stub_table.
-  gold_assert(address == this->address() &&
-             view_size == static_cast<section_size_type>(this->data_size()));
-  for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
-      p != this->reloc_stubs_.end(); ++p)
-    relocate_stub(p->second, relinfo, target_aarch64, output_section,
-                 view, address, view_size);
-
   // Just for convenience.
   const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
 
-  // Now 'relocate' erratum stubs.
-  for(Erratum_stub_set_iter i = this->erratum_stubs_.begin();
-      i != this->erratum_stubs_.end(); ++i)
+  gold_assert(!estub->is_invalidated_erratum_stub());
+  AArch64_address stub_address = this->erratum_stub_address(estub);
+  // The address of "b" in the stub that is to be "relocated".
+  AArch64_address stub_b_insn_address;
+  // Branch offset that is to be filled in "b" insn.
+  int b_offset = 0;
+  switch (estub->type())
     {
-      AArch64_address stub_address = this->erratum_stub_address(*i);
-      // The address of "b" in the stub that is to be "relocated".
-      AArch64_address stub_b_insn_address;
-      // Branch offset that is to be filled in "b" insn.
-      int b_offset = 0;
-      switch ((*i)->type())
-       {
-       case ST_E_843419:
-         // For the erratum, the 2nd insn is a b-insn to be patched
-         // (relocated).
-         stub_b_insn_address = stub_address + 1 * BPI;
-         b_offset = (*i)->destination_address() - stub_b_insn_address;
-         AArch64_relocate_functions<size, big_endian>::construct_b(
-             view + (stub_b_insn_address - this->address()),
-             ((unsigned int)(b_offset)) & 0xfffffff);
-         break;
-       default:
-         gold_unreachable();
-         break;
-       }
+    case ST_E_843419:
+    case ST_E_835769:
+      // The 1st insn of the erratum could be a relocation spot; in this
+      // case we need to fix it with "estub->erratum_insn()".
+      elfcpp::Swap<32, big_endian>::writeval(
+          view + (stub_address - this->address()),
+          estub->erratum_insn());
+      // For the erratum, the 2nd insn is a b-insn to be patched
+      // (relocated).
+      stub_b_insn_address = stub_address + 1 * BPI;
+      b_offset = estub->destination_address() - stub_b_insn_address;
+      AArch64_relocate_functions<size, big_endian>::construct_b(
+          view + (stub_b_insn_address - this->address()),
+          ((unsigned int)(b_offset)) & 0xfffffff);
+      break;
+    default:
+      gold_unreachable();
+      break;
     }
+  estub->invalidate_erratum_stub();
 }
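
To make the address arithmetic above concrete, here is a tiny standalone sketch with hypothetical addresses showing how the branch-back offset of the two-insn erratum stub is derived:

#include <cassert>
#include <cstdint>

int main()
{
  const int BPI = 4;                        // bytes per insn
  uint64_t erratum_address = 0x400100;      // insn replaced by "b <stub>"
  uint64_t stub_address    = 0x410000;      // first insn of the erratum stub
  // Stub layout: [0] copy of the erratum insn, [1] b back to the insn
  // following the erratum spot.
  uint64_t destination_address = erratum_address + BPI;
  uint64_t stub_b_insn_address = stub_address + 1 * BPI;
  int64_t b_offset = static_cast<int64_t>(destination_address)
                     - static_cast<int64_t>(stub_b_insn_address);
  // A B-insn immediate is 26 bits scaled by 4: 4-byte aligned, +/-128MB.
  assert((b_offset & 3) == 0);
  assert(b_offset >= -(INT64_C(1) << 27) && b_offset < (INT64_C(1) << 27));
  return 0;
}
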
 
 
-// Relocate one stub.  This is a helper for Stub_table::relocate_stubs().
+// Relocate only reloc stubs in this stub table. This does not relocate erratum
+// stubs.
 
 template<int size, bool big_endian>
 void
 Stub_table<size, big_endian>::
-relocate_stub(The_reloc_stub* stub,
-             const The_relocate_info* relinfo,
-             The_target_aarch64* target_aarch64,
-             Output_section* output_section,
-             unsigned char* view,
-             AArch64_address address,
-             section_size_type view_size)
+relocate_reloc_stubs(const The_relocate_info* relinfo,
+                     The_target_aarch64* target_aarch64,
+                     Output_section* output_section,
+                     unsigned char* view,
+                     AArch64_address address,
+                     section_size_type view_size)
+{
+  // "view_size" is the total size of the stub_table.
+  gold_assert(address == this->address() &&
+             view_size == static_cast<section_size_type>(this->data_size()));
+  for(Reloc_stub_map_const_iter p = this->reloc_stubs_.begin();
+      p != this->reloc_stubs_.end(); ++p)
+    relocate_reloc_stub(p->second, relinfo, target_aarch64, output_section,
+                        view, address, view_size);
+}
+
+
+// Relocate one reloc stub. This is a helper for
+// Stub_table::relocate_reloc_stubs().
+
+template<int size, bool big_endian>
+void
+Stub_table<size, big_endian>::
+relocate_reloc_stub(The_reloc_stub* stub,
+                    const The_relocate_info* relinfo,
+                    The_target_aarch64* target_aarch64,
+                    Output_section* output_section,
+                    unsigned char* view,
+                    AArch64_address address,
+                    section_size_type view_size)
 {
   // "offset" is the offset from the beginning of the stub_table.
   section_size_type offset = stub->offset();
@@ -1496,8 +1707,8 @@ relocate_stub(The_reloc_stub* stub,
   // "view_size" is the total size of the stub_table.
   gold_assert(offset + stub_size <= view_size);
 
-  target_aarch64->relocate_stub(stub, relinfo, output_section,
-                               view + offset, address + offset, view_size);
+  target_aarch64->relocate_reloc_stub(stub, relinfo, output_section,
+                                      view + offset, address + offset, view_size);
 }
 
 
@@ -1580,12 +1791,12 @@ class AArch64_relobj : public Sized_relobj_file<size, big_endian>
     this->stub_tables_[shndx] = stub_table;
   }
 
-  // Entrance to erratum_843419 scanning.
+  // Entrance to errata scanning.
   void
-  scan_erratum_843419(unsigned int shndx,
-                     const elfcpp::Shdr<size, big_endian>&,
-                     Output_section*, const Symbol_table*,
-                     The_target_aarch64*);
+  scan_errata(unsigned int shndx,
+             const elfcpp::Shdr<size, big_endian>&,
+             Output_section*, const Symbol_table*,
+             The_target_aarch64*);
 
   // Scan all relocation sections for stub generation.
   void
@@ -1599,10 +1810,11 @@ class AArch64_relobj : public Sized_relobj_file<size, big_endian>
 
   // Convert regular input section with index SHNDX to a relaxed section.
   void
-  convert_input_section_to_relaxed_section(unsigned /* shndx */)
+  convert_input_section_to_relaxed_section(unsigned shndx)
   {
     // The stubs have relocations and we need to process them after writing
     // out the stubs.  So relocation now must follow section write.
+    this->set_section_offset(shndx, -1ULL);
     this->set_relocs_must_follow_section_writes();
   }
 
@@ -1654,9 +1866,18 @@ class AArch64_relobj : public Sized_relobj_file<size, big_endian>
                         Stringpool_template<char>*);
 
  private:
-  // Fix all errata in the object.
+  // Fix all errata in the object, and for each erratum, relocate the
+  // corresponding erratum stub.
   void
-  fix_errata(typename Sized_relobj_file<size, big_endian>::Views* pviews);
+  fix_errata_and_relocate_erratum_stubs(
+      typename Sized_relobj_file<size, big_endian>::Views* pviews);
+
+  // Try to fix erratum 843419 in an optimized way. Return true if patch is
+  // applied.
+  bool
+  try_fix_erratum_843419_optimized(
+      The_erratum_stub*, AArch64_address,
+      typename Sized_relobj_file<size, big_endian>::View_size&);
 
   // Whether a section needs to be scanned for relocation stubs.
   bool
@@ -1682,7 +1903,8 @@ AArch64_relobj<size, big_endian>::do_count_local_symbols(
 
   // Only erratum-fixing work needs mapping symbols, so skip this time consuming
   // processing if not fixing erratum.
-  if (!parameters->options().fix_cortex_a53_843419())
+  if (!parameters->options().fix_cortex_a53_843419()
+      && !parameters->options().fix_cortex_a53_835769())
     return;
 
   const unsigned int loccount = this->local_symbol_count();
@@ -1734,10 +1956,17 @@ AArch64_relobj<size, big_endian>::do_count_local_symbols(
       Symbol_value<size>& lv((*plocal_values)[i]);
       AArch64_address input_value = lv.input_value();
 
-      // Check to see if this is a mapping symbol.
+      // Check to see if this is a mapping symbol. AArch64 mapping symbols are
+      // defined in "ELF for the ARM 64-bit Architecture", Table 4-4, Mapping
+      // symbols.
+      // Mapping symbols could be one of the following 4 forms -
+      //   a) $x
+      //   b) $x.<any...>
+      //   c) $d
+      //   d) $d.<any...>
       const char* sym_name = pnames + sym.get_st_name();
       if (sym_name[0] == '$' && (sym_name[1] == 'x' || sym_name[1] == 'd')
-         && sym_name[2] == '\0')
+         && (sym_name[2] == '\0' || sym_name[2] == '.'))
        {
          bool is_ordinary;
          unsigned int input_shndx =
@@ -1753,15 +1982,17 @@ AArch64_relobj<size, big_endian>::do_count_local_symbols(
 }
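
A minimal standalone restatement of the mapping-symbol name test above (the sample names are illustrative):

#include <cassert>

// Accept "$x", "$d", and the dotted forms "$x.<...>" / "$d.<...>".
static bool is_mapping_symbol_name(const char* name)
{
  return name[0] == '$'
         && (name[1] == 'x' || name[1] == 'd')
         && (name[2] == '\0' || name[2] == '.');
}

int main()
{
  assert(is_mapping_symbol_name("$x"));
  assert(is_mapping_symbol_name("$d.realdata"));
  assert(!is_mapping_symbol_name("$t"));     // not an AArch64 mapping symbol
  assert(!is_mapping_symbol_name("$xz"));
  return 0;
}
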
 
 
-// Fix all errata in the object.
+// Fix all errata in the object and for each erratum, we relocate the
+// corresponding erratum stub (by calling Stub_table::relocate_erratum_stub).
 
 template<int size, bool big_endian>
 void
-AArch64_relobj<size, big_endian>::fix_errata(
+AArch64_relobj<size, big_endian>::fix_errata_and_relocate_erratum_stubs(
     typename Sized_relobj_file<size, big_endian>::Views* pviews)
 {
   typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
   unsigned int shnum = this->shnum();
+  const Relobj::Output_sections& out_sections(this->output_sections());
   for (unsigned int i = 1; i < shnum; ++i)
     {
       The_stub_table* stub_table = this->stub_table(i);
@@ -1770,31 +2001,145 @@ AArch64_relobj<size, big_endian>::fix_errata(
       std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
        ipair(stub_table->find_erratum_stubs_for_input_section(this, i));
       Erratum_stub_set_iter p = ipair.first, end = ipair.second;
+      typename Sized_relobj_file<size, big_endian>::View_size&
+       pview((*pviews)[i]);
+      AArch64_address view_offset = 0;
+      if (pview.is_input_output_view)
+       {
+         // In this case, write_sections has not added the output offset to
+         // the view's address, so we must do so. Currently this only happens
+         // for a relaxed section.
+         unsigned int index = this->adjust_shndx(i);
+         const Output_relaxed_input_section* poris =
+             out_sections[index]->find_relaxed_input_section(this, index);
+         gold_assert(poris != NULL);
+         view_offset = poris->address() - pview.address;
+       }
+
       while (p != end)
        {
          The_erratum_stub* stub = *p;
-         typename Sized_relobj_file<size, big_endian>::View_size&
-           pview((*pviews)[i]);
 
          // Double check data before fix.
+         gold_assert(pview.address + view_offset + stub->sh_offset()
+                     == stub->erratum_address());
+
+         // Update previously recorded erratum insn with relocated
+         // version.
          Insntype* ip =
-           reinterpret_cast<Insntype*>(pview.view + stub->sh_offset());
+           reinterpret_cast<Insntype*>(
+             pview.view + view_offset + stub->sh_offset());
          Insntype insn_to_fix = ip[0];
-         gold_assert(insn_to_fix == stub->erratum_insn());
-         gold_assert(pview.address + stub->sh_offset()
-                     == stub->erratum_address());
+         stub->update_erratum_insn(insn_to_fix);
+
+         // First try to see if erratum is 843419 and if it can be fixed
+         // without using branch-to-stub.
+         if (!try_fix_erratum_843419_optimized(stub, view_offset, pview))
+           {
+             // Replace the erratum insn with a branch-to-stub.
+             AArch64_address stub_address =
+               stub_table->erratum_stub_address(stub);
+             unsigned int b_offset = stub_address - stub->erratum_address();
+             AArch64_relocate_functions<size, big_endian>::construct_b(
+               pview.view + view_offset + stub->sh_offset(),
+               b_offset & 0xfffffff);
+           }
 
-         AArch64_address stub_address =
-           stub_table->erratum_stub_address(stub);
-         unsigned int b_offset = stub_address - stub->erratum_address();
-         AArch64_relocate_functions<size, big_endian>::construct_b(
-           pview.view + stub->sh_offset(), b_offset & 0xfffffff);
+          // The erratum fix is done (or skipped); continue to relocate the
+          // erratum stub. Note that when the erratum fix is skipped (either
+          // because we proactively changed the code sequence or because the
+          // code sequence was changed by relaxation, etc.), we can still
+          // safely relocate the erratum stub, ignoring the fact that the
+          // stub will never be executed.
+          stub_table->relocate_erratum_stub(
+           stub,
+           pview.view + (stub_table->address() - pview.address));
+
+          // Next erratum stub.
          ++p;
        }
     }
 }
 
 
+// This is an optimization for erratum 843419. The erratum sequence begins with
+// 'adrp'; when the final value calculated by the adrp fits in an adr, we can
+// just replace 'adrp' with 'adr', saving 2 jumps per occurrence. (Note,
+// however, that in this case we do not delete the erratum stub (it is too late
+// to do so); it is merely generated without ever being called.)
+
+template<int size, bool big_endian>
+bool
+AArch64_relobj<size, big_endian>::try_fix_erratum_843419_optimized(
+    The_erratum_stub* stub, AArch64_address view_offset,
+    typename Sized_relobj_file<size, big_endian>::View_size& pview)
+{
+  if (stub->type() != ST_E_843419)
+    return false;
+
+  typedef AArch64_insn_utilities<big_endian> Insn_utilities;
+  typedef typename elfcpp::Swap<32,big_endian>::Valtype Insntype;
+  E843419_stub<size, big_endian>* e843419_stub =
+    reinterpret_cast<E843419_stub<size, big_endian>*>(stub);
+  AArch64_address pc =
+    pview.address + view_offset + e843419_stub->adrp_sh_offset();
+  unsigned int adrp_offset = e843419_stub->adrp_sh_offset ();
+  Insntype* adrp_view =
+    reinterpret_cast<Insntype*>(pview.view + view_offset + adrp_offset);
+  Insntype adrp_insn = adrp_view[0];
+
+  // If the instruction at adrp_sh_offset is "mrs R, tpidr_el0", it may come
+  // from IE -> LE relaxation etc.  As a side-effect of TLS relaxation, the
+  // ADRP has been turned into an MRS, so there is no erratum risk anymore.
+  // Therefore, we return true to avoid emitting an unnecessary branch-to-stub.
+  if (Insn_utilities::is_mrs_tpidr_el0(adrp_insn))
+    return true;
+
+  // If the instruction at adrp_sh_offset is not ADRP and the instruction before
+  // it is "mrs R, tpidr_el0", it may come from LD -> LE relaxation etc.
+  // Like the above case, there is no erratum risk anymore, so we can safely
+  // return true.
+  if (!Insn_utilities::is_adrp(adrp_insn) && adrp_offset)
+    {
+      Insntype* prev_view =
+       reinterpret_cast<Insntype*>(
+         pview.view + view_offset + adrp_offset - 4);
+      Insntype prev_insn = prev_view[0];
+
+      if (Insn_utilities::is_mrs_tpidr_el0(prev_insn))
+       return true;
+    }
+
+  /* If we reach here, the first instruction must be ADRP.  */
+  gold_assert(Insn_utilities::is_adrp(adrp_insn));
+  // Get adrp 33-bit signed imm value.
+  int64_t adrp_imm = Insn_utilities::
+    aarch64_adrp_decode_imm(adrp_insn);
+  // adrp - final value transferred to target register is calculated as:
+  //     PC[11:0] = Zeros(12)
+  //     adrp_dest_value = PC + adrp_imm;
+  int64_t adrp_dest_value = (pc & ~((1 << 12) - 1)) + adrp_imm;
+  // adr - final value transferred to target register is calculated as:
+  //     PC + adr_imm
+  // So we have:
+  //     PC + adr_imm = adrp_dest_value
+  //   ==>
+  //     adr_imm = adrp_dest_value - PC
+  int64_t adr_imm = adrp_dest_value - pc;
+  // Check if imm fits in adr (21-bit signed).
+  if (-(1 << 20) <= adr_imm && adr_imm < (1 << 20))
+    {
+      // Convert 'adrp' into 'adr'.
+      Insntype adr_insn = adrp_insn & ((1u << 31) - 1);
+      adr_insn = Insn_utilities::
+       aarch64_adr_encode_imm(adr_insn, adr_imm);
+      elfcpp::Swap<32, big_endian>::writeval(adrp_view, adr_insn);
+      return true;
+    }
+  return false;
+}
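
For illustration, a standalone sketch (with hypothetical addresses) of the range test that decides whether the adrp can be rewritten to adr:

#include <cassert>
#include <cstdint>

int main()
{
  // Address of the adrp and the page-aligned value it materializes.
  int64_t pc = 0x400010;
  int64_t adrp_dest_value = 0x401000;
  // adr computes PC + imm, so it can replace the adrp only if the required
  // immediate fits the 21-bit signed adr range [-1M, 1M).
  int64_t adr_imm = adrp_dest_value - pc;
  bool fits = -(1 << 20) <= adr_imm && adr_imm < (1 << 20);
  assert(fits);
  return 0;
}
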
+
+
 // Relocate sections.
 
 template<int size, bool big_endian>
@@ -1804,23 +2149,27 @@ AArch64_relobj<size, big_endian>::do_relocate_sections(
     const unsigned char* pshdrs, Output_file* of,
     typename Sized_relobj_file<size, big_endian>::Views* pviews)
 {
-  // Call parent to relocate sections.
-  Sized_relobj_file<size, big_endian>::do_relocate_sections(symtab, layout,
-                                                           pshdrs, of, pviews);
+  // Relocate the section data.
+  this->relocate_section_range(symtab, layout, pshdrs, of, pviews,
+                              1, this->shnum() - 1);
 
   // We do not generate stubs if doing a relocatable link.
   if (parameters->options().relocatable())
     return;
 
-  if (parameters->options().fix_cortex_a53_843419())
-    this->fix_errata(pviews);
+  // This part only relocates erratum stubs that belong to input sections of this
+  // object file.
+  if (parameters->options().fix_cortex_a53_843419()
+      || parameters->options().fix_cortex_a53_835769())
+    this->fix_errata_and_relocate_erratum_stubs(pviews);
 
   Relocate_info<size, big_endian> relinfo;
   relinfo.symtab = symtab;
   relinfo.layout = layout;
   relinfo.object = this;
 
-  // Relocate stub tables.
+  // This part relocates all reloc stubs that are contained in stub_tables of
+  // this object file.
   unsigned int shnum = this->shnum();
   The_target_aarch64* target = The_target_aarch64::current_target();
 
@@ -1849,8 +2198,8 @@ AArch64_relobj<size, big_endian>::do_relocate_sections(
          unsigned char* view = view_struct.view + offset;
          AArch64_address address = stub_table->address();
          section_size_type view_size = stub_table->data_size();
-         stub_table->relocate_stubs(&relinfo, target, os, view, address,
-                                    view_size);
+         stub_table->relocate_reloc_stubs(&relinfo, target, os, view, address,
+                                          view_size);
        }
     }
 }
@@ -1938,11 +2287,11 @@ AArch64_relobj<size, big_endian>::section_needs_reloc_stub_scanning(
 }
 
 
-// Scan section SHNDX for erratum 843419.
+// Scan section SHNDX for errata 843419 and 835769.
 
 template<int size, bool big_endian>
 void
-AArch64_relobj<size, big_endian>::scan_erratum_843419(
+AArch64_relobj<size, big_endian>::scan_errata(
     unsigned int shndx, const elfcpp::Shdr<size, big_endian>& shdr,
     Output_section* os, const Symbol_table* symtab,
     The_target_aarch64* target)
@@ -1967,6 +2316,19 @@ AArch64_relobj<size, big_endian>::scan_erratum_843419(
       output_address = poris->address();
     }
 
+  // Update the addresses in previously generated erratum stubs. Unlike when
+  // we scan relocations for stubs, if section addresses have changed due to
+  // other relaxations we are unlikely to scan the same erratum instances
+  // again, so the recorded addresses must be refreshed here.
+  The_stub_table* stub_table = this->stub_table(shndx);
+  if (stub_table)
+    {
+      std::pair<Erratum_stub_set_iter, Erratum_stub_set_iter>
+         ipair(stub_table->find_erratum_stubs_for_input_section(this, shndx));
+      for (Erratum_stub_set_iter p = ipair.first;  p != ipair.second; ++p)
+          (*p)->update_erratum_address(output_address);
+    }
+
   section_size_type input_view_size = 0;
   const unsigned char* input_view =
     this->section_contents(shndx, &input_view_size, false);
@@ -1975,10 +2337,6 @@ AArch64_relobj<size, big_endian>::scan_erratum_843419(
   // Find the first mapping symbol record within section shndx.
   typename Mapping_symbol_info::const_iterator p =
     this->mapping_symbol_info_.lower_bound(section_start);
-  if (p == this->mapping_symbol_info_.end() || p->first.shndx_ != shndx)
-    gold_warning(_("cannot scan executable section %u of %s for Cortex-A53 "
-                  "erratum because it has no mapping symbols."),
-                shndx, this->name().c_str());
   while (p != this->mapping_symbol_info_.end() &&
         p->first.shndx_ == shndx)
     {
@@ -1994,7 +2352,18 @@ AArch64_relobj<size, big_endian>::scan_erratum_843419(
            span_end = convert_to_section_size_type(p->first.offset_);
          else
            span_end = convert_to_section_size_type(shdr.get_sh_size());
-         target->scan_erratum_843419_span(
+
+         // Here we do not share the scanning code between the two errata. For
+         // 843419, only the last few insns of each page are examined, which is
+         // fast, whereas for 835769, every insn pair needs to be checked.
+
+         if (parameters->options().fix_cortex_a53_843419())
+           target->scan_erratum_843419_span(
+             this, shndx, span_start, span_end,
+             const_cast<unsigned char*>(input_view), output_address);
+
+         if (parameters->options().fix_cortex_a53_835769())
+           target->scan_erratum_835769_span(
              this, shndx, span_start, span_end,
              const_cast<unsigned char*>(input_view), output_address);
        }
@@ -2035,8 +2404,9 @@ AArch64_relobj<size, big_endian>::scan_sections_for_stubs(
   for (unsigned int i = 1; i < shnum; ++i, p += shdr_size)
     {
       const elfcpp::Shdr<size, big_endian> shdr(p);
-      if (parameters->options().fix_cortex_a53_843419())
-       scan_erratum_843419(i, shdr, out_sections[i], symtab, target);
+      if (parameters->options().fix_cortex_a53_843419()
+         || parameters->options().fix_cortex_a53_835769())
+       scan_errata(i, shdr, out_sections[i], symtab, target);
       if (this->section_needs_reloc_stub_scanning(shdr, out_sections, symtab,
                                                  pshdrs))
        {
@@ -2622,6 +2992,21 @@ class Target_aarch64 : public Sized_target<size, big_endian>
                          const unsigned char* plocal_symbols,
                          Relocatable_relocs*);
 
+  // Scan the relocs for --emit-relocs.
+  void
+  emit_relocs_scan(Symbol_table* symtab,
+                  Layout* layout,
+                  Sized_relobj_file<size, big_endian>* object,
+                  unsigned int data_shndx,
+                  unsigned int sh_type,
+                  const unsigned char* prelocs,
+                  size_t reloc_count,
+                  Output_section* output_section,
+                  bool needs_special_offset_handling,
+                  size_t local_symbol_count,
+                  const unsigned char* plocal_syms,
+                  Relocatable_relocs* rr);
+
   // Relocate a section during a relocatable link.
   void
   relocate_relocs(
@@ -2631,7 +3016,6 @@ class Target_aarch64 : public Sized_target<size, big_endian>
       size_t reloc_count,
       Output_section* output_section,
       typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
-      const Relocatable_relocs*,
       unsigned char* view,
       typename elfcpp::Elf_types<size>::Elf_Addr view_address,
       section_size_type view_size,
@@ -2719,11 +3103,11 @@ class Target_aarch64 : public Sized_target<size, big_endian>
       Address view_address,
       section_size_type);
 
-  // Relocate a single stub.
+  // Relocate a single reloc stub.
   void
-  relocate_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
-               Output_section*, unsigned char*, Address,
-               section_size_type);
+  relocate_reloc_stub(The_reloc_stub*, const Relocate_info<size, big_endian>*,
+                      Output_section*, unsigned char*, Address,
+                      section_size_type);
 
   // Get the default AArch64 target.
   static This*
@@ -2736,11 +3120,21 @@ class Target_aarch64 : public Sized_target<size, big_endian>
   }
 
 
-  // Scan erratum for a part of a section.
+  // Scan erratum 843419 for a part of a section.
   void
   scan_erratum_843419_span(
     AArch64_relobj<size, big_endian>*,
-    unsigned int shndx,
+    unsigned int,
+    const section_size_type,
+    const section_size_type,
+    unsigned char*,
+    Address);
+
+  // Scan erratum 835769 for a part of a section.
+  void
+  scan_erratum_835769_span(
+    AArch64_relobj<size, big_endian>*,
+    unsigned int,
     const section_size_type,
     const section_size_type,
     unsigned char*,
@@ -2900,11 +3294,9 @@ class Target_aarch64 : public Sized_target<size, big_endian>
     // Do a relocation.  Return false if the caller should not issue
     // any warnings about this relocation.
     inline bool
-    relocate(const Relocate_info<size, big_endian>*, Target_aarch64*,
-            Output_section*,
-            size_t relnum, const elfcpp::Rela<size, big_endian>&,
-            unsigned int r_type, const Sized_symbol<size>*,
-            const Symbol_value<size>*,
+    relocate(const Relocate_info<size, big_endian>*, unsigned int,
+            Target_aarch64*, Output_section*, size_t, const unsigned char*,
+            const Sized_symbol<size>*, const Symbol_value<size>*,
             unsigned char*, typename elfcpp::Elf_types<size>::Elf_Addr,
             section_size_type);
 
@@ -2970,15 +3362,6 @@ class Target_aarch64 : public Sized_target<size, big_endian>
 
   };  // End of class Relocate
 
-  // A class which returns the size required for a relocation type,
-  // used while scanning relocs during a relocatable link.
-  class Relocatable_size_for_reloc
-  {
-   public:
-    unsigned int
-    get_size_for_reloc(unsigned int, Relobj*);
-  };
-
   // Adjust TLS relocation type based on the options and whether this
   // is a local symbol.
   static tls::Tls_optimization
@@ -3039,12 +3422,29 @@ class Target_aarch64 : public Sized_target<size, big_endian>
     return this->plt_;
   }
 
+  // Helper method to create erratum stubs for ST_E_843419 and ST_E_835769. For
+  // ST_E_843419, we need an additional field for adrp offset.
+  void create_erratum_stub(
+    AArch64_relobj<size, big_endian>* relobj,
+    unsigned int shndx,
+    section_size_type erratum_insn_offset,
+    Address erratum_address,
+    typename Insn_utilities::Insntype erratum_insn,
+    int erratum_type,
+    unsigned int e843419_adrp_offset=0);
+
   // Return whether this is a 3-insn erratum sequence.
   bool is_erratum_843419_sequence(
       typename elfcpp::Swap<32,big_endian>::Valtype insn1,
       typename elfcpp::Swap<32,big_endian>::Valtype insn2,
       typename elfcpp::Swap<32,big_endian>::Valtype insn3);
 
+  // Return whether this is an 835769 sequence.
+  // (Implemented similarly to elfnn-aarch64.c.)
+  bool is_erratum_835769_sequence(
+      typename elfcpp::Swap<32,big_endian>::Valtype,
+      typename elfcpp::Swap<32,big_endian>::Valtype);
+
   // Get the dynamic reloc section, creating it if necessary.
   Reloc_section*
   rela_dyn_section(Layout*);
@@ -3064,10 +3464,13 @@ class Target_aarch64 : public Sized_target<size, big_endian>
             unsigned int shndx, Output_section* output_section,
             Symbol* sym, const elfcpp::Rela<size, big_endian>& reloc)
   {
+    unsigned int r_type = elfcpp::elf_r_type<size>(reloc.get_r_info());
     this->copy_relocs_.copy_reloc(symtab, layout,
                                  symtab->get_sized_symbol<size>(sym),
                                  object, shndx, output_section,
-                                 reloc, this->rela_dyn_section(layout));
+                                 r_type, reloc.get_r_offset(),
+                                 reloc.get_r_addend(),
+                                 this->rela_dyn_section(layout));
   }
 
   // Information about this specific target which we pass to the
@@ -3145,12 +3548,12 @@ const Target::Target_info Target_aarch64<64, false>::aarch64_info =
   false,               // has_make_symbol
   false,               // has_resolve
   false,               // has_code_fill
-  true,                        // is_default_stack_executable
+  false,               // is_default_stack_executable
   true,                        // can_icf_inline_merge_sections
   '\0',                        // wrap_char
   "/lib/ld.so.1",      // program interpreter
   0x400000,            // default_text_segment_address
-  0x1000,            // abi_pagesize (overridable by -z max-page-size)
+  0x10000,             // abi_pagesize (overridable by -z max-page-size)
   0x1000,              // common_pagesize (overridable by -z common-page-size)
   false,                // isolate_execinstr
   0,                    // rosegment_gap
@@ -3160,7 +3563,9 @@ const Target::Target_info Target_aarch64<64, false>::aarch64_info =
   0,                   // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 template<>
@@ -3172,12 +3577,12 @@ const Target::Target_info Target_aarch64<32, false>::aarch64_info =
   false,               // has_make_symbol
   false,               // has_resolve
   false,               // has_code_fill
-  true,                        // is_default_stack_executable
+  false,               // is_default_stack_executable
   false,               // can_icf_inline_merge_sections
   '\0',                        // wrap_char
   "/lib/ld.so.1",      // program interpreter
   0x400000,            // default_text_segment_address
-  0x1000,            // abi_pagesize (overridable by -z max-page-size)
+  0x10000,             // abi_pagesize (overridable by -z max-page-size)
   0x1000,              // common_pagesize (overridable by -z common-page-size)
   false,                // isolate_execinstr
   0,                    // rosegment_gap
@@ -3187,7 +3592,9 @@ const Target::Target_info Target_aarch64<32, false>::aarch64_info =
   0,                   // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 template<>
@@ -3199,12 +3606,12 @@ const Target::Target_info Target_aarch64<64, true>::aarch64_info =
   false,               // has_make_symbol
   false,               // has_resolve
   false,               // has_code_fill
-  true,                        // is_default_stack_executable
+  false,               // is_default_stack_executable
   true,                        // can_icf_inline_merge_sections
   '\0',                        // wrap_char
   "/lib/ld.so.1",      // program interpreter
   0x400000,            // default_text_segment_address
-  0x1000,            // abi_pagesize (overridable by -z max-page-size)
+  0x10000,             // abi_pagesize (overridable by -z max-page-size)
   0x1000,              // common_pagesize (overridable by -z common-page-size)
   false,                // isolate_execinstr
   0,                    // rosegment_gap
@@ -3214,7 +3621,9 @@ const Target::Target_info Target_aarch64<64, true>::aarch64_info =
   0,                   // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 template<>
@@ -3226,12 +3635,12 @@ const Target::Target_info Target_aarch64<32, true>::aarch64_info =
   false,               // has_make_symbol
   false,               // has_resolve
   false,               // has_code_fill
-  true,                        // is_default_stack_executable
+  false,               // is_default_stack_executable
   false,               // can_icf_inline_merge_sections
   '\0',                        // wrap_char
   "/lib/ld.so.1",      // program interpreter
   0x400000,            // default_text_segment_address
-  0x1000,            // abi_pagesize (overridable by -z max-page-size)
+  0x10000,             // abi_pagesize (overridable by -z max-page-size)
   0x1000,              // common_pagesize (overridable by -z common-page-size)
   false,                // isolate_execinstr
   0,                    // rosegment_gap
@@ -3241,7 +3650,9 @@ const Target::Target_info Target_aarch64<32, true>::aarch64_info =
   0,                   // large_common_section_flags
   NULL,                        // attributes_section
   NULL,                        // attributes_vendor
-  "_start"             // entry_symbol_name
+  "_start",            // entry_symbol_name
+  32,                  // hash_entry_size
+  elfcpp::SHT_PROGBITS,        // unwind_section_type
 };
 
 // Get the GOT section, creating it if necessary.
@@ -3448,13 +3859,18 @@ Target_aarch64<size, big_endian>::scan_reloc_for_stub(
       if (gsym->use_plt_offset(arp->reference_flags()))
        {
          // This uses a PLT, change the symbol value.
-         symval.set_output_value(this->plt_section()->address()
-                                 + gsym->plt_offset());
+         symval.set_output_value(this->plt_address_for_global(gsym));
          psymval = &symval;
        }
       else if (gsym->is_undefined())
-       // There is no need to generate a stub symbol is undefined.
-       return;
+       {
+          // There is no need to generate a stub if the original symbol
+          // is undefined.
+          gold_debug(DEBUG_TARGET,
+                     "stub: not creating a stub for undefined symbol %s in file %s",
+                     gsym->name(), aarch64_relobj->name().c_str());
+          return;
+       }
     }
 
   // Get the symbol value.
@@ -3556,6 +3972,7 @@ Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
       const Symbol_value<size> *psymval;
       bool is_defined_in_discarded_section;
       unsigned int shndx;
+      const Symbol* gsym = NULL;
       if (r_sym < local_count)
        {
          sym = NULL;
@@ -3579,6 +3996,8 @@ Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
          if (!is_defined_in_discarded_section)
            {
              typedef Sized_relobj_file<size, big_endian> ObjType;
+             if (psymval->is_section_symbol())
+               symval.set_is_section_symbol();
              typename ObjType::Compute_final_local_value_status status =
                object->compute_final_local_value(r_sym, psymval, &symval,
                                                  relinfo->symtab);
@@ -3606,7 +4025,6 @@ Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
        }
       else
        {
-         const Symbol* gsym;
          gsym = object->global_symbol(r_sym);
          gold_assert(gsym != NULL);
          if (gsym->is_forwarder())
@@ -3647,16 +4065,16 @@ Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
       Symbol_value<size> symval2;
       if (is_defined_in_discarded_section)
        {
+         std::string name = object->section_name(relinfo->data_shndx);
+
          if (comdat_behavior == CB_UNDETERMINED)
-           {
-             std::string name = object->section_name(relinfo->data_shndx);
              comdat_behavior = default_comdat_behavior.get(name.c_str());
-           }
+
          if (comdat_behavior == CB_PRETEND)
            {
              bool found;
              typename elfcpp::Elf_types<size>::Elf_Addr value =
-               object->map_to_kept_section(shndx, &found);
+               object->map_to_kept_section(shndx, name, &found);
              if (found)
                symval2.set_output_value(value + psymval->input_value());
              else
@@ -3664,21 +4082,14 @@ Target_aarch64<size, big_endian>::scan_reloc_section_for_stubs(
            }
          else
            {
-             if (comdat_behavior == CB_WARNING)
-               gold_warning_at_location(relinfo, i, offset,
-                                        _("relocation refers to discarded "
-                                          "section"));
+             if (comdat_behavior == CB_ERROR)
+               issue_discarded_error(relinfo, i, offset, r_sym, gsym);
              symval2.set_output_value(0);
            }
          symval2.set_no_output_symtab_entry();
          psymval = &symval2;
        }
 
-      // If symbol is a section symbol, we don't know the actual type of
-      // destination.  Give up.
-      if (psymval->is_section_symbol())
-       continue;
-
       this->scan_reloc_for_stub(relinfo, r_type, sym, r_sym, psymval,
                                addend, view_address + offset);
     }  // End of iterating relocs in a section
@@ -3713,16 +4124,16 @@ Target_aarch64<size, big_endian>::scan_section_for_stubs(
 }
 
 
-// Relocate a single stub.
+// Relocate a single reloc stub.
 
 template<int size, bool big_endian>
 void Target_aarch64<size, big_endian>::
-relocate_stub(The_reloc_stub* stub,
-             const The_relocate_info*,
-             Output_section*,
-             unsigned char* view,
-             Address address,
-             section_size_type)
+relocate_reloc_stub(The_reloc_stub* stub,
+                    const The_relocate_info*,
+                    Output_section*,
+                    unsigned char* view,
+                    Address address,
+                    section_size_type)
 {
   typedef AArch64_relocate_functions<size, big_endian> The_reloc_functions;
   typedef typename The_reloc_functions::Status The_reloc_functions_status;
@@ -4820,6 +5231,8 @@ class AArch64_relocate_functions
       static_cast<Valtype>(val | (immed << doffset)));
   }
 
+ public:
+
   // Update selected bits in text.
 
   template<int valsize>
@@ -4847,8 +5260,6 @@ class AArch64_relocate_functions
            : This::STATUS_OVERFLOW);
   }
 
- public:
-
   // Construct a B insn. Note, although we group it here with other relocation
   // operation, there is actually no 'relocation' involved here.
   static inline void
@@ -5110,6 +5521,21 @@ maybe_apply_stub(unsigned int r_type,
 
   const The_aarch64_relobj* aarch64_relobj =
       static_cast<const The_aarch64_relobj*>(object);
+  const AArch64_reloc_property* arp =
+    aarch64_reloc_property_table->get_reloc_property(r_type);
+  gold_assert(arp != NULL);
+
+  // We don't create stubs for undefined symbols, but do for weak.
+  if (gsym
+      && !gsym->use_plt_offset(arp->reference_flags())
+      && gsym->is_undefined())
+    {
+      gold_debug(DEBUG_TARGET,
+                "stub: looking for a stub for undefined symbol %s in file %s",
+                gsym->name(), aarch64_relobj->name().c_str());
+      return false;
+    }
+
   The_stub_table* stub_table = aarch64_relobj->stub_table(relinfo->data_shndx);
   gold_assert(stub_table != NULL);
 
@@ -5121,9 +5547,6 @@ maybe_apply_stub(unsigned int r_type,
   Address new_branch_target = stub_table->address() + stub->offset();
   typename elfcpp::Swap<size, big_endian>::Valtype branch_offset =
       new_branch_target - address;
-  const AArch64_reloc_property* arp =
-      aarch64_reloc_property_table->get_reloc_property(r_type);
-  gold_assert(arp != NULL);
   typename This::Status status = This::template
       rela_general<32>(view, branch_offset, 0, arp);
   if (status != This::STATUS_OKAY)
@@ -5495,6 +5918,14 @@ Target_aarch64<size, big_endian>::optimize_tls_reloc(bool is_final,
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
       // When we already have Local-Exec, there is nothing further we
       // can do.
       return tls::TLSOPT_NONE;
@@ -5672,8 +6103,6 @@ Target_aarch64<size, big_endian>::Scan::local(
 
   typedef Output_data_reloc<elfcpp::SHT_RELA, true, size, big_endian>
       Reloc_section;
-  Output_data_got_aarch64<size, big_endian>* got =
-      target->got_section(symtab, layout);
   unsigned int r_sym = elfcpp::elf_r_sym<size>(rela.get_r_info());
 
   // A local STT_GNU_IFUNC symbol may require a PLT entry.
@@ -5683,6 +6112,9 @@ Target_aarch64<size, big_endian>::Scan::local(
 
   switch (r_type)
     {
+    case elfcpp::R_AARCH64_NONE:
+      break;
+
     case elfcpp::R_AARCH64_ABS32:
     case elfcpp::R_AARCH64_ABS16:
       if (parameters->options().output_is_position_independent())
@@ -5713,6 +6145,49 @@ Target_aarch64<size, big_endian>::Scan::local(
     case elfcpp::R_AARCH64_PREL16:
       break;
 
+    case elfcpp::R_AARCH64_ADR_GOT_PAGE:
+    case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
+    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
+      // The above relocations are used to access GOT entries.
+      {
+       Output_data_got_aarch64<size, big_endian>* got =
+           target->got_section(symtab, layout);
+       bool is_new = false;
+       // This symbol requires a GOT entry.
+       if (is_ifunc)
+         is_new = got->add_local_plt(object, r_sym, GOT_TYPE_STANDARD);
+       else
+         is_new = got->add_local(object, r_sym, GOT_TYPE_STANDARD);
+       if (is_new && parameters->options().output_is_position_independent())
+         target->rela_dyn_section(layout)->
+           add_local_relative(object,
+                              r_sym,
+                              elfcpp::R_AARCH64_RELATIVE,
+                              got,
+                              object->local_got_offset(r_sym,
+                                                       GOT_TYPE_STANDARD),
+                              0,
+                              false);
+      }
+      break;
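In a position-independent output the new GOT slot for a local symbol cannot hold a final address at link time, so an R_AARCH64_RELATIVE entry is emitted and the dynamic loader finishes the job. A rough sketch of what that relocation means at load time, with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main()
    {
      // Hypothetical values: the RELA addend records the link-time value of
      // the symbol; the loader adds its load bias when filling the GOT slot.
      uint64_t addend = 0x1234;                 // link-time value
      uint64_t load_bias = 0x7f0000000000;      // chosen by the loader
      uint64_t got_slot = load_bias + addend;   // R_AARCH64_RELATIVE result
      assert(got_slot == 0x7f0000001234);
      return 0;
    }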
+
+    case elfcpp::R_AARCH64_MOVW_UABS_G0:        // 263
+    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:     // 264
+    case elfcpp::R_AARCH64_MOVW_UABS_G1:        // 265
+    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:     // 266
+    case elfcpp::R_AARCH64_MOVW_UABS_G2:        // 267
+    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:     // 268
+    case elfcpp::R_AARCH64_MOVW_UABS_G3:        // 269
+    case elfcpp::R_AARCH64_MOVW_SABS_G0:        // 270
+    case elfcpp::R_AARCH64_MOVW_SABS_G1:        // 271
+    case elfcpp::R_AARCH64_MOVW_SABS_G2:        // 272
+      if (parameters->options().output_is_position_independent())
+       {
+         gold_error(_("%s: unsupported reloc %u in pos independent link."),
+                    object->name().c_str(), r_type);
+       }
+      break;
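These MOVW relocations bake an absolute address into MOVZ/MOVK immediate groups, which is why the scan rejects them in a position-independent link. A small illustration, not taken from gold, of how a 64-bit address splits into the four 16-bit groups G0..G3 that these relocations consume:

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper: extract the 16-bit group G<g> of an absolute value.
    static uint16_t movw_uabs_group(uint64_t value, unsigned g)
    { return (uint16_t)(value >> (16 * g)); }

    int main()
    {
      uint64_t addr = 0x0000007fb7e12340ULL;   // made-up absolute address
      for (unsigned g = 0; g < 4; ++g)
        std::printf("G%u = 0x%04x\n", g, movw_uabs_group(addr, g));
      return 0;
    }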
+
     case elfcpp::R_AARCH64_LD_PREL_LO19:        // 273
     case elfcpp::R_AARCH64_ADR_PREL_LO21:       // 274
     case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:    // 275
@@ -5746,6 +6221,8 @@ Target_aarch64<size, big_endian>::Scan::local(
        // Create a GOT entry for the tp-relative offset.
        if (!parameters->doing_static_link())
          {
+           Output_data_got_aarch64<size, big_endian>* got =
+               target->got_section(symtab, layout);
            got->add_local_with_rel(object, r_sym, GOT_TYPE_TLS_OFFSET,
                                    target->rela_dyn_section(layout),
                                    elfcpp::R_AARCH64_TLS_TPREL64);
@@ -5753,6 +6230,8 @@ Target_aarch64<size, big_endian>::Scan::local(
        else if (!object->local_has_got_offset(r_sym,
                                               GOT_TYPE_TLS_OFFSET))
          {
+           Output_data_got_aarch64<size, big_endian>* got =
+               target->got_section(symtab, layout);
            got->add_local(object, r_sym, GOT_TYPE_TLS_OFFSET);
            unsigned int got_offset =
                object->local_got_offset(r_sym, GOT_TYPE_TLS_OFFSET);
@@ -5776,6 +6255,8 @@ Target_aarch64<size, big_endian>::Scan::local(
          }
        gold_assert(tlsopt == tls::TLSOPT_NONE);
 
+       Output_data_got_aarch64<size, big_endian>* got =
+           target->got_section(symtab, layout);
        got->add_local_pair_with_rel(object,r_sym, data_shndx,
                                     GOT_TYPE_TLS_PAIR,
                                     target->rela_dyn_section(layout),
@@ -5791,6 +6272,14 @@ Target_aarch64<size, big_endian>::Scan::local(
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
       {
        layout->set_has_static_tls();
        bool output_is_shared = parameters->options().shared();
@@ -5910,6 +6399,9 @@ Target_aarch64<size, big_endian>::Scan::global(
 
   switch (r_type)
     {
+    case elfcpp::R_AARCH64_NONE:
+      break;
+
     case elfcpp::R_AARCH64_ABS16:
     case elfcpp::R_AARCH64_ABS32:
     case elfcpp::R_AARCH64_ABS64:
@@ -5989,6 +6481,34 @@ Target_aarch64<size, big_endian>::Scan::global(
        }
       break;
 
+    case elfcpp::R_AARCH64_MOVW_UABS_G0:        // 263
+    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:     // 264
+    case elfcpp::R_AARCH64_MOVW_UABS_G1:        // 265
+    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:     // 266
+    case elfcpp::R_AARCH64_MOVW_UABS_G2:        // 267
+    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:     // 268
+    case elfcpp::R_AARCH64_MOVW_UABS_G3:        // 269
+    case elfcpp::R_AARCH64_MOVW_SABS_G0:        // 270
+    case elfcpp::R_AARCH64_MOVW_SABS_G1:        // 271
+    case elfcpp::R_AARCH64_MOVW_SABS_G2:        // 272
+      if (parameters->options().output_is_position_independent())
+       {
+         gold_error(_("%s: unsupported reloc %u in pos independent link."),
+                    object->name().c_str(), r_type);
+       }
+      // Make a PLT entry if necessary.
+      if (gsym->needs_plt_entry())
+       {
+         target->make_plt_entry(symtab, layout, gsym);
+         // Since this is not a PC-relative relocation, we may be
+         // taking the address of a function. In that case we need to
+         // set the entry in the dynamic symbol table to the address of
+         // the PLT entry.
+         if (gsym->is_from_dynobj() && !parameters->options().shared())
+           gsym->set_needs_dynsym_value();
+       }
+      break;
+
     case elfcpp::R_AARCH64_LD_PREL_LO19:        // 273
     case elfcpp::R_AARCH64_ADR_PREL_LO21:       // 274
     case elfcpp::R_AARCH64_ADR_PREL_PG_HI21:    // 275
@@ -6017,8 +6537,9 @@ Target_aarch64<size, big_endian>::Scan::global(
 
     case elfcpp::R_AARCH64_ADR_GOT_PAGE:
     case elfcpp::R_AARCH64_LD64_GOT_LO12_NC:
+    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
       {
-       // This pair of relocations is used to access a specific GOT entry.
+       // The above relocations are used to access GOT entries.
        // Note a GOT entry is an *address* to a symbol.
        // The symbol requires a GOT entry
        Output_data_got_aarch64<size, big_endian>* got =
@@ -6187,7 +6708,15 @@ Target_aarch64<size, big_endian>::Scan::global(
     case elfcpp::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
-    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:  // Local executable
+    case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:  // Local executable
       layout->set_has_static_tls();
       if (parameters->options().shared())
        gold_error(_("%s: unsupported TLSLE reloc type %u in shared objects."),
@@ -6329,17 +6858,16 @@ Target_aarch64<size, big_endian>::gc_process_relocs(
     size_t local_symbol_count,
     const unsigned char* plocal_symbols)
 {
+  typedef Target_aarch64<size, big_endian> Aarch64;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+
   if (sh_type == elfcpp::SHT_REL)
     {
       return;
     }
 
-  gold::gc_process_relocs<
-    size, big_endian,
-    Target_aarch64<size, big_endian>,
-    elfcpp::SHT_RELA,
-    typename Target_aarch64<size, big_endian>::Scan,
-    typename Target_aarch64<size, big_endian>::Relocatable_size_for_reloc>(
+  gold::gc_process_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -6370,13 +6898,18 @@ Target_aarch64<size, big_endian>::scan_relocs(
     size_t local_symbol_count,
     const unsigned char* plocal_symbols)
 {
+  typedef Target_aarch64<size, big_endian> Aarch64;
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+
   if (sh_type == elfcpp::SHT_REL)
     {
       gold_error(_("%s: unsupported REL reloc section"),
                 object->name().c_str());
       return;
     }
-  gold::scan_relocs<size, big_endian, Target_aarch64, elfcpp::SHT_RELA, Scan>(
+
+  gold::scan_relocs<size, big_endian, Aarch64, Scan, Classify_reloc>(
     symtab,
     layout,
     this,
@@ -6443,11 +6976,11 @@ Target_aarch64<size, big_endian>::do_finalize_sections(
     }
 
   // Set the size of the _GLOBAL_OFFSET_TABLE_ symbol to the size of
-  // the .got.plt section.
+  // the .got section.
   Symbol* sym = this->global_offset_table_;
   if (sym != NULL)
     {
-      uint64_t data_size = this->got_plt_->current_data_size();
+      uint64_t data_size = this->got_->current_data_size();
       symtab->get_sized_symbol<size>(sym)->set_symsize(data_size);
 
       // If the .got section is more than 0x8000 bytes, we add
@@ -6508,11 +7041,11 @@ template<int size, bool big_endian>
 inline bool
 Target_aarch64<size, big_endian>::Relocate::relocate(
     const Relocate_info<size, big_endian>* relinfo,
+    unsigned int,
     Target_aarch64<size, big_endian>* target,
     Output_section* ,
     size_t relnum,
-    const elfcpp::Rela<size, big_endian>& rela,
-    unsigned int r_type,
+    const unsigned char* preloc,
     const Sized_symbol<size>* gsym,
     const Symbol_value<size>* psymval,
     unsigned char* view,
@@ -6524,6 +7057,8 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
 
   typedef AArch64_relocate_functions<size, big_endian> Reloc;
 
+  const elfcpp::Rela<size, big_endian> rela(preloc);
+  unsigned int r_type = elfcpp::elf_r_type<size>(rela.get_r_info());
   const AArch64_reloc_property* reloc_property =
       aarch64_reloc_property_table->get_reloc_property(r_type);
 
@@ -6610,16 +7145,41 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
       break;
 
     case elfcpp::R_AARCH64_ABS64:
+      if (!parameters->options().apply_dynamic_relocs()
+          && parameters->options().output_is_position_independent()
+          && gsym != NULL
+          && gsym->needs_dynamic_reloc(reloc_property->reference_flags())
+          && !gsym->can_use_relative_reloc(false))
+        // We have generated an absolute dynamic relocation, so do not
+        // apply the relocation statically. (Works around bugs in older
+        // Android dynamic linkers.)
+        break;
       reloc_status = Reloc::template rela_ua<64>(
        view, object, psymval, addend, reloc_property);
       break;
 
     case elfcpp::R_AARCH64_ABS32:
+      if (!parameters->options().apply_dynamic_relocs()
+          && parameters->options().output_is_position_independent()
+          && gsym != NULL
+          && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
+        // We have generated an absolute dynamic relocation, so do not
+        // apply the relocation statically. (Works around bugs in older
+        // Android dynamic linkers.)
+        break;
       reloc_status = Reloc::template rela_ua<32>(
        view, object, psymval, addend, reloc_property);
       break;
 
     case elfcpp::R_AARCH64_ABS16:
+      if (!parameters->options().apply_dynamic_relocs()
+          && parameters->options().output_is_position_independent()
+          && gsym != NULL
+          && gsym->needs_dynamic_reloc(reloc_property->reference_flags()))
+        // We have generated an absolute dynamic relocation, so do not
+        // apply the relocation statically. (Works around bugs in older
+        // Android dynamic linkers.)
+        break;
       reloc_status = Reloc::template rela_ua<16>(
        view, object, psymval, addend, reloc_property);
       break;
@@ -6639,6 +7199,23 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
        view, object, psymval, addend, address, reloc_property);
       break;
 
+    case elfcpp::R_AARCH64_MOVW_UABS_G0:
+    case elfcpp::R_AARCH64_MOVW_UABS_G0_NC:
+    case elfcpp::R_AARCH64_MOVW_UABS_G1:
+    case elfcpp::R_AARCH64_MOVW_UABS_G1_NC:
+    case elfcpp::R_AARCH64_MOVW_UABS_G2:
+    case elfcpp::R_AARCH64_MOVW_UABS_G2_NC:
+    case elfcpp::R_AARCH64_MOVW_UABS_G3:
+      reloc_status = Reloc::template rela_general<32>(
+       view, object, psymval, addend, reloc_property);
+      break;
+    case elfcpp::R_AARCH64_MOVW_SABS_G0:
+    case elfcpp::R_AARCH64_MOVW_SABS_G1:
+    case elfcpp::R_AARCH64_MOVW_SABS_G2:
+      reloc_status = Reloc::movnz(view, psymval->value(object, addend),
+                                 reloc_property);
+      break;
+
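The unsigned groups go through the generic 32-bit helper, while the signed groups go through movnz, which (as the name suggests) selects between the MOVN and MOVZ forms depending on the sign of the value. For reference only, the MOVZ/MOVK/MOVN immediate occupies bits [20:5]; a standalone sketch of inserting a group into that field, with the instruction encodings hand-checked rather than produced by gold:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: place a 16-bit group into the imm16 field
    // (bits [20:5]) of a MOVZ/MOVK/MOVN instruction word.
    static uint32_t set_movw_imm16(uint32_t insn, uint16_t imm16)
    {
      insn &= ~(uint32_t(0xffff) << 5);   // clear the old immediate
      insn |= uint32_t(imm16) << 5;       // insert the new group
      return insn;
    }

    int main()
    {
      const uint32_t movz_x0_0 = 0xd2800000;                     // movz x0, #0
      assert(set_movw_imm16(movz_x0_0, 0x1234) == 0xd2824680);   // movz x0, #0x1234
      return 0;
    }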
     case elfcpp::R_AARCH64_LD_PREL_LO19:
       reloc_status = Reloc::template pcrela_general<32>(
          view, object, psymval, addend, address, reloc_property);
@@ -6679,13 +7256,13 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
          // Return false to stop further processing this reloc.
          return false;
        }
-      // Fallthrough
+      // Fall through.
     case elfcpp::R_AARCH64_JUMP26:
       if (Reloc::maybe_apply_stub(r_type, relinfo, rela, view, address,
                                  gsym, psymval, object,
                                  target->stub_group_size_))
        break;
-      // Fallthrough
+      // Fall through.
     case elfcpp::R_AARCH64_TSTBR14:
     case elfcpp::R_AARCH64_CONDBR19:
       reloc_status = Reloc::template pcrela_general<32>(
@@ -6705,6 +7282,19 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
        view, value, addend, reloc_property);
       break;
 
+    case elfcpp::R_AARCH64_LD64_GOTPAGE_LO15:
+      {
+       gold_assert(have_got_offset);
+       value = target->got_->address() + got_base + got_offset + addend -
+         Reloc::Page(target->got_->address() + got_base);
+       if ((value & 7) != 0)
+         reloc_status = Reloc::STATUS_OVERFLOW;
+       else
+         reloc_status = Reloc::template reloc_common<32>(
+           view, value, reloc_property);
+       break;
+      }
+
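R_AARCH64_LD64_GOTPAGE_LO15 encodes the GOT-entry offset from the page of the GOT base, scaled for a 64-bit load, so the computed value must be doubleword-aligned and fit in 15 bits (32 KiB), as the overflow check above enforces. A worked example with invented addresses:

    #include <cassert>
    #include <cstdint>

    static uint64_t page(uint64_t a) { return a & ~uint64_t(0xfff); }

    int main()
    {
      // Hypothetical addresses: GOT base and one of its entries.
      uint64_t got_base = 0x412010;
      uint64_t entry    = 0x412038;            // got_base + got_offset + addend

      uint64_t value = entry - page(got_base); // 0x412038 - 0x412000 = 0x38
      assert((value & 7) == 0);                // doubleword-aligned
      assert(value < (uint64_t(1) << 15));     // fits the LO15 field
      return 0;
    }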
     case elfcpp::R_AARCH64_TLSGD_ADR_PAGE21:
     case elfcpp::R_AARCH64_TLSGD_ADD_LO12_NC:
     case elfcpp::R_AARCH64_TLSLD_ADR_PAGE21:
@@ -6723,6 +7313,14 @@ Target_aarch64<size, big_endian>::Relocate::relocate(
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
     case elfcpp::R_AARCH64_TLSDESC_ADR_PAGE21:
     case elfcpp::R_AARCH64_TLSDESC_LD64_LO12:
     case elfcpp::R_AARCH64_TLSDESC_ADD_LO12:
@@ -7002,6 +7600,14 @@ Target_aarch64<size, big_endian>::Relocate::relocate_tls(
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_HI12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12:
     case elfcpp::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12:
+    case elfcpp::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
       {
        gold_assert(tls_segment != NULL);
        AArch64_address value = psymval->value(object, 0);
@@ -7055,15 +7661,15 @@ Target_aarch64<size, big_endian>::Relocate::relocate_tls(
            tls_got_offset_type = (tlsopt == tls::TLSOPT_TO_IE
                                   ? GOT_TYPE_TLS_OFFSET
                                   : GOT_TYPE_TLS_DESC);
-           unsigned int got_tlsdesc_offset = 0;
+           int got_tlsdesc_offset = 0;
            if (r_type != elfcpp::R_AARCH64_TLSDESC_CALL
                && tlsopt == tls::TLSOPT_NONE)
              {
                // We created GOT entries in the .got.tlsdesc portion of the
                // .got.plt section, but the offset stored in the symbol is the
                // offset within .got.tlsdesc.
-               got_tlsdesc_offset = (target->got_->data_size()
-                                     + target->got_plt_section()->data_size());
+               got_tlsdesc_offset = (target->got_tlsdesc_->address()
+                                     - target->got_->address());
              }
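The offset into the GOT is now derived from section addresses rather than by summing the sizes of .got and .got.plt; one reason the two can differ is alignment padding between the sections. A small sketch of that arithmetic, with an entirely invented layout:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
      // Hypothetical section layout with padding before .got.tlsdesc.
      uint64_t got_addr = 0x420000, got_size = 0x18;
      uint64_t got_plt_size = 0x28;
      uint64_t got_tlsdesc_addr = 0x420050;    // aligned past .got/.got.plt

      uint64_t by_sizes     = got_size + got_plt_size;       // 0x40
      uint64_t by_addresses = got_tlsdesc_addr - got_addr;   // 0x50
      std::printf("size-based 0x%llx vs address-based 0x%llx\n",
                  (unsigned long long) by_sizes,
                  (unsigned long long) by_addresses);
      return 0;
    }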
            typename elfcpp::Elf_types<size>::Elf_Addr got_entry_address;
            if (gsym != NULL)
@@ -7084,12 +7690,6 @@ Target_aarch64<size, big_endian>::Relocate::relocate_tls(
              }
            if (tlsopt == tls::TLSOPT_TO_IE)
              {
-               if (tls_segment == NULL)
-                 {
-                   gold_assert(parameters->errors()->error_count() > 0
-                               || issue_undefined_symbol_error(gsym));
-                   return aarch64_reloc_funcs::STATUS_BAD_RELOC;
-                 }
                return tls_desc_gd_to_ie(relinfo, target, rela, r_type,
                                         view, psymval, got_entry_address,
                                         address);
@@ -7274,8 +7874,8 @@ Target_aarch64<size, big_endian>::Relocate::tls_ld_to_le(
     {
       // Ideally we should give up gd_to_le relaxation and do gd access.
       // However the gd_to_le relaxation decision has been made early
-      // in the scan stage, where we did not allocate any GOT entry for
-      // this symbol. Therefore we have to exit and report error now.
+      // in the scan stage, where we did not allocate a GOT entry for
+      // this symbol. Therefore we have to exit and report an error now.
       gold_error(_("unexpected reloc insn sequence while relaxing "
                   "tls gd to le for reloc %u."), r_type);
       return aarch64_reloc_funcs::STATUS_BAD_RELOC;
@@ -7540,10 +8140,39 @@ Target_aarch64<size, big_endian>::relocate_section(
     section_size_type view_size,
     const Reloc_symbol_changes* reloc_symbol_changes)
 {
-  gold_assert(sh_type == elfcpp::SHT_RELA);
+  typedef typename elfcpp::Elf_types<size>::Elf_Addr Address;
+  typedef Target_aarch64<size, big_endian> Aarch64;
   typedef typename Target_aarch64<size, big_endian>::Relocate AArch64_relocate;
-  gold::relocate_section<size, big_endian, Target_aarch64, elfcpp::SHT_RELA,
-                        AArch64_relocate, gold::Default_comdat_behavior>(
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+
+  gold_assert(sh_type == elfcpp::SHT_RELA);
+
+  // See if we are relocating a relaxed input section.  If so, the view
+  // covers the whole output section and we need to adjust accordingly.
+  if (needs_special_offset_handling)
+    {
+      const Output_relaxed_input_section* poris =
+       output_section->find_relaxed_input_section(relinfo->object,
+                                                  relinfo->data_shndx);
+      if (poris != NULL)
+       {
+         Address section_address = poris->address();
+         section_size_type section_size = poris->data_size();
+
+         gold_assert((section_address >= address)
+                     && ((section_address + section_size)
+                         <= (address + view_size)));
+
+         off_t offset = section_address - address;
+         view += offset;
+         address += offset;
+         view_size = section_size;
+       }
+    }
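When the view spans the whole output section, the block above narrows it back to just the relaxed input section before relocating. A worked example of the adjustment, all numbers hypothetical:

    #include <cassert>
    #include <cstdint>

    int main()
    {
      // Hypothetical: output-section view and the relaxed input section
      // contained inside it.
      uint64_t address = 0x10000;            // start of the view
      uint64_t view_size = 0x400;
      uint64_t section_address = 0x10040;    // poris->address()
      uint64_t section_size = 0x80;          // poris->data_size()

      assert(section_address >= address
             && section_address + section_size <= address + view_size);

      uint64_t offset = section_address - address;  // advance by 0x40
      address += offset;
      view_size = section_size;
      assert(address == section_address && view_size == 0x80);
      return 0;
    }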
+
+  gold::relocate_section<size, big_endian, Aarch64, AArch64_relocate,
+                        gold::Default_comdat_behavior, Classify_reloc>(
     relinfo,
     this,
     prelocs,
@@ -7556,21 +8185,6 @@ Target_aarch64<size, big_endian>::relocate_section(
     reloc_symbol_changes);
 }
 
-// Return the size of a relocation while scanning during a relocatable
-// link.
-
-template<int size, bool big_endian>
-unsigned int
-Target_aarch64<size, big_endian>::Relocatable_size_for_reloc::
-get_size_for_reloc(
-    unsigned int ,
-    Relobj* )
-{
-  // We will never support SHT_REL relocations.
-  gold_unreachable();
-  return 0;
-}
-
 // Scan the relocs during a relocatable link.
 
 template<int size, bool big_endian>
@@ -7589,13 +8203,14 @@ Target_aarch64<size, big_endian>::scan_relocatable_relocs(
     const unsigned char* plocal_symbols,
     Relocatable_relocs* rr)
 {
-  gold_assert(sh_type == elfcpp::SHT_RELA);
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+  typedef gold::Default_scan_relocatable_relocs<Classify_reloc>
+      Scan_relocatable_relocs;
 
-  typedef gold::Default_scan_relocatable_relocs<elfcpp::SHT_RELA,
-    Relocatable_size_for_reloc> Scan_relocatable_relocs;
+  gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::scan_relocatable_relocs<size, big_endian, elfcpp::SHT_RELA,
-      Scan_relocatable_relocs>(
+  gold::scan_relocatable_relocs<size, big_endian, Scan_relocatable_relocs>(
     symtab,
     layout,
     object,
@@ -7609,6 +8224,45 @@ Target_aarch64<size, big_endian>::scan_relocatable_relocs(
     rr);
 }
 
+// Scan the relocs for --emit-relocs.
+
+template<int size, bool big_endian>
+void
+Target_aarch64<size, big_endian>::emit_relocs_scan(
+    Symbol_table* symtab,
+    Layout* layout,
+    Sized_relobj_file<size, big_endian>* object,
+    unsigned int data_shndx,
+    unsigned int sh_type,
+    const unsigned char* prelocs,
+    size_t reloc_count,
+    Output_section* output_section,
+    bool needs_special_offset_handling,
+    size_t local_symbol_count,
+    const unsigned char* plocal_syms,
+    Relocatable_relocs* rr)
+{
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+  typedef gold::Default_emit_relocs_strategy<Classify_reloc>
+      Emit_relocs_strategy;
+
+  gold_assert(sh_type == elfcpp::SHT_RELA);
+
+  gold::scan_relocatable_relocs<size, big_endian, Emit_relocs_strategy>(
+    symtab,
+    layout,
+    object,
+    data_shndx,
+    prelocs,
+    reloc_count,
+    output_section,
+    needs_special_offset_handling,
+    local_symbol_count,
+    plocal_syms,
+    rr);
+}
+
 // Relocate a section during a relocatable link.
 
 template<int size, bool big_endian>
@@ -7620,22 +8274,23 @@ Target_aarch64<size, big_endian>::relocate_relocs(
     size_t reloc_count,
     Output_section* output_section,
     typename elfcpp::Elf_types<size>::Elf_Off offset_in_output_section,
-    const Relocatable_relocs* rr,
     unsigned char* view,
     typename elfcpp::Elf_types<size>::Elf_Addr view_address,
     section_size_type view_size,
     unsigned char* reloc_view,
     section_size_type reloc_view_size)
 {
+  typedef gold::Default_classify_reloc<elfcpp::SHT_RELA, size, big_endian>
+      Classify_reloc;
+
   gold_assert(sh_type == elfcpp::SHT_RELA);
 
-  gold::relocate_relocs<size, big_endian, elfcpp::SHT_RELA>(
+  gold::relocate_relocs<size, big_endian, Classify_reloc>(
     relinfo,
     prelocs,
     reloc_count,
     output_section,
     offset_in_output_section,
-    rr,
     view,
     view_address,
     view_size,
@@ -7673,6 +8328,144 @@ Target_aarch64<size, big_endian>::is_erratum_843419_sequence(
 }
 
 
+// Return whether this is an erratum 835769 sequence.
+// (Implemented similarly to elfnn-aarch64.c.)
+
+template<int size, bool big_endian>
+bool
+Target_aarch64<size, big_endian>::is_erratum_835769_sequence(
+    typename elfcpp::Swap<32,big_endian>::Valtype insn1,
+    typename elfcpp::Swap<32,big_endian>::Valtype insn2)
+{
+  uint32_t rt;
+  uint32_t rt2 = 0;
+  uint32_t rn;
+  uint32_t rm;
+  uint32_t ra;
+  bool pair;
+  bool load;
+
+  if (Insn_utilities::aarch64_mlxl(insn2)
+      && Insn_utilities::aarch64_mem_op_p (insn1, &rt, &rt2, &pair, &load))
+    {
+      /* Any SIMD memory op is independent of the subsequent MLA
+        by definition of the erratum.  */
+      if (Insn_utilities::aarch64_bit(insn1, 26))
+       return true;
+
+      /* If not SIMD, check for integer memory ops and MLA relationship.  */
+      rn = Insn_utilities::aarch64_rn(insn2);
+      ra = Insn_utilities::aarch64_ra(insn2);
+      rm = Insn_utilities::aarch64_rm(insn2);
+
+      /* If this is a load and there's a true (RAW) dependency, we are safe
+        and this is not an erratum sequence.  */
+      if (load &&
+         (rt == rn || rt == rm || rt == ra
+          || (pair && (rt2 == rn || rt2 == rm || rt2 == ra))))
+       return false;
+
+      /* We conservatively put out stubs for all other cases (including
+        writebacks).  */
+      return true;
+    }
+
+  return false;
+}
+
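The key test above is whether the multiply-accumulate actually reads the register just written by the load (a true RAW dependency through rn, rm, or ra); only then is the pair safe. A standalone sketch of that check on the ra operand, using two hand-encoded instructions; the field positions follow the ARMv8 encodings and this is not a substitute for the Insn_utilities checks:

    #include <cassert>
    #include <cstdint>

    static unsigned bits(uint32_t insn, int pos, int len)
    { return (insn >> pos) & ((1u << len) - 1); }

    int main()
    {
      const uint32_t ldr_x1 = 0xf9400041;  // ldr  x1, [x2]
      const uint32_t madd_a = 0x9b050483;  // madd x3, x4, x5, x1 (reads x1)
      const uint32_t madd_b = 0x9b051883;  // madd x3, x4, x5, x6 (independent)

      unsigned rt = bits(ldr_x1, 0, 5);    // destination of the load
      assert(rt == bits(madd_a, 10, 5));   // RAW dependency: not an erratum pair
      assert(rt != bits(madd_b, 10, 5));   // no dependency: conservatively stubbed
      return 0;
    }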
+
+// Helper method to create erratum stub for ST_E_843419 and ST_E_835769.
+
+template<int size, bool big_endian>
+void
+Target_aarch64<size, big_endian>::create_erratum_stub(
+    AArch64_relobj<size, big_endian>* relobj,
+    unsigned int shndx,
+    section_size_type erratum_insn_offset,
+    Address erratum_address,
+    typename Insn_utilities::Insntype erratum_insn,
+    int erratum_type,
+    unsigned int e843419_adrp_offset)
+{
+  gold_assert(erratum_type == ST_E_843419 || erratum_type == ST_E_835769);
+  The_stub_table* stub_table = relobj->stub_table(shndx);
+  gold_assert(stub_table != NULL);
+  if (stub_table->find_erratum_stub(relobj,
+                                   shndx,
+                                   erratum_insn_offset) == NULL)
+    {
+      const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
+      The_erratum_stub* stub;
+      if (erratum_type == ST_E_835769)
+       stub = new The_erratum_stub(relobj, erratum_type, shndx,
+                                   erratum_insn_offset);
+      else if (erratum_type == ST_E_843419)
+       stub = new E843419_stub<size, big_endian>(
+           relobj, shndx, erratum_insn_offset, e843419_adrp_offset);
+      else
+       gold_unreachable();
+      stub->set_erratum_insn(erratum_insn);
+      stub->set_erratum_address(erratum_address);
+      // For erratum ST_E_843419 and ST_E_835769, the destination address is
+      // always the next insn after the erratum insn.
+      stub->set_destination_address(erratum_address + BPI);
+      stub_table->add_erratum_stub(stub);
+    }
+}
+
+
+// Scan erratum for section SHNDX range [output_address + span_start,
+// output_address + span_end). Note that we do not share code with the
+// scan_erratum_843419_span function, because for 843419 we optimize by
+// scanning only the last few insns of a page, whereas for 835769 we need
+// to scan every insn.
+
+template<int size, bool big_endian>
+void
+Target_aarch64<size, big_endian>::scan_erratum_835769_span(
+    AArch64_relobj<size, big_endian>*  relobj,
+    unsigned int shndx,
+    const section_size_type span_start,
+    const section_size_type span_end,
+    unsigned char* input_view,
+    Address output_address)
+{
+  typedef typename Insn_utilities::Insntype Insntype;
+
+  const int BPI = AArch64_insn_utilities<big_endian>::BYTES_PER_INSN;
+
+  // Adjust output_address and view to the start of span.
+  output_address += span_start;
+  input_view += span_start;
+
+  section_size_type span_length = span_end - span_start;
+  section_size_type offset = 0;
+  for (offset = 0; offset + BPI < span_length; offset += BPI)
+    {
+      Insntype* ip = reinterpret_cast<Insntype*>(input_view + offset);
+      Insntype insn1 = ip[0];
+      Insntype insn2 = ip[1];
+      if (is_erratum_835769_sequence(insn1, insn2))
+       {
+         Insntype erratum_insn = insn2;
+         // "span_start + offset" is the offset for insn1. So for insn2, it is
+         // "span_start + offset + BPI".
+         section_size_type erratum_insn_offset = span_start + offset + BPI;
+         Address erratum_address = output_address + offset + BPI;
+         gold_info(_("Erratum 835769 found and fixed at \"%s\", "
+                        "section %d, offset 0x%08x."),
+                      relobj->name().c_str(), shndx,
+                      (unsigned int)(span_start + offset));
+
+         this->create_erratum_stub(relobj, shndx,
+                                   erratum_insn_offset, erratum_address,
+                                   erratum_insn, ST_E_835769);
+         offset += BPI;  // Skip mac insn.
+       }
+    }
+}  // End of "Target_aarch64::scan_erratum_835769_span".
+
+
 // Scan erratum for section SHNDX range
 // [output_address + span_start, output_address + span_end).
 
@@ -7745,32 +8538,14 @@ Target_aarch64<size, big_endian>::scan_erratum_843419_span(
            }
          if (do_report)
            {
-             gold_warning(_("Erratum 843419 found and fixed at \"%s\", "
-                            "section %d, offset 0x%08x."),
-                          relobj->name().c_str(), shndx,
-                          (unsigned int)(span_start + offset));
-             unsigned int errata_insn_offset =
+             unsigned int erratum_insn_offset =
                span_start + offset + insn_offset;
-             The_stub_table* stub_table = relobj->stub_table(shndx);
-             gold_assert(stub_table != NULL);
-             if (stub_table->find_erratum_stub(relobj,
-                                               shndx,
-                                               errata_insn_offset) == NULL)
-               {
-                 The_erratum_stub* stub = new The_erratum_stub(
-                     relobj, ST_E_843419, shndx,
-                     errata_insn_offset);
-                 Address erratum_address =
-                   output_address + offset + insn_offset;
-                 // Stub destination address is the next insn after the
-                 // erratum.
-                 Address dest_address = erratum_address
-                   + Insn_utilities::BYTES_PER_INSN;
-                 stub->set_erratum_insn(erratum_insn);
-                 stub->set_erratum_address(erratum_address);
-                 stub->set_destination_address(dest_address);
-                 stub_table->add_erratum_stub(stub);
-               }
+             Address erratum_address =
+               output_address + offset + insn_offset;
+             create_erratum_stub(relobj, shndx,
+                                 erratum_insn_offset, erratum_address,
+                                 erratum_insn, ST_E_843419,
+                                 span_start + offset);
            }
        }