--- /dev/null
+From 4abff6d48dbcea8200c7ea35ba70c242d128ebf3 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Sun, 17 Apr 2022 17:03:36 +0200
+Subject: objtool: Fix code relocs vs weak symbols
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 4abff6d48dbcea8200c7ea35ba70c242d128ebf3 upstream.
+
+Occasionally objtool-driven code patching (think .static_call_sites,
+.retpoline_sites, etc.) goes sideways and tries to patch an
+instruction that doesn't match.
+
+Much head-scratching and cursing later, the problem is as outlined below
+and affects every section that objtool generates for us, very much
+including the ORC data. The below uses .static_call_sites because it's
+convenient for demonstration purposes, but as mentioned the ORC
+sections, .retpoline_sites and __mcount_loc are all similarly affected.
+
+Consider:
+
+foo-weak.c:
+
+ extern void __SCT__foo(void);
+
+ __attribute__((weak)) void foo(void)
+ {
+ return __SCT__foo();
+ }
+
+foo.c:
+
+ extern void __SCT__foo(void);
+ extern void my_foo(void);
+
+ void foo(void)
+ {
+ my_foo();
+ return __SCT__foo();
+ }
+
+These generate the obvious code
+(gcc -O2 -fcf-protection=none -fno-asynchronous-unwind-tables -c foo*.c):
+
+foo-weak.o:
+0000000000000000 <foo>:
+ 0: e9 00 00 00 00 jmpq 5 <foo+0x5> 1: R_X86_64_PLT32 __SCT__foo-0x4
+
+foo.o:
+0000000000000000 <foo>:
+ 0: 48 83 ec 08 sub $0x8,%rsp
+ 4: e8 00 00 00 00 callq 9 <foo+0x9> 5: R_X86_64_PLT32 my_foo-0x4
+ 9: 48 83 c4 08 add $0x8,%rsp
+ d: e9 00 00 00 00 jmpq 12 <foo+0x12> e: R_X86_64_PLT32 __SCT__foo-0x4
+
+Now, when we link these two files together, we get something like
+(ld -r -o foos.o foo-weak.o foo.o):
+
+foos.o:
+0000000000000000 <foo-0x10>:
+ 0: e9 00 00 00 00 jmpq 5 <foo-0xb> 1: R_X86_64_PLT32 __SCT__foo-0x4
+ 5: 66 2e 0f 1f 84 00 00 00 00 00 nopw %cs:0x0(%rax,%rax,1)
+ f: 90 nop
+
+0000000000000010 <foo>:
+ 10: 48 83 ec 08 sub $0x8,%rsp
+ 14: e8 00 00 00 00 callq 19 <foo+0x9> 15: R_X86_64_PLT32 my_foo-0x4
+ 19: 48 83 c4 08 add $0x8,%rsp
+ 1d: e9 00 00 00 00 jmpq 22 <foo+0x12> 1e: R_X86_64_PLT32 __SCT__foo-0x4
+
+Note that ld preserves the weak function's text but strips the symbol
+off of it (hence objdump doing that funny negative-offset thing). This
+does lead to 'interesting' unused-code issues when objtool is run on
+linked objects, but that seems to be working (fingers crossed).
+
+So far, so good. Now let's consider the objtool static_call output
+section (readelf output, old binutils):
+
+foo-weak.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x2c8 contains 2 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000200000002 R_X86_64_PC32 0000000000000000 .text + 0
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+foo.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x310 contains 2 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000200000002 R_X86_64_PC32 0000000000000000 .text + d
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+foos.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x430 contains 4 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000100000002 R_X86_64_PC32 0000000000000000 .text + 0
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+0000000000000008 0000000100000002 R_X86_64_PC32 0000000000000000 .text + 1d
+000000000000000c 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+So we have two patch sites, one in the dead code of the weak foo and one
+in the real foo. All is well.
+
+*HOWEVER*, when the toolchain strips unused section symbols, it
+generates things like this (using new enough binutils):
+
+foo-weak.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x2c8 contains 2 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000200000002 R_X86_64_PC32 0000000000000000 foo + 0
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+foo.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x310 contains 2 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000200000002 R_X86_64_PC32 0000000000000000 foo + d
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+foos.o:
+
+Relocation section '.rela.static_call_sites' at offset 0x430 contains 4 entries:
+ Offset Info Type Symbol's Value Symbol's Name + Addend
+0000000000000000 0000000100000002 R_X86_64_PC32 0000000000000000 foo + 0
+0000000000000004 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+0000000000000008 0000000100000002 R_X86_64_PC32 0000000000000000 foo + d
+000000000000000c 0000000d00000002 R_X86_64_PC32 0000000000000000 __SCT__foo + 1
+
+And now we can see how that foos.o .static_call_sites section goes
+sideways: we now have _two_ patch sites in foo. One for the weak symbol
+at foo+0 (no longer a static_call site, since the weak copy lost its
+symbol and "foo + 0" now resolves to the strong foo's first
+instruction), and one at foo+0xd, which is in fact the right location.
+
+This seems to happen when objtool cannot find a section symbol, in
+which case it falls back to any other symbol to key off of; however, in
+this case that goes terribly wrong!
+
+As such, teach objtool to create a section symbol when there isn't
+one.
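+
+Resolving both relocation forms by hand shows why only the
+section-based variant survives the link. A worked sketch (addresses
+taken from the foos.o dumps above):
+
+	/* patch site = symbol value + addend                          */
+	/* "foo"(0x10)  + 0    -> 0x10: strong foo's 'sub', bogus site */
+	/* "foo"(0x10)  + 0xd  -> 0x1d: the real site, still correct   */
+	/* ".text"(0x0) + 0    -> 0x00: dead weak copy, harmless       */
+	/* ".text"(0x0) + 0x1d -> 0x1d: the real site, always correct  */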
+
+Fixes: 44f6a7c0755d ("objtool: Fix seg fault with Clang non-section symbols")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20220419203807.655552918@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/elf.c | 187 +++++++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 165 insertions(+), 22 deletions(-)
+
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -514,37 +514,180 @@ int elf_add_reloc(struct elf *elf, struc
+ return 0;
+ }
+
+-int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+- unsigned long offset, unsigned int type,
+- struct section *insn_sec, unsigned long insn_off)
++/*
++ * Ensure that any reloc section containing references to @sym is marked
++ * changed such that it will get re-generated in elf_rebuild_reloc_sections()
++ * with the new symbol index.
++ */
++static void elf_dirty_reloc_sym(struct elf *elf, struct symbol *sym)
++{
++ struct section *sec;
++
++ list_for_each_entry(sec, &elf->sections, list) {
++ struct reloc *reloc;
++
++ if (sec->changed)
++ continue;
++
++ list_for_each_entry(reloc, &sec->reloc_list, list) {
++ if (reloc->sym == sym) {
++ sec->changed = true;
++ break;
++ }
++ }
++ }
++}
++
++/*
++ * Move the first global symbol, as per sh_info, into a new, higher symbol
++ * index. This frees up a symbol index for a new local symbol.
++ */
++static int elf_move_global_symbol(struct elf *elf, struct section *symtab,
++ struct section *symtab_shndx)
+ {
++ Elf_Data *data, *shndx_data = NULL;
++ Elf32_Word first_non_local;
+ struct symbol *sym;
+- int addend;
++ Elf_Scn *s;
+
+- if (insn_sec->sym) {
+- sym = insn_sec->sym;
+- addend = insn_off;
++ first_non_local = symtab->sh.sh_info;
+
+- } else {
+- /*
+- * The Clang assembler strips section symbols, so we have to
+- * reference the function symbol instead:
+- */
+- sym = find_symbol_containing(insn_sec, insn_off);
+- if (!sym) {
+- /*
+- * Hack alert. This happens when we need to reference
+- * the NOP pad insn immediately after the function.
+- */
+- sym = find_symbol_containing(insn_sec, insn_off - 1);
++ sym = find_symbol_by_index(elf, first_non_local);
++ if (!sym) {
++ WARN("no non-local symbols !?");
++ return first_non_local;
++ }
++
++ s = elf_getscn(elf->elf, symtab->idx);
++ if (!s) {
++ WARN_ELF("elf_getscn");
++ return -1;
++ }
++
++ data = elf_newdata(s);
++ if (!data) {
++ WARN_ELF("elf_newdata");
++ return -1;
++ }
++
++ data->d_buf = &sym->sym;
++ data->d_size = sizeof(sym->sym);
++ data->d_align = 1;
++ data->d_type = ELF_T_SYM;
++
++ sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
++ elf_dirty_reloc_sym(elf, sym);
++
++ symtab->sh.sh_info += 1;
++ symtab->sh.sh_size += data->d_size;
++ symtab->changed = true;
++
++ if (symtab_shndx) {
++ s = elf_getscn(elf->elf, symtab_shndx->idx);
++ if (!s) {
++ WARN_ELF("elf_getscn");
++ return -1;
+ }
+
+- if (!sym) {
+- WARN("can't find symbol containing %s+0x%lx", insn_sec->name, insn_off);
++ shndx_data = elf_newdata(s);
++ if (!shndx_data) {
++ WARN_ELF("elf_newshndx_data");
+ return -1;
+ }
+
+- addend = insn_off - sym->offset;
++ shndx_data->d_buf = &sym->sec->idx;
++ shndx_data->d_size = sizeof(Elf32_Word);
++ shndx_data->d_align = 4;
++ shndx_data->d_type = ELF_T_WORD;
++
++ symtab_shndx->sh.sh_size += 4;
++ symtab_shndx->changed = true;
++ }
++
++ return first_non_local;
++}
++
++static struct symbol *
++elf_create_section_symbol(struct elf *elf, struct section *sec)
++{
++ struct section *symtab, *symtab_shndx;
++ Elf_Data *shndx_data = NULL;
++ struct symbol *sym;
++ Elf32_Word shndx;
++
++ symtab = find_section_by_name(elf, ".symtab");
++ if (symtab) {
++ symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
++ if (symtab_shndx)
++ shndx_data = symtab_shndx->data;
++ } else {
++ WARN("no .symtab");
++ return NULL;
++ }
++
++ sym = malloc(sizeof(*sym));
++ if (!sym) {
++ perror("malloc");
++ return NULL;
++ }
++ memset(sym, 0, sizeof(*sym));
++
++ sym->idx = elf_move_global_symbol(elf, symtab, symtab_shndx);
++ if (sym->idx < 0) {
++ WARN("elf_move_global_symbol");
++ return NULL;
++ }
++
++ sym->name = sec->name;
++ sym->sec = sec;
++
++ // st_name 0
++ sym->sym.st_info = GELF_ST_INFO(STB_LOCAL, STT_SECTION);
++ // st_other 0
++ // st_value 0
++ // st_size 0
++ shndx = sec->idx;
++ if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) {
++ sym->sym.st_shndx = shndx;
++ if (!shndx_data)
++ shndx = 0;
++ } else {
++ sym->sym.st_shndx = SHN_XINDEX;
++ if (!shndx_data) {
++ WARN("no .symtab_shndx");
++ return NULL;
++ }
++ }
++
++ if (!gelf_update_symshndx(symtab->data, shndx_data, sym->idx, &sym->sym, shndx)) {
++ WARN_ELF("gelf_update_symshndx");
++ return NULL;
++ }
++
++ elf_add_symbol(elf, sym);
++
++ return sym;
++}
++
++int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
++ unsigned long offset, unsigned int type,
++ struct section *insn_sec, unsigned long insn_off)
++{
++ struct symbol *sym = insn_sec->sym;
++ int addend = insn_off;
++
++ if (!sym) {
++ /*
++ * Due to how weak functions work, we must use section based
++ * relocations. Symbol based relocations would result in the
++ * weak and non-weak function annotations being overlaid on the
++ * non-weak function after linking.
++ */
++ sym = elf_create_section_symbol(elf, insn_sec);
++ if (!sym)
++ return -1;
++
++ insn_sec->sym = sym;
+ }
+
+ return elf_add_reloc(elf, sec, offset, type, sym, addend);
--- /dev/null
+From c087c6e7b551b7f208c0b852304f044954cf2bb3 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Sun, 17 Apr 2022 17:03:40 +0200
+Subject: objtool: Fix type of reloc::addend
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit c087c6e7b551b7f208c0b852304f044954cf2bb3 upstream.
+
+Elf32_Rela::r_addend is of type Elf32_Sword and Elf64_Rela::r_addend is
+of type Elf64_Sxword; that means our reloc::addend needs to be long or
+we face truncation issues when we do elf_rebuild_reloc_section():
+
+ - 107: 48 b8 00 00 00 00 00 00 00 00 movabs $0x0,%rax 109: R_X86_64_64 level4_kernel_pgt+0x80000067
+ + 107: 48 b8 00 00 00 00 00 00 00 00 movabs $0x0,%rax 109: R_X86_64_64 level4_kernel_pgt-0x7fffff99
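+
+In miniature, the damage is a 64-bit addend squeezed through a 32-bit
+int (a standalone sketch; the constant is taken from the example
+above):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		long addend = 0x80000067; /* real addend, needs 64 bits */
+		int trunc = (int)addend;  /* old reloc::addend type     */
+
+		/* prints: 0x80000067 -> -0x7fffff99 */
+		printf("0x%lx -> -0x%x\n", addend, -trunc);
+		return 0;
+	}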
+
+Fixes: 627fce14809b ("objtool: Add ORC unwind table generation")
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lkml.kernel.org/r/20220419203807.596871927@infradead.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/check.c | 8 ++++----
+ tools/objtool/elf.c | 2 +-
+ tools/objtool/include/objtool/elf.h | 4 ++--
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -393,12 +393,12 @@ static int add_dead_ends(struct objtool_
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+ insn = find_last_insn(file, reloc->sym->sec);
+ if (!insn) {
+- WARN("can't find unreachable insn at %s+0x%x",
++ WARN("can't find unreachable insn at %s+0x%lx",
+ reloc->sym->sec->name, reloc->addend);
+ return -1;
+ }
+ } else {
+- WARN("can't find unreachable insn at %s+0x%x",
++ WARN("can't find unreachable insn at %s+0x%lx",
+ reloc->sym->sec->name, reloc->addend);
+ return -1;
+ }
+@@ -428,12 +428,12 @@ reachable:
+ else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+ insn = find_last_insn(file, reloc->sym->sec);
+ if (!insn) {
+- WARN("can't find reachable insn at %s+0x%x",
++ WARN("can't find reachable insn at %s+0x%lx",
+ reloc->sym->sec->name, reloc->addend);
+ return -1;
+ }
+ } else {
+- WARN("can't find reachable insn at %s+0x%x",
++ WARN("can't find reachable insn at %s+0x%lx",
+ reloc->sym->sec->name, reloc->addend);
+ return -1;
+ }
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -485,7 +485,7 @@ static struct section *elf_create_reloc_
+ int reltype);
+
+ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+- unsigned int type, struct symbol *sym, int addend)
++ unsigned int type, struct symbol *sym, long addend)
+ {
+ struct reloc *reloc;
+
+--- a/tools/objtool/include/objtool/elf.h
++++ b/tools/objtool/include/objtool/elf.h
+@@ -69,7 +69,7 @@ struct reloc {
+ struct symbol *sym;
+ unsigned long offset;
+ unsigned int type;
+- int addend;
++ long addend;
+ int idx;
+ bool jump_table_start;
+ };
+@@ -131,7 +131,7 @@ struct elf *elf_open_read(const char *na
+ struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
+
+ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+- unsigned int type, struct symbol *sym, int addend);
++ unsigned int type, struct symbol *sym, long addend);
+ int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+ unsigned long offset, unsigned int type,
+ struct section *insn_sec, unsigned long insn_off);
--- /dev/null
+From d799769188529abc6cbf035a10087a51f7832b6b Mon Sep 17 00:00:00 2001
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+Date: Wed, 9 Mar 2022 17:18:22 +1100
+Subject: powerpc/64: Add UADDR64 relocation support
+
+From: Alexey Kardashevskiy <aik@ozlabs.ru>
+
+commit d799769188529abc6cbf035a10087a51f7832b6b upstream.
+
+When ld detects unaligned relocations, it emits R_PPC64_UADDR64
+relocations instead of R_PPC64_RELATIVE. Currently R_PPC64_UADDR64 is
+detected by arch/powerpc/tools/relocs_check.sh and expected not to work.
+Below is a simple chunk to trigger this behaviour (it disables
+optimization for demonstration purposes only; this also happens with
+-O1/-O2 when CONFIG_PRINTK_INDEX=y, for example):
+
+ #pragma GCC push_options
+ #pragma GCC optimize ("O0")
+ struct entry {
+ const char *file;
+ int line;
+ } __attribute__((packed));
+ static const struct entry e1 = { .file = __FILE__, .line = __LINE__ };
+ static const struct entry e2 = { .file = __FILE__, .line = __LINE__ };
+ ...
+ prom_printf("e1=%s %lx %lx\n", e1.file, (unsigned long) e1.file, mfmsr());
+ prom_printf("e2=%s %lx\n", e2.file, (unsigned long) e2.file);
+ #pragma GCC pop_options
+
+This adds support for UADDR64 on 64-bit. It reuses __dynamic_symtab
+from the 32-bit code, which already supports more relocation types.
+
+Because RELACOUNT counts only R_PPC64_RELATIVE entries, this replaces
+it with RELASZ, the total size of all relocation records, divided by
+RELAENT, the size of one record, to get the entry count.
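+
+In C, the relocation loop now behaves roughly as below (a sketch of
+the logic only; the real implementation is the assembly in the patch,
+and apply_relocs() plus the single 'delta' offset are illustrative
+simplifications):
+
+	#include <elf.h>
+	#include <stdint.h>
+	#include <string.h>
+
+	static void apply_relocs(Elf64_Rela *rela, uint64_t relasz,
+				 uint64_t relaent, Elf64_Sym *dynsym,
+				 uint64_t delta)
+	{
+		uint64_t i, val;
+
+		/* RELASZ / RELAENT replaces the old RELACOUNT count */
+		for (i = 0; i < relasz / relaent; i++) {
+			Elf64_Rela *r = (Elf64_Rela *)((char *)rela +
+						       i * relaent);
+
+			switch (ELF64_R_TYPE(r->r_info)) {
+			case R_PPC64_RELATIVE:
+				val = r->r_addend + delta;
+				break;
+			case R_PPC64_UADDR64:
+				/* symbol value + addend, stored at a
+				 * possibly unaligned r_offset */
+				val = dynsym[ELF64_R_SYM(r->r_info)].st_value
+				    + r->r_addend + delta;
+				break;
+			default:
+				continue;
+			}
+			/* memcpy tolerates the unaligned target */
+			memcpy((void *)(r->r_offset + delta), &val,
+			       sizeof(val));
+		}
+	}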
+
+Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Link: https://lore.kernel.org/r/20220309061822.168173-1-aik@ozlabs.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kernel/reloc_64.S | 67 +++++++++++++++++++++++++------------
+ arch/powerpc/kernel/vmlinux.lds.S | 2 -
+ arch/powerpc/tools/relocs_check.sh | 7 ---
+ 3 files changed, 48 insertions(+), 28 deletions(-)
+
+--- a/arch/powerpc/kernel/reloc_64.S
++++ b/arch/powerpc/kernel/reloc_64.S
+@@ -8,8 +8,10 @@
+ #include <asm/ppc_asm.h>
+
+ RELA = 7
+-RELACOUNT = 0x6ffffff9
++RELASZ = 8
++RELAENT = 9
+ R_PPC64_RELATIVE = 22
++R_PPC64_UADDR64 = 43
+
+ /*
+ * r3 = desired final address of kernel
+@@ -25,29 +27,38 @@ _GLOBAL(relocate)
+ add r9,r9,r12 /* r9 has runtime addr of .rela.dyn section */
+ ld r10,(p_st - 0b)(r12)
+ add r10,r10,r12 /* r10 has runtime addr of _stext */
++ ld r13,(p_sym - 0b)(r12)
++ add r13,r13,r12 /* r13 has runtime addr of .dynsym */
+
+ /*
+- * Scan the dynamic section for the RELA and RELACOUNT entries.
++ * Scan the dynamic section for the RELA, RELASZ and RELAENT entries.
+ */
+ li r7,0
+ li r8,0
+-1: ld r6,0(r11) /* get tag */
++.Ltags:
++ ld r6,0(r11) /* get tag */
+ cmpdi r6,0
+- beq 4f /* end of list */
++ beq .Lend_of_list /* end of list */
+ cmpdi r6,RELA
+ bne 2f
+ ld r7,8(r11) /* get RELA pointer in r7 */
+- b 3f
+-2: addis r6,r6,(-RELACOUNT)@ha
+- cmpdi r6,RELACOUNT@l
++ b 4f
++2: cmpdi r6,RELASZ
+ bne 3f
+- ld r8,8(r11) /* get RELACOUNT value in r8 */
+-3: addi r11,r11,16
+- b 1b
+-4: cmpdi r7,0 /* check we have both RELA and RELACOUNT */
++ ld r8,8(r11) /* get RELASZ value in r8 */
++ b 4f
++3: cmpdi r6,RELAENT
++ bne 4f
++ ld r12,8(r11) /* get RELAENT value in r12 */
++4: addi r11,r11,16
++ b .Ltags
++.Lend_of_list:
++ cmpdi r7,0 /* check we have RELA, RELASZ, RELAENT */
+ cmpdi cr1,r8,0
+- beq 6f
+- beq cr1,6f
++ beq .Lout
++ beq cr1,.Lout
++ cmpdi r12,0
++ beq .Lout
+
+ /*
+ * Work out linktime address of _stext and hence the
+@@ -62,23 +73,39 @@ _GLOBAL(relocate)
+
+ /*
+ * Run through the list of relocations and process the
+- * R_PPC64_RELATIVE ones.
++ * R_PPC64_RELATIVE and R_PPC64_UADDR64 ones.
+ */
++ divd r8,r8,r12 /* RELASZ / RELAENT */
+ mtctr r8
+-5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
++.Lrels: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
+ cmpdi r0,R_PPC64_RELATIVE
+- bne 6f
++ bne .Luaddr64
+ ld r6,0(r9) /* reloc->r_offset */
+ ld r0,16(r9) /* reloc->r_addend */
++ b .Lstore
++.Luaddr64:
++ srdi r14,r0,32 /* ELF64_R_SYM(reloc->r_info) */
++ clrldi r0,r0,32
++ cmpdi r0,R_PPC64_UADDR64
++ bne .Lnext
++ ld r6,0(r9)
++ ld r0,16(r9)
++ mulli r14,r14,24 /* 24 == sizeof(elf64_sym) */
++ add r14,r14,r13 /* elf64_sym[ELF64_R_SYM] */
++ ld r14,8(r14)
++ add r0,r0,r14
++.Lstore:
+ add r0,r0,r3
+ stdx r0,r7,r6
+- addi r9,r9,24
+- bdnz 5b
+-
+-6: blr
++.Lnext:
++ add r9,r9,r12
++ bdnz .Lrels
++.Lout:
++ blr
+
+ .balign 8
+ p_dyn: .8byte __dynamic_start - 0b
+ p_rela: .8byte __rela_dyn_start - 0b
++p_sym: .8byte __dynamic_symtab - 0b
+ p_st: .8byte _stext - 0b
+
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -275,9 +275,7 @@ SECTIONS
+ . = ALIGN(8);
+ .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
+ {
+-#ifdef CONFIG_PPC32
+ __dynamic_symtab = .;
+-#endif
+ *(.dynsym)
+ }
+ .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
+--- a/arch/powerpc/tools/relocs_check.sh
++++ b/arch/powerpc/tools/relocs_check.sh
+@@ -39,6 +39,7 @@ $objdump -R "$vmlinux" |
+ # R_PPC_NONE
+ grep -F -w -v 'R_PPC64_RELATIVE
+ R_PPC64_NONE
++R_PPC64_UADDR64
+ R_PPC_ADDR16_LO
+ R_PPC_ADDR16_HI
+ R_PPC_ADDR16_HA
+@@ -54,9 +55,3 @@ fi
+ num_bad=$(echo "$bad_relocs" | wc -l)
+ echo "WARNING: $num_bad bad relocations"
+ echo "$bad_relocs"
+-
+-# If we see this type of relocation it's an idication that
+-# we /may/ be using an old version of binutils.
+-if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
+- echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
+-fi