5.4-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 11 Dec 2020 14:41:39 +0000 (15:41 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 11 Dec 2020 14:41:39 +0000 (15:41 +0100)
added patches:
kbuild-do-not-emit-debug-info-for-assembly-with-llvm_ias-1.patch
x86-lib-change-.weak-to-sym_func_start_weak-for-arch-x86-lib-mem-_64.s.patch

queue-5.4/kbuild-do-not-emit-debug-info-for-assembly-with-llvm_ias-1.patch [new file with mode: 0644]
queue-5.4/x86-lib-change-.weak-to-sym_func_start_weak-for-arch-x86-lib-mem-_64.s.patch [new file with mode: 0644]

diff --git a/queue-5.4/kbuild-do-not-emit-debug-info-for-assembly-with-llvm_ias-1.patch b/queue-5.4/kbuild-do-not-emit-debug-info-for-assembly-with-llvm_ias-1.patch
new file mode 100644 (file)
index 0000000..5547c31
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Fri Dec 11 03:25:04 PM CET 2020
+From: Nick Desaulniers <ndesaulniers@google.com>
+Date: Mon, 9 Nov 2020 10:35:28 -0800
+Subject: Kbuild: do not emit debug info for assembly with LLVM_IAS=1
+
+From: Nick Desaulniers <ndesaulniers@google.com>
+
+commit b8a9092330da2030496ff357272f342eb970d51b upstream.
+
+Clang's integrated assembler produces the following warning for assembly files:
+
+warning: DWARF2 only supports one section per compilation unit
+
+If -Wa,-gdwarf-* is unspecified, then debug info is not emitted for
+assembly sources (it is still emitted for C sources).  This will be
+re-enabled for newer DWARF versions in a follow-up patch.
+
+This enables defconfig+CONFIG_DEBUG_INFO to build cleanly with
+LLVM=1 LLVM_IAS=1 for x86_64 and arm64.
+
+Cc: <stable@vger.kernel.org>
+Link: https://github.com/ClangBuiltLinux/linux/issues/716
+Reported-by: Dmitry Golovin <dima@golovin.in>
+Reported-by: Nathan Chancellor <natechancellor@gmail.com>
+Suggested-by: Dmitry Golovin <dima@golovin.in>
+Suggested-by: Nathan Chancellor <natechancellor@gmail.com>
+Suggested-by: Sedat Dilek <sedat.dilek@gmail.com>
+Reviewed-by: Fangrui Song <maskray@google.com>
+Reviewed-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+[nd: backport to avoid conflicts from:
+  commit 10e68b02c861 ("Makefile: support compressed debug info")
+  commit 7b16994437c7 ("Makefile: Improve compressed debug info support detection")
+  commit 695afd3d7d58 ("kbuild: Simplify DEBUG_INFO Kconfig handling")]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/Makefile
++++ b/Makefile
+@@ -802,8 +802,11 @@ DEBUG_CFLAGS      += -gsplit-dwarf
+ else
+ DEBUG_CFLAGS  += -g
+ endif
++ifneq ($(LLVM_IAS),1)
+ KBUILD_AFLAGS += -Wa,-gdwarf-2
+ endif
++endif
++
+ ifdef CONFIG_DEBUG_INFO_DWARF4
+ DEBUG_CFLAGS  += -gdwarf-4
+ endif
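For reference, the affected block of the 5.4 Makefile reads as follows once the patch above is applied (a reconstruction from the hunk; the enclosing ifdef CONFIG_DEBUG_INFO / ifdef CONFIG_DEBUG_INFO_SPLIT conditionals are surrounding context from the 5.4 tree, not part of this diff):

  ifdef CONFIG_DEBUG_INFO
  ifdef CONFIG_DEBUG_INFO_SPLIT
  DEBUG_CFLAGS	+= -gsplit-dwarf
  else
  DEBUG_CFLAGS	+= -g
  endif
  ifneq ($(LLVM_IAS),1)
  KBUILD_AFLAGS	+= -Wa,-gdwarf-2
  endif
  endif

With LLVM_IAS=1 on the make command line (e.g. make LLVM=1 LLVM_IAS=1), the new ifneq guard keeps -Wa,-gdwarf-2 out of KBUILD_AFLAGS, so Clang's integrated assembler is never asked to emit DWARF2 and the warning quoted above goes away; builds with GNU binutils (LLVM_IAS unset) keep the flag and continue to emit debug info for assembly sources.
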
diff --git a/queue-5.4/x86-lib-change-.weak-to-sym_func_start_weak-for-arch-x86-lib-mem-_64.s.patch b/queue-5.4/x86-lib-change-.weak-to-sym_func_start_weak-for-arch-x86-lib-mem-_64.s.patch
new file mode 100644 (file)
index 0000000..be511a1
--- /dev/null
@@ -0,0 +1,106 @@
+From foo@baz Fri Dec 11 03:25:50 PM CET 2020
+From: Fangrui Song <maskray@google.com>
+Date: Mon, 2 Nov 2020 17:23:58 -0800
+Subject: x86/lib: Change .weak to SYM_FUNC_START_WEAK for arch/x86/lib/mem*_64.S
+
+From: Fangrui Song <maskray@google.com>
+
+commit 4d6ffa27b8e5116c0abb318790fd01d4e12d75e6 upstream.
+
+Commit
+
+  393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
+
+added .weak directives to arch/x86/lib/mem*_64.S instead of changing the
+existing ENTRY macros to WEAK. This can lead to the assembly snippet
+
+  .weak memcpy
+  ...
+  .globl memcpy
+
+which will produce a STB_WEAK memcpy with GNU as but STB_GLOBAL memcpy
+with LLVM's integrated assembler before LLVM 12. LLVM 12 (since
+https://reviews.llvm.org/D90108) will error on such an overridden symbol
+binding.
+
+Commit
+
+  ef1e03152cb0 ("x86/asm: Make some functions local")
+
+changed ENTRY in arch/x86/lib/memcpy_64.S to SYM_FUNC_START_LOCAL, which
+was ineffective due to the preceding .weak directive.
+
+Use the appropriate SYM_FUNC_START_WEAK instead.
+
+Fixes: 393f203f5fd5 ("x86_64: kasan: add interceptors for memset/memmove/memcpy functions")
+Fixes: ef1e03152cb0 ("x86/asm: Make some functions local")
+Reported-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Fangrui Song <maskray@google.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Nathan Chancellor <natechancellor@gmail.com>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/20201103012358.168682-1-maskray@google.com
+[nd: backport due to missing commit e9b9d020c487 ("x86/asm: Annotate aliases")]
+Signed-off-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/lib/memcpy_64.S  |    4 +---
+ arch/x86/lib/memmove_64.S |    4 +---
+ arch/x86/lib/memset_64.S  |    4 +---
+ 3 files changed, 3 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/lib/memcpy_64.S
++++ b/arch/x86/lib/memcpy_64.S
+@@ -15,8 +15,6 @@
+  * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
+  */
+ 
+-.weak memcpy
+-
+ /*
+  * memcpy - Copy a memory block.
+  *
+@@ -29,7 +27,7 @@
+  * rax original destination
+  */
+ ENTRY(__memcpy)
+-ENTRY(memcpy)
++SYM_FUNC_START_WEAK(memcpy)
+       ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
+                     "jmp memcpy_erms", X86_FEATURE_ERMS
+ 
+--- a/arch/x86/lib/memmove_64.S
++++ b/arch/x86/lib/memmove_64.S
+@@ -24,9 +24,7 @@
+  * Output:
+  * rax: dest
+  */
+-.weak memmove
+-
+-ENTRY(memmove)
++SYM_FUNC_START_WEAK(memmove)
+ ENTRY(__memmove)
+ 
+       /* Handle more 32 bytes in loop */
+--- a/arch/x86/lib/memset_64.S
++++ b/arch/x86/lib/memset_64.S
+@@ -6,8 +6,6 @@
+ #include <asm/alternative-asm.h>
+ #include <asm/export.h>
+ 
+-.weak memset
+-
+ /*
+  * ISO C memset - set a memory block to a byte value. This function uses fast
+  * string to get better performance than the original function. The code is
+@@ -19,7 +17,7 @@
+  *
+  * rax   original destination
+  */
+-ENTRY(memset)
++SYM_FUNC_START_WEAK(memset)
+ ENTRY(__memset)
+       /*
+        * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
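
The symbol-binding divergence described in the commit message above can be observed outside the kernel with a minimal standalone file (a hypothetical sketch; the file name and the trivial ret body are made up for illustration):

  # weak.S
  .weak memcpy
  # ... interceptor machinery would sit here ...
  .globl memcpy
  memcpy:
  	ret

Per the commit message, GNU as resolves this .weak/.globl pair to a STB_WEAK memcpy (inspectable with readelf -s weak.o), LLVM's integrated assembler before LLVM 12 resolves it to STB_GLOBAL, and LLVM 12 errors out on the overridden binding. Declaring the weak binding exactly once, at the point of definition via SYM_FUNC_START_WEAK, sidesteps the ambiguity entirely.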