5.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Sep 2022 10:06:51 +0000 (12:06 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 1 Sep 2022 10:06:51 +0000 (12:06 +0200)
added patches:
crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
x86-nospec-fix-i386-rsb-stuffing.patch
x86-nospec-unwreck-the-rsb-stuffing.patch

queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch [new file with mode: 0644]
queue-5.10/series
queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch [new file with mode: 0644]
queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch [new file with mode: 0644]

diff --git a/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch b/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
new file mode 100644 (file)
index 0000000..743ef91
--- /dev/null
+++ b/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
@@ -0,0 +1,37 @@
+From 874b301985ef2f89b8b592ad255e03fb6fbfe605 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@google.com>
+Date: Thu, 25 Aug 2022 22:04:56 -0700
+Subject: crypto: lib - remove unneeded selection of XOR_BLOCKS
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit 874b301985ef2f89b8b592ad255e03fb6fbfe605 upstream.
+
+CRYPTO_LIB_CHACHA_GENERIC doesn't need to select XOR_BLOCKS.  It perhaps
+was thought that it's needed for __crypto_xor, but that's not the case.
+
+Enabling XOR_BLOCKS is problematic because the XOR_BLOCKS code runs a
+benchmark when it is initialized.  That causes a boot time regression on
+systems that didn't have it enabled before.
+
+Therefore, remove this unnecessary and problematic selection.
+
+Fixes: e56e18985596 ("lib/crypto: add prompts back to crypto libraries")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/crypto/Kconfig |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/lib/crypto/Kconfig
++++ b/lib/crypto/Kconfig
+@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ config CRYPTO_LIB_CHACHA_GENERIC
+       tristate
+-      select XOR_BLOCKS
+       help
+         This symbol can be depended upon by arch implementations of the
+         ChaCha library interface that require the generic code as a
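
For context on why the select was never needed: XOR_BLOCKS builds the RAID xor_blocks() machinery, which benchmarks its candidate implementations at initialization time, whereas the generic ChaCha code only ever XORs a keystream into a data buffer, which a plain loop (as in __crypto_xor) covers. A minimal user-space sketch of that kind of XOR follows; the names are made up for illustration, and this is not the kernel's actual __crypto_xor:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Byte-wise XOR of a keystream into a data buffer; needs no
 * benchmarked multi-block XOR backend, hence no XOR_BLOCKS. */
static void xor_buf(uint8_t *dst, const uint8_t *src, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	uint8_t data[4] = { 0x00, 0xff, 0x55, 0xaa };
	const uint8_t keystream[4] = { 0xde, 0xad, 0xbe, 0xef };

	xor_buf(data, keystream, sizeof(data));
	for (size_t i = 0; i < sizeof(data); i++)
		printf("%02x", data[i]);
	printf("\n");
	return 0;
}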
diff --git a/queue-5.10/series b/queue-5.10/series
index bfcfae48adfcc40a9b90a547c48088c000b4107f..a167100e76332ef017524bef054149b1ba73b6f8 100644 (file)
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -1 +1,4 @@
 mm-force-tlb-flush-for-pfnmap-mappings-before-unlink_file_vma.patch
+x86-nospec-unwreck-the-rsb-stuffing.patch
+x86-nospec-fix-i386-rsb-stuffing.patch
+crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
diff --git a/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch b/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch
new file mode 100644 (file)
index 0000000..0d6db63
--- /dev/null
+++ b/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch
@@ -0,0 +1,50 @@
+From 332924973725e8cdcc783c175f68cf7e162cb9e5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 19 Aug 2022 13:01:35 +0200
+Subject: x86/nospec: Fix i386 RSB stuffing
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 332924973725e8cdcc783c175f68cf7e162cb9e5 upstream.
+
+Turns out that i386 doesn't unconditionally have LFENCE, as such the
+loop in __FILL_RETURN_BUFFER isn't actually speculation safe on such
+chips.
+
+Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence")
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -50,6 +50,7 @@
+  * the optimal version — two calls, each with their own speculation
+  * trap should their return address end up getting used, in a loop.
+  */
++#ifdef CONFIG_X86_64
+ #define __FILL_RETURN_BUFFER(reg, nr)                 \
+       mov     $(nr/2), reg;                           \
+ 771:                                                  \
+@@ -60,6 +61,17 @@
+       jnz     771b;                                   \
+       /* barrier for jnz misprediction */             \
+       lfence;
++#else
++/*
++ * i386 doesn't unconditionally have LFENCE, as such it can't
++ * do a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr)                 \
++      .rept nr;                                       \
++      __FILL_RETURN_SLOT;                             \
++      .endr;                                          \
++      add     $(BITS_PER_LONG/8) * nr, %_ASM_SP;
++#endif
+ /*
+  * Stuff a single RSB slot.
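
The shape of the fix, in brief: the looped stuffing sequence relies on LFENCE as the barrier against jnz misprediction, so on i386, where LFENCE is not architecturally guaranteed, the patch instead emits a fully unrolled .rept sequence of call slots followed by a single stack adjustment, leaving no conditional branch to fence. Below is a rough behavioral model in C, not real mitigation code (the real macros emit CALL instructions whose return addresses overwrite RSB entries); it only demonstrates that both shapes account for the same number of slots and the same stack displacement:

#include <assert.h>
#include <stdio.h>

#define RSB_CLEAR_LOOPS	32	/* as in nospec-branch.h */
#define WORD_BYTES	8	/* BITS_PER_LONG/8 on x86_64; 4 on i386 */

static int slots, sp_adjust;

static void fill_return_slot(void)
{
	slots++;	/* stands in for one CALL/speculation-trap slot */
}

/* x86_64 shape: two slots per iteration, nr/2 iterations, LFENCE after. */
static void fill_return_buffer_loop(int nr)
{
	for (int i = 0; i < nr / 2; i++) {
		fill_return_slot();
		fill_return_slot();
		sp_adjust += WORD_BYTES * 2;	/* add $(BITS_PER_LONG/8)*2, %_ASM_SP */
	}
	/* lfence: barrier for jnz misprediction */
}

/* i386 shape: .rept nr, fully unrolled (modeled by a loop here), one
 * stack adjustment at the end, and no LFENCE needed because there is
 * no conditional branch to mispredict. */
static void fill_return_buffer_unrolled(int nr)
{
	for (int i = 0; i < nr; i++)
		fill_return_slot();
	sp_adjust += WORD_BYTES * nr;	/* add $(BITS_PER_LONG/8)*nr, %_ASM_SP */
}

int main(void)
{
	slots = sp_adjust = 0;
	fill_return_buffer_loop(RSB_CLEAR_LOOPS);
	assert(slots == RSB_CLEAR_LOOPS && sp_adjust == WORD_BYTES * RSB_CLEAR_LOOPS);

	slots = sp_adjust = 0;
	fill_return_buffer_unrolled(RSB_CLEAR_LOOPS);
	assert(slots == RSB_CLEAR_LOOPS && sp_adjust == WORD_BYTES * RSB_CLEAR_LOOPS);

	printf("both variants fill %d RSB slots\n", RSB_CLEAR_LOOPS);
	return 0;
}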
diff --git a/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch b/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch
new file mode 100644 (file)
index 0000000..b902353
--- /dev/null
+++ b/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch
@@ -0,0 +1,130 @@
+From 4e3aa9238277597c6c7624f302d81a7b568b6f2d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 16 Aug 2022 14:28:36 +0200
+Subject: x86/nospec: Unwreck the RSB stuffing
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 4e3aa9238277597c6c7624f302d81a7b568b6f2d upstream.
+
+Commit 2b1299322016 ("x86/speculation: Add RSB VM Exit protections")
+made a right mess of the RSB stuffing, rewrite the whole thing to not
+suck.
+
+Thanks to Andrew for the enlightening comment about Post-Barrier RSB
+things so we can make this code less magical.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/YvuNdDWoUZSBjYcm@worktop.programming.kicks-ass.net
+[bwh: Backported to 5.10: adjust context]
+Signed-off-by: Ben Hutchings <benh@debian.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/nospec-branch.h |   80 +++++++++++++++++------------------
+ 1 file changed, 39 insertions(+), 41 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -35,33 +35,44 @@
+ #define RSB_CLEAR_LOOPS               32      /* To forcibly overwrite all entries */
+ /*
++ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
++ */
++#define __FILL_RETURN_SLOT                    \
++      ANNOTATE_INTRA_FUNCTION_CALL;           \
++      call    772f;                           \
++      int3;                                   \
++772:
++
++/*
++ * Stuff the entire RSB.
++ *
+  * Google experimented with loop-unrolling and this turned out to be
+  * the optimal version — two calls, each with their own speculation
+  * trap should their return address end up getting used, in a loop.
+  */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp)     \
+-      mov     $(nr/2), reg;                   \
+-771:                                          \
+-      ANNOTATE_INTRA_FUNCTION_CALL;           \
+-      call    772f;                           \
+-773:  /* speculation trap */                  \
+-      UNWIND_HINT_EMPTY;                      \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     773b;                           \
+-772:                                          \
+-      ANNOTATE_INTRA_FUNCTION_CALL;           \
+-      call    774f;                           \
+-775:  /* speculation trap */                  \
+-      UNWIND_HINT_EMPTY;                      \
+-      pause;                                  \
+-      lfence;                                 \
+-      jmp     775b;                           \
+-774:                                          \
+-      add     $(BITS_PER_LONG/8) * 2, sp;     \
+-      dec     reg;                            \
+-      jnz     771b;                           \
+-      /* barrier for jnz misprediction */     \
++#define __FILL_RETURN_BUFFER(reg, nr)                 \
++      mov     $(nr/2), reg;                           \
++771:                                                  \
++      __FILL_RETURN_SLOT                              \
++      __FILL_RETURN_SLOT                              \
++      add     $(BITS_PER_LONG/8) * 2, %_ASM_SP;       \
++      dec     reg;                                    \
++      jnz     771b;                                   \
++      /* barrier for jnz misprediction */             \
++      lfence;
++
++/*
++ * Stuff a single RSB slot.
++ *
++ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
++ * forced to retire before letting a RET instruction execute.
++ *
++ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
++ * before this point.
++ */
++#define __FILL_ONE_RETURN                             \
++      __FILL_RETURN_SLOT                              \
++      add     $(BITS_PER_LONG/8), %_ASM_SP;           \
+       lfence;
+ #ifdef __ASSEMBLY__
+@@ -120,28 +131,15 @@
+ #endif
+ .endm
+-.macro ISSUE_UNBALANCED_RET_GUARD
+-      ANNOTATE_INTRA_FUNCTION_CALL
+-      call .Lunbalanced_ret_guard_\@
+-      int3
+-.Lunbalanced_ret_guard_\@:
+-      add $(BITS_PER_LONG/8), %_ASM_SP
+-      lfence
+-.endm
+-
+  /*
+   * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+   * monstrosity above, manually.
+   */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+-.ifb \ftr2
+-      ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+-.else
+-      ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+-.endif
+-      __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+-.Lunbalanced_\@:
+-      ISSUE_UNBALANCED_RET_GUARD
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
++      ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
++              __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
++              __stringify(__FILL_ONE_RETURN), \ftr2
++
+ .Lskip_rsb_\@:
+ .endm
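
What the rewrite buys: FILL_RETURN_BUFFER collapses into a single ALTERNATIVE_2 site whose second feature argument defaults to ALT_NOT(X86_FEATURE_ALWAYS), i.e. never applied, so callers that pass only one feature keep the old skip-or-stuff behaviour, while PBRSB-affected paths can pass a second feature to get the one-slot __FILL_ONE_RETURN guard. A conceptual C sketch of that three-way dispatch; the function names are invented, and it assumes the later alternative takes precedence when both features are set:

#include <stdbool.h>
#include <stdio.h>

#define RSB_CLEAR_LOOPS	32

static void fill_return_buffer(int nr)	/* models __FILL_RETURN_BUFFER */
{
	printf("stuff all %d RSB slots\n", nr);
}

static void fill_one_return(void)	/* models __FILL_ONE_RETURN */
{
	printf("stuff one slot as a PBRSB guard\n");
}

/*
 * ftr  ~ "full RSB stuffing needed" (e.g. X86_FEATURE_RSB_VMEXIT)
 * ftr2 ~ "single-slot guard suffices"; its default,
 *	  ALT_NOT(X86_FEATURE_ALWAYS), is never true, preserving the
 *	  old behaviour for callers that pass only ftr.
 */
static void rsb_dispatch(bool ftr, bool ftr2)
{
	if (ftr2)	/* later alternative wins if both are set */
		fill_one_return();
	else if (ftr)
		fill_return_buffer(RSB_CLEAR_LOOPS);
	/* else: jmp .Lskip_rsb_\@ -- no stuffing required */
}

int main(void)
{
	rsb_dispatch(true, false);	/* full stuffing selected */
	rsb_dispatch(false, true);	/* PBRSB single-slot guard */
	rsb_dispatch(false, false);	/* skip entirely */
	return 0;
}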