From: Greg Kroah-Hartman
Date: Thu, 1 Sep 2022 10:06:51 +0000 (+0200)
Subject: 5.10-stable patches
X-Git-Tag: v4.9.327~57
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=498a33e7aad1a76c5f9e4738f6840068cb8bcb4a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
        crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
        x86-nospec-fix-i386-rsb-stuffing.patch
        x86-nospec-unwreck-the-rsb-stuffing.patch
---

diff --git a/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch b/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
new file mode 100644
index 00000000000..743ef91fbce
--- /dev/null
+++ b/queue-5.10/crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
@@ -0,0 +1,37 @@
+From 874b301985ef2f89b8b592ad255e03fb6fbfe605 Mon Sep 17 00:00:00 2001
+From: Eric Biggers
+Date: Thu, 25 Aug 2022 22:04:56 -0700
+Subject: crypto: lib - remove unneeded selection of XOR_BLOCKS
+
+From: Eric Biggers
+
+commit 874b301985ef2f89b8b592ad255e03fb6fbfe605 upstream.
+
+CRYPTO_LIB_CHACHA_GENERIC doesn't need to select XOR_BLOCKS. It perhaps
+was thought that it's needed for __crypto_xor, but that's not the case.
+
+Enabling XOR_BLOCKS is problematic because the XOR_BLOCKS code runs a
+benchmark when it is initialized. That causes a boot time regression on
+systems that didn't have it enabled before.
+
+Therefore, remove this unnecessary and problematic selection.
+
+Fixes: e56e18985596 ("lib/crypto: add prompts back to crypto libraries")
+Cc: stable@vger.kernel.org
+Signed-off-by: Eric Biggers
+Signed-off-by: Herbert Xu
+Signed-off-by: Greg Kroah-Hartman
+---
+ lib/crypto/Kconfig | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/lib/crypto/Kconfig
++++ b/lib/crypto/Kconfig
+@@ -33,7 +33,6 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA
+ 
+ config CRYPTO_LIB_CHACHA_GENERIC
+         tristate
+-        select XOR_BLOCKS
+         help
+           This symbol can be depended upon by arch implementations of the
+           ChaCha library interface that require the generic code as a
diff --git a/queue-5.10/series b/queue-5.10/series
index bfcfae48adf..a167100e763 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -1 +1,4 @@
 mm-force-tlb-flush-for-pfnmap-mappings-before-unlink_file_vma.patch
+x86-nospec-unwreck-the-rsb-stuffing.patch
+x86-nospec-fix-i386-rsb-stuffing.patch
+crypto-lib-remove-unneeded-selection-of-xor_blocks.patch
diff --git a/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch b/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch
new file mode 100644
index 00000000000..0d6db63de70
--- /dev/null
+++ b/queue-5.10/x86-nospec-fix-i386-rsb-stuffing.patch
@@ -0,0 +1,50 @@
+From 332924973725e8cdcc783c175f68cf7e162cb9e5 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Fri, 19 Aug 2022 13:01:35 +0200
+Subject: x86/nospec: Fix i386 RSB stuffing
+
+From: Peter Zijlstra
+
+commit 332924973725e8cdcc783c175f68cf7e162cb9e5 upstream.
+
+Turns out that i386 doesn't unconditionally have LFENCE, as such the
+loop in __FILL_RETURN_BUFFER isn't actually speculation safe on such
+chips.
+
+Fixes: ba6e31af2be9 ("x86/speculation: Add LFENCE to RSB fill sequence")
+Reported-by: Ben Hutchings
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/Yv9tj9vbQ9nNlXoY@worktop.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/nospec-branch.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -50,6 +50,7 @@
+  * the optimal version — two calls, each with their own speculation
+  * trap should their return address end up getting used, in a loop.
+  */
++#ifdef CONFIG_X86_64
+ #define __FILL_RETURN_BUFFER(reg, nr) \
+         mov $(nr/2), reg; \
+ 771: \
+@@ -60,6 +61,17 @@
+         jnz 771b; \
+         /* barrier for jnz misprediction */ \
+         lfence;
++#else
++/*
++ * i386 doesn't unconditionally have LFENCE, as such it can't
++ * do a loop.
++ */
++#define __FILL_RETURN_BUFFER(reg, nr) \
++        .rept nr; \
++        __FILL_RETURN_SLOT; \
++        .endr; \
++        add $(BITS_PER_LONG/8) * nr, %_ASM_SP;
++#endif
+
+ /*
+  * Stuff a single RSB slot.
diff --git a/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch b/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch
new file mode 100644
index 00000000000..b9023533644
--- /dev/null
+++ b/queue-5.10/x86-nospec-unwreck-the-rsb-stuffing.patch
@@ -0,0 +1,130 @@
+From 4e3aa9238277597c6c7624f302d81a7b568b6f2d Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra
+Date: Tue, 16 Aug 2022 14:28:36 +0200
+Subject: x86/nospec: Unwreck the RSB stuffing
+
+From: Peter Zijlstra
+
+commit 4e3aa9238277597c6c7624f302d81a7b568b6f2d upstream.
+
+Commit 2b1299322016 ("x86/speculation: Add RSB VM Exit protections")
+made a right mess of the RSB stuffing, rewrite the whole thing to not
+suck.
+
+Thanks to Andrew for the enlightening comment about Post-Barrier RSB
+things so we can make this code less magical.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Peter Zijlstra (Intel)
+Link: https://lkml.kernel.org/r/YvuNdDWoUZSBjYcm@worktop.programming.kicks-ass.net
+[bwh: Backported to 5.10: adjust context]
+Signed-off-by: Ben Hutchings
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/x86/include/asm/nospec-branch.h | 80 +++++++++++++++++------------------
+ 1 file changed, 39 insertions(+), 41 deletions(-)
+
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -35,33 +35,44 @@
+ #define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */
+ 
+ /*
++ * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
++ */
++#define __FILL_RETURN_SLOT \
++        ANNOTATE_INTRA_FUNCTION_CALL; \
++        call 772f; \
++        int3; \
++772:
++
++/*
++ * Stuff the entire RSB.
++ *
+  * Google experimented with loop-unrolling and this turned out to be
+  * the optimal version — two calls, each with their own speculation
+  * trap should their return address end up getting used, in a loop.
+  */
+-#define __FILL_RETURN_BUFFER(reg, nr, sp) \
+-        mov $(nr/2), reg; \
+-771: \
+-        ANNOTATE_INTRA_FUNCTION_CALL; \
+-        call 772f; \
+-773: /* speculation trap */ \
+-        UNWIND_HINT_EMPTY; \
+-        pause; \
+-        lfence; \
+-        jmp 773b; \
+-772: \
+-        ANNOTATE_INTRA_FUNCTION_CALL; \
+-        call 774f; \
+-775: /* speculation trap */ \
+-        UNWIND_HINT_EMPTY; \
+-        pause; \
+-        lfence; \
+-        jmp 775b; \
+-774: \
+-        add $(BITS_PER_LONG/8) * 2, sp; \
+-        dec reg; \
+-        jnz 771b; \
+-        /* barrier for jnz misprediction */ \
++#define __FILL_RETURN_BUFFER(reg, nr) \
++        mov $(nr/2), reg; \
++771: \
++        __FILL_RETURN_SLOT \
++        __FILL_RETURN_SLOT \
++        add $(BITS_PER_LONG/8) * 2, %_ASM_SP; \
++        dec reg; \
++        jnz 771b; \
++        /* barrier for jnz misprediction */ \
++        lfence;
++
++/*
++ * Stuff a single RSB slot.
++ *
++ * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
++ * forced to retire before letting a RET instruction execute.
++ *
++ * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
++ * before this point.
++ */
++#define __FILL_ONE_RETURN \
++        __FILL_RETURN_SLOT \
++        add $(BITS_PER_LONG/8), %_ASM_SP; \
+         lfence;
+ 
+ #ifdef __ASSEMBLY__
+@@ -120,28 +131,15 @@
+ #endif
+ .endm
+ 
+-.macro ISSUE_UNBALANCED_RET_GUARD
+-        ANNOTATE_INTRA_FUNCTION_CALL
+-        call .Lunbalanced_ret_guard_\@
+-        int3
+-.Lunbalanced_ret_guard_\@:
+-        add $(BITS_PER_LONG/8), %_ASM_SP
+-        lfence
+-.endm
+-
+ /*
+  * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
+  * monstrosity above, manually.
+  */
+-.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2
+-.ifb \ftr2
+-        ALTERNATIVE "jmp .Lskip_rsb_\@", "", \ftr
+-.else
+-        ALTERNATIVE_2 "jmp .Lskip_rsb_\@", "", \ftr, "jmp .Lunbalanced_\@", \ftr2
+-.endif
+-        __FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)
+-.Lunbalanced_\@:
+-        ISSUE_UNBALANCED_RET_GUARD
++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
++        ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
++                __stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
++                __stringify(__FILL_ONE_RETURN), \ftr2
++
+ .Lskip_rsb_\@:
+ .endm
+