From: Greg Kroah-Hartman Date: Mon, 12 Mar 2018 16:35:22 +0000 (+0100) Subject: 4.9-stable patches X-Git-Tag: v4.14.27~34 X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b29a9f610ffaf47f38cce64505ceede1f22ad627;p=thirdparty%2Fkernel%2Fstable-queue.git 4.9-stable patches added patches: nospec-include-asm-barrier.h-dependency.patch nospec-kill-array_index_nospec_mask_check.patch revert-x86-retpoline-simplify-vmexit_fill_rsb.patch x86-retpoline-support-retpoline-builds-with-clang.patch x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch --- diff --git a/queue-4.9/alsa-hda-fix-a-wrong-fixup-for-alc289-on-dell-machines.patch b/queue-4.9/alsa-hda-fix-a-wrong-fixup-for-alc289-on-dell-machines.patch deleted file mode 100644 index 0843b63f6af..00000000000 --- a/queue-4.9/alsa-hda-fix-a-wrong-fixup-for-alc289-on-dell-machines.patch +++ /dev/null @@ -1,36 +0,0 @@ -From d5078193e56bb24f4593f00102a3b5e07bb84ee0 Mon Sep 17 00:00:00 2001 -From: Hui Wang -Date: Fri, 2 Mar 2018 13:05:36 +0800 -Subject: ALSA: hda - Fix a wrong FIXUP for alc289 on Dell machines - -From: Hui Wang - -commit d5078193e56bb24f4593f00102a3b5e07bb84ee0 upstream. - -With the alc289, the Pin 0x1b is Headphone-Mic, so we should assign -ALC269_FIXUP_DELL4_MIC_NO_PRESENCE rather than -ALC225_FIXUP_DELL1_MIC_NO_PRESENCE to it. And this change is suggested -by Kailang of Realtek and is verified on the machine. - -Fixes: 3f2f7c553d07 ("ALSA: hda - Fix headset mic detection problem for two Dell machines") -Cc: Kailang Yang -Cc: -Signed-off-by: Hui Wang -Signed-off-by: Takashi Iwai -Signed-off-by: Greg Kroah-Hartman - ---- - sound/pci/hda/patch_realtek.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/sound/pci/hda/patch_realtek.c -+++ b/sound/pci/hda/patch_realtek.c -@@ -6134,7 +6134,7 @@ static const struct snd_hda_pin_quirk al - {0x12, 0x90a60120}, - {0x14, 0x90170110}, - {0x21, 0x0321101f}), -- SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE, -+ SND_HDA_PIN_QUIRK(0x10ec0289, 0x1028, "Dell", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE, - {0x12, 0xb7a60130}, - {0x14, 0x90170110}, - {0x21, 0x04211020}), diff --git a/queue-4.9/nospec-include-asm-barrier.h-dependency.patch b/queue-4.9/nospec-include-asm-barrier.h-dependency.patch new file mode 100644 index 00000000000..115c5b30b71 --- /dev/null +++ b/queue-4.9/nospec-include-asm-barrier.h-dependency.patch @@ -0,0 +1,49 @@ +From eb6174f6d1be16b19cfa43dac296bfed003ce1a6 Mon Sep 17 00:00:00 2001 +From: Dan Williams +Date: Fri, 16 Feb 2018 13:20:54 -0800 +Subject: nospec: Include dependency + +From: Dan Williams + +commit eb6174f6d1be16b19cfa43dac296bfed003ce1a6 upstream. + +The nospec.h header expects the per-architecture header file + to optionally define array_index_mask_nospec(). Include +that dependency to prevent inadvertent fallback to the default +array_index_mask_nospec() implementation. + +The default implementation may not provide a full mitigation +on architectures that perform data value speculation. 
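
For context, the fallback in question is the generic, arithmetic-only mask in <linux/nospec.h>, while an architecture such as x86 supplies a branch-free override in its <asm/barrier.h>. A trimmed sketch of both, based on the upstream code of that era:

  /* Generic fallback in <linux/nospec.h>: if index < size, the sign bit
   * of (index | (size - 1 - index)) is clear and the shift yields ~0UL;
   * otherwise it yields 0.
   */
  #ifndef array_index_mask_nospec
  static inline unsigned long array_index_mask_nospec(unsigned long index,
						      unsigned long size)
  {
  	return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1);
  }
  #endif

  /* x86 override in <asm/barrier.h>: a cmp/sbb sequence the CPU does not
   * value-speculate. Without the #include, nothing pulls this version in
   * and the #ifndef fallback above silently wins.
   */
  static inline unsigned long array_index_mask_nospec(unsigned long index,
						      unsigned long size)
  {
  	unsigned long mask;

  	asm volatile ("cmp %1,%2; sbb %0,%0;"
  			: "=r" (mask)
  			: "g" (size), "r" (index)
  			: "cc");
  	return mask;
  }
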
+ +Reported-by: Christian Borntraeger +Signed-off-by: Dan Williams +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: Will Deacon +Cc: linux-arch@vger.kernel.org +Link: http://lkml.kernel.org/r/151881605404.17395.1341935530792574707.stgit@dwillia2-desk3.amr.corp.intel.com +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + include/linux/nospec.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/linux/nospec.h ++++ b/include/linux/nospec.h +@@ -5,6 +5,7 @@ + + #ifndef _LINUX_NOSPEC_H + #define _LINUX_NOSPEC_H ++#include + + /** + * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise diff --git a/queue-4.9/nospec-kill-array_index_nospec_mask_check.patch b/queue-4.9/nospec-kill-array_index_nospec_mask_check.patch new file mode 100644 index 00000000000..18bedfdd7e1 --- /dev/null +++ b/queue-4.9/nospec-kill-array_index_nospec_mask_check.patch @@ -0,0 +1,83 @@ +From 1d91c1d2c80cb70e2e553845e278b87a960c04da Mon Sep 17 00:00:00 2001 +From: Dan Williams +Date: Fri, 16 Feb 2018 13:20:42 -0800 +Subject: nospec: Kill array_index_nospec_mask_check() + +From: Dan Williams + +commit 1d91c1d2c80cb70e2e553845e278b87a960c04da upstream. + +There are multiple problems with the dynamic sanity checking in +array_index_nospec_mask_check(): + +* It causes unnecessary overhead in the 32-bit case since integer sized + @index values will no longer cause the check to be compiled away like + in the 64-bit case. + +* In the 32-bit case it may trigger with user controllable input when + the expectation is that should only trigger during development of new + kernel enabling. + +* The macro reuses the input parameter in multiple locations which is + broken if someone passes an expression like 'index++' to + array_index_nospec(). + +Reported-by: Linus Torvalds +Signed-off-by: Dan Williams +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Josh Poimboeuf +Cc: Peter Zijlstra +Cc: Thomas Gleixner +Cc: Will Deacon +Cc: linux-arch@vger.kernel.org +Link: http://lkml.kernel.org/r/151881604278.17395.6605847763178076520.stgit@dwillia2-desk3.amr.corp.intel.com +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + include/linux/nospec.h | 22 +--------------------- + 1 file changed, 1 insertion(+), 21 deletions(-) + +--- a/include/linux/nospec.h ++++ b/include/linux/nospec.h +@@ -30,26 +30,6 @@ static inline unsigned long array_index_ + #endif + + /* +- * Warn developers about inappropriate array_index_nospec() usage. +- * +- * Even if the CPU speculates past the WARN_ONCE branch, the +- * sign bit of @index is taken into account when generating the +- * mask. +- * +- * This warning is compiled out when the compiler can infer that +- * @index and @size are less than LONG_MAX. 
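
The argument-reuse problem above is the classic macro double-evaluation hazard. A standalone illustration, using a hypothetical stand-in rather than the kernel macro:

  #include <stdio.h>

  /* Hypothetical stand-in mimicking the removed check: @index is
   * expanded twice, once in the range test and once in the mask.
   */
  #define mask_checked(index, size) \
  	((unsigned long)(index) > 0x7fffffffUL ? 0UL : \
  	 ((unsigned long)(index) < (size) ? ~0UL : 0UL))

  int main(void)
  {
  	unsigned long i = 0;
  	unsigned long mask = mask_checked(i++, 4);

  	printf("i = %lu, mask = %#lx\n", i, mask);	/* i == 2, not 1 */
  	return 0;
  }
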
+- */ +-#define array_index_mask_nospec_check(index, size) \ +-({ \ +- if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, \ +- "array_index_nospec() limited to range of [0, LONG_MAX]\n")) \ +- _mask = 0; \ +- else \ +- _mask = array_index_mask_nospec(index, size); \ +- _mask; \ +-}) +- +-/* + * array_index_nospec - sanitize an array index after a bounds check + * + * For a code sequence like: +@@ -67,7 +47,7 @@ static inline unsigned long array_index_ + ({ \ + typeof(index) _i = (index); \ + typeof(size) _s = (size); \ +- unsigned long _mask = array_index_mask_nospec_check(_i, _s); \ ++ unsigned long _mask = array_index_mask_nospec(_i, _s); \ + \ + BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ + BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ diff --git a/queue-4.9/revert-x86-retpoline-simplify-vmexit_fill_rsb.patch b/queue-4.9/revert-x86-retpoline-simplify-vmexit_fill_rsb.patch new file mode 100644 index 00000000000..bfaff4f4495 --- /dev/null +++ b/queue-4.9/revert-x86-retpoline-simplify-vmexit_fill_rsb.patch @@ -0,0 +1,251 @@ +From d1c99108af3c5992640aa2afa7d2e88c3775c06e Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Mon, 19 Feb 2018 10:50:56 +0000 +Subject: Revert "x86/retpoline: Simplify vmexit_fill_RSB()" + +From: David Woodhouse + +commit d1c99108af3c5992640aa2afa7d2e88c3775c06e upstream. + +This reverts commit 1dde7415e99933bb7293d6b2843752cbdb43ec11. By putting +the RSB filling out of line and calling it, we waste one RSB slot for +returning from the function itself, which means one fewer actual function +call we can make if we're doing the Skylake abomination of call-depth +counting. + +It also changed the number of RSB stuffings we do on vmexit from 32, +which was correct, to 16. Let's just stop with the bikeshedding; it +didn't actually *fix* anything anyway. + +Signed-off-by: David Woodhouse +Acked-by: Thomas Gleixner +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: arjan.van.de.ven@intel.com +Cc: bp@alien8.de +Cc: dave.hansen@intel.com +Cc: jmattson@google.com +Cc: karahmed@amazon.de +Cc: kvm@vger.kernel.org +Cc: pbonzini@redhat.com +Cc: rkrcmar@redhat.com +Link: http://lkml.kernel.org/r/1519037457-7643-4-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/entry/entry_32.S | 3 - + arch/x86/entry/entry_64.S | 3 - + arch/x86/include/asm/asm-prototypes.h | 3 - + arch/x86/include/asm/nospec-branch.h | 70 ++++++++++++++++++++++++++++++---- + arch/x86/lib/Makefile | 1 + arch/x86/lib/retpoline.S | 56 --------------------------- + 6 files changed, 65 insertions(+), 71 deletions(-) + +--- a/arch/x86/entry/entry_32.S ++++ b/arch/x86/entry/entry_32.S +@@ -237,8 +237,7 @@ ENTRY(__switch_to_asm) + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. + */ +- /* Clobbers %ebx */ +- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW ++ FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + #endif + + /* restore callee-saved registers */ +--- a/arch/x86/entry/entry_64.S ++++ b/arch/x86/entry/entry_64.S +@@ -331,8 +331,7 @@ ENTRY(__switch_to_asm) + * exist, overwrite the RSB with entries which capture + * speculative execution to prevent attack. 
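+ *
+ * (The stuffing is done inline rather than by calling an out-of-line
+ *  helper: the helper's own 'ret' would itself consume one of the
+ *  RSB entries it had just written, which is why this revert exists.)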
+ */ +- /* Clobbers %rbx */ +- FILL_RETURN_BUFFER RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW ++ FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW + #endif + + /* restore callee-saved registers */ +--- a/arch/x86/include/asm/asm-prototypes.h ++++ b/arch/x86/include/asm/asm-prototypes.h +@@ -37,7 +37,4 @@ INDIRECT_THUNK(dx) + INDIRECT_THUNK(si) + INDIRECT_THUNK(di) + INDIRECT_THUNK(bp) +-asmlinkage void __fill_rsb(void); +-asmlinkage void __clear_rsb(void); +- + #endif /* CONFIG_RETPOLINE */ +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -8,6 +8,50 @@ + #include + #include + ++/* ++ * Fill the CPU return stack buffer. ++ * ++ * Each entry in the RSB, if used for a speculative 'ret', contains an ++ * infinite 'pause; lfence; jmp' loop to capture speculative execution. ++ * ++ * This is required in various cases for retpoline and IBRS-based ++ * mitigations for the Spectre variant 2 vulnerability. Sometimes to ++ * eliminate potentially bogus entries from the RSB, and sometimes ++ * purely to ensure that it doesn't get empty, which on some CPUs would ++ * allow predictions from other (unwanted!) sources to be used. ++ * ++ * We define a CPP macro such that it can be used from both .S files and ++ * inline assembly. It's possible to do a .macro and then include that ++ * from C via asm(".include ") but let's not go there. ++ */ ++ ++#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ ++#define RSB_FILL_LOOPS 16 /* To avoid underflow */ ++ ++/* ++ * Google experimented with loop-unrolling and this turned out to be ++ * the optimal version — two calls, each with their own speculation ++ * trap should their return address end up getting used, in a loop. ++ */ ++#define __FILL_RETURN_BUFFER(reg, nr, sp) \ ++ mov $(nr/2), reg; \ ++771: \ ++ call 772f; \ ++773: /* speculation trap */ \ ++ pause; \ ++ lfence; \ ++ jmp 773b; \ ++772: \ ++ call 774f; \ ++775: /* speculation trap */ \ ++ pause; \ ++ lfence; \ ++ jmp 775b; \ ++774: \ ++ dec reg; \ ++ jnz 771b; \ ++ add $(BITS_PER_LONG/8) * nr, sp; ++ + #ifdef __ASSEMBLY__ + + /* +@@ -78,10 +122,17 @@ + #endif + .endm + +-/* This clobbers the BX register */ +-.macro FILL_RETURN_BUFFER nr:req ftr:req ++ /* ++ * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP ++ * monstrosity above, manually. 
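++ *
++ * (__FILL_RETURN_BUFFER runs nr/2 loop iterations with two calls per
++ *  iteration, so exactly @nr RSB entries get written; the trailing
++ *  'add $(BITS_PER_LONG/8) * nr, sp' then drops the @nr return
++ *  addresses those calls pushed onto the stack.)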
++ */ ++.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req + #ifdef CONFIG_RETPOLINE +- ALTERNATIVE "", "call __clear_rsb", \ftr ++ ANNOTATE_NOSPEC_ALTERNATIVE ++ ALTERNATIVE "jmp .Lskip_rsb_\@", \ ++ __stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP)) \ ++ \ftr ++.Lskip_rsb_\@: + #endif + .endm + +@@ -156,10 +207,15 @@ extern char __indirect_thunk_end[]; + static inline void vmexit_fill_RSB(void) + { + #ifdef CONFIG_RETPOLINE +- alternative_input("", +- "call __fill_rsb", +- X86_FEATURE_RETPOLINE, +- ASM_NO_INPUT_CLOBBER(_ASM_BX, "memory")); ++ unsigned long loops; ++ ++ asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE ++ ALTERNATIVE("jmp 910f", ++ __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)), ++ X86_FEATURE_RETPOLINE) ++ "910:" ++ : "=r" (loops), ASM_CALL_CONSTRAINT ++ : : "memory" ); + #endif + } + +--- a/arch/x86/lib/Makefile ++++ b/arch/x86/lib/Makefile +@@ -26,7 +26,6 @@ lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += + lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o + lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o + lib-$(CONFIG_RETPOLINE) += retpoline.o +-OBJECT_FILES_NON_STANDARD_retpoline.o :=y + + obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o + +--- a/arch/x86/lib/retpoline.S ++++ b/arch/x86/lib/retpoline.S +@@ -7,7 +7,6 @@ + #include + #include + #include +-#include + + .macro THUNK reg + .section .text.__x86.indirect_thunk +@@ -47,58 +46,3 @@ GENERATE_THUNK(r13) + GENERATE_THUNK(r14) + GENERATE_THUNK(r15) + #endif +- +-/* +- * Fill the CPU return stack buffer. +- * +- * Each entry in the RSB, if used for a speculative 'ret', contains an +- * infinite 'pause; lfence; jmp' loop to capture speculative execution. +- * +- * This is required in various cases for retpoline and IBRS-based +- * mitigations for the Spectre variant 2 vulnerability. Sometimes to +- * eliminate potentially bogus entries from the RSB, and sometimes +- * purely to ensure that it doesn't get empty, which on some CPUs would +- * allow predictions from other (unwanted!) sources to be used. +- * +- * Google experimented with loop-unrolling and this turned out to be +- * the optimal version - two calls, each with their own speculation +- * trap should their return address end up getting used, in a loop. 
+- */ +-.macro STUFF_RSB nr:req sp:req +- mov $(\nr / 2), %_ASM_BX +- .align 16 +-771: +- call 772f +-773: /* speculation trap */ +- pause +- lfence +- jmp 773b +- .align 16 +-772: +- call 774f +-775: /* speculation trap */ +- pause +- lfence +- jmp 775b +- .align 16 +-774: +- dec %_ASM_BX +- jnz 771b +- add $((BITS_PER_LONG/8) * \nr), \sp +-.endm +- +-#define RSB_FILL_LOOPS 16 /* To avoid underflow */ +- +-ENTRY(__fill_rsb) +- STUFF_RSB RSB_FILL_LOOPS, %_ASM_SP +- ret +-END(__fill_rsb) +-EXPORT_SYMBOL_GPL(__fill_rsb) +- +-#define RSB_CLEAR_LOOPS 32 /* To forcibly overwrite all entries */ +- +-ENTRY(__clear_rsb) +- STUFF_RSB RSB_CLEAR_LOOPS, %_ASM_SP +- ret +-END(__clear_rsb) +-EXPORT_SYMBOL_GPL(__clear_rsb) diff --git a/queue-4.9/series b/queue-4.9/series index f4fcc17e091..e461903ddc8 100644 --- a/queue-4.9/series +++ b/queue-4.9/series @@ -33,6 +33,11 @@ alsa-hda-realtek-fix-dock-line-out-volume-on-dell-precision-7520.patch alsa-hda-realtek-make-dock-sound-work-on-thinkpad-l570.patch alsa-seq-don-t-allow-resizing-pool-in-use.patch alsa-seq-more-protection-for-concurrent-write-and-ioctl-races.patch -alsa-hda-fix-a-wrong-fixup-for-alc289-on-dell-machines.patch alsa-hda-add-dock-and-led-support-for-hp-elitebook-820-g3.patch alsa-hda-add-dock-and-led-support-for-hp-probook-640-g2.patch +nospec-kill-array_index_nospec_mask_check.patch +nospec-include-asm-barrier.h-dependency.patch +revert-x86-retpoline-simplify-vmexit_fill_rsb.patch +x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch +x86-retpoline-support-retpoline-builds-with-clang.patch +x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch diff --git a/queue-4.9/x86-retpoline-support-retpoline-builds-with-clang.patch b/queue-4.9/x86-retpoline-support-retpoline-builds-with-clang.patch new file mode 100644 index 00000000000..77c676f9830 --- /dev/null +++ b/queue-4.9/x86-retpoline-support-retpoline-builds-with-clang.patch @@ -0,0 +1,95 @@ +From 87358710c1fb4f1bf96bbe2349975ff9953fc9b2 Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Mon, 19 Feb 2018 10:50:57 +0000 +Subject: x86/retpoline: Support retpoline builds with Clang + +From: David Woodhouse + +commit 87358710c1fb4f1bf96bbe2349975ff9953fc9b2 upstream. 
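
In short: the Makefile now tries GCC's spelling (-mindirect-branch=thunk-extern -mindirect-branch-register) via cc-option and falls back to Clang's single flag (-mretpoline-external-thunk), while the GCC-only __noretpoline attribute moves into compiler-gcc.h so Clang builds, which have no per-function opt-out yet, never see it. A sketch of the effect on __init code (hypothetical module):

  #include <linux/init.h>

  static void (*demo_hook)(void);		/* hypothetical */

  /* On a GCC retpoline build, __init now carries __noinitretpoline
   * (i.e. __attribute__((indirect_branch("keep")))), so this indirect
   * call stays a plain 'call *%reg' -- .init.text is discarded before
   * untrusted code can run. On Clang the attribute never materializes.
   */
  static int __init demo_init(void)
  {
  	if (demo_hook)
  		demo_hook();
  	return 0;
  }
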
+ +Signed-off-by: David Woodhouse +Reviewed-by: Thomas Gleixner +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: arjan.van.de.ven@intel.com +Cc: bp@alien8.de +Cc: dave.hansen@intel.com +Cc: jmattson@google.com +Cc: karahmed@amazon.de +Cc: kvm@vger.kernel.org +Cc: pbonzini@redhat.com +Cc: rkrcmar@redhat.com +Link: http://lkml.kernel.org/r/1519037457-7643-5-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/Makefile | 5 ++++- + include/linux/compiler-clang.h | 5 +++++ + include/linux/compiler-gcc.h | 4 ++++ + include/linux/init.h | 8 ++++---- + 4 files changed, 17 insertions(+), 5 deletions(-) + +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -184,7 +184,10 @@ KBUILD_AFLAGS += $(mflags-y) + + # Avoid indirect branches in kernel to deal with Spectre + ifdef CONFIG_RETPOLINE +- RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register) ++ RETPOLINE_CFLAGS_GCC := -mindirect-branch=thunk-extern -mindirect-branch-register ++ RETPOLINE_CFLAGS_CLANG := -mretpoline-external-thunk ++ ++ RETPOLINE_CFLAGS += $(call cc-option,$(RETPOLINE_CFLAGS_GCC),$(call cc-option,$(RETPOLINE_CFLAGS_CLANG))) + ifneq ($(RETPOLINE_CFLAGS),) + KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE + endif +--- a/include/linux/compiler-clang.h ++++ b/include/linux/compiler-clang.h +@@ -15,3 +15,8 @@ + * with any version that can compile the kernel + */ + #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) ++ ++/* Clang doesn't have a way to turn it off per-function, yet. */ ++#ifdef __noretpoline ++#undef __noretpoline ++#endif +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -88,6 +88,10 @@ + #define __weak __attribute__((weak)) + #define __alias(symbol) __attribute__((alias(#symbol))) + ++#ifdef RETPOLINE ++#define __noretpoline __attribute__((indirect_branch("keep"))) ++#endif ++ + /* + * it doesn't make sense on ARM (currently the only user of __naked) + * to trace naked functions because then mcount is called without +--- a/include/linux/init.h ++++ b/include/linux/init.h +@@ -5,10 +5,10 @@ + #include + + /* Built-in __init functions needn't be compiled with retpoline */ +-#if defined(RETPOLINE) && !defined(MODULE) +-#define __noretpoline __attribute__((indirect_branch("keep"))) ++#if defined(__noretpoline) && !defined(MODULE) ++#define __noinitretpoline __noretpoline + #else +-#define __noretpoline ++#define __noinitretpoline + #endif + + /* These macros are used to mark some functions or +@@ -46,7 +46,7 @@ + + /* These are for everybody (although not all archs will actually + discard it in modules) */ +-#define __init __section(.init.text) __cold notrace __latent_entropy __noretpoline ++#define __init __section(.init.text) __cold notrace __latent_entropy __noinitretpoline + #define __initdata __section(.init.data) + #define __initconst __section(.init.rodata) + #define __exitdata __section(.exit.data) diff --git a/queue-4.9/x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch b/queue-4.9/x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch new file mode 100644 index 00000000000..a8fe0de36d9 --- /dev/null +++ b/queue-4.9/x86-speculation-objtool-annotate-indirect-calls-jumps-for-objtool.patch @@ -0,0 +1,98 @@ +From 9e0e3c5130e949c389caabc8033e9799b129e429 Mon Sep 17 00:00:00 2001 +From: Peter Zijlstra +Date: Wed, 17 Jan 2018 22:34:34 +0100 +Subject: x86/speculation, objtool: Annotate indirect calls/jumps for objtool + +From: Peter 
Zijlstra + +commit 9e0e3c5130e949c389caabc8033e9799b129e429 upstream. + +Annotate the indirect calls/jumps in the CALL_NOSPEC/JUMP_NOSPEC +alternatives. + +Signed-off-by: Peter Zijlstra (Intel) +Reviewed-by: David Woodhouse +Acked-by: Thomas Gleixner +Acked-by: Josh Poimboeuf +Cc: Andy Lutomirski +Cc: Arjan van de Ven +Cc: Borislav Petkov +Cc: Dan Williams +Cc: Dave Hansen +Cc: David Woodhouse +Cc: Greg Kroah-Hartman +Cc: Linus Torvalds +Cc: Peter Zijlstra +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/nospec-branch.h | 27 +++++++++++++++++++++++---- + 1 file changed, 23 insertions(+), 4 deletions(-) + +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -68,6 +68,18 @@ + .endm + + /* ++ * This should be used immediately before an indirect jump/call. It tells ++ * objtool the subsequent indirect jump/call is vouched safe for retpoline ++ * builds. ++ */ ++.macro ANNOTATE_RETPOLINE_SAFE ++ .Lannotate_\@: ++ .pushsection .discard.retpoline_safe ++ _ASM_PTR .Lannotate_\@ ++ .popsection ++.endm ++ ++/* + * These are the bare retpoline primitives for indirect jmp and call. + * Do not use these directly; they only exist to make the ALTERNATIVE + * invocation below less ugly. +@@ -103,9 +115,9 @@ + .macro JMP_NOSPEC reg:req + #ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE +- ALTERNATIVE_2 __stringify(jmp *\reg), \ ++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg), \ + __stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE, \ +- __stringify(lfence; jmp *\reg), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD + #else + jmp *\reg + #endif +@@ -114,9 +126,9 @@ + .macro CALL_NOSPEC reg:req + #ifdef CONFIG_RETPOLINE + ANNOTATE_NOSPEC_ALTERNATIVE +- ALTERNATIVE_2 __stringify(call *\reg), \ ++ ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg), \ + __stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\ +- __stringify(lfence; call *\reg), X86_FEATURE_RETPOLINE_AMD ++ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD + #else + call *\reg + #endif +@@ -144,6 +156,12 @@ + ".long 999b - .\n\t" \ + ".popsection\n\t" + ++#define ANNOTATE_RETPOLINE_SAFE \ ++ "999:\n\t" \ ++ ".pushsection .discard.retpoline_safe\n\t" \ ++ _ASM_PTR " 999b\n\t" \ ++ ".popsection\n\t" ++ + #if defined(CONFIG_X86_64) && defined(RETPOLINE) + + /* +@@ -153,6 +171,7 @@ + # define CALL_NOSPEC \ + ANNOTATE_NOSPEC_ALTERNATIVE \ + ALTERNATIVE( \ ++ ANNOTATE_RETPOLINE_SAFE \ + "call *%[thunk_target]\n", \ + "call __x86_indirect_thunk_%V[thunk_target]\n", \ + X86_FEATURE_RETPOLINE) diff --git a/queue-4.9/x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch b/queue-4.9/x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch new file mode 100644 index 00000000000..9eddb6bf1b7 --- /dev/null +++ b/queue-4.9/x86-speculation-use-ibrs-if-available-before-calling-into-firmware.patch @@ -0,0 +1,221 @@ +From dd84441a797150dcc49298ec95c459a8891d8bb1 Mon Sep 17 00:00:00 2001 +From: David Woodhouse +Date: Mon, 19 Feb 2018 10:50:54 +0000 +Subject: x86/speculation: Use IBRS if available before calling into firmware + +From: David Woodhouse + +commit dd84441a797150dcc49298ec95c459a8891d8bb1 upstream. + +Retpoline means the kernel is safe because it has no indirect branches. +But firmware isn't, so use IBRS for firmware calls if it's available. 
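
The resulting pairing at a call site looks like the sketch below; in-tree the start/end calls live inside the APM and EFI glue added by this patch (e.g. arch_efi_call_virt_setup()/teardown()), and demo_firmware_op() is a hypothetical firmware entry point:

  extern void demo_firmware_op(void);	/* hypothetical firmware call */

  static void demo_call_firmware(void)
  {
  	firmware_restrict_branch_speculation_start();	/* preempt_disable() + IBRS on */
  	demo_firmware_op();	/* firmware's indirect branches now run
  				 * with branch prediction restricted */
  	firmware_restrict_branch_speculation_end();	/* IBRS off + preempt_enable() */
  }
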
+ +Block preemption while IBRS is set, although in practice the call sites +already had to be doing that. + +Ignore hpwdt.c for now. It's taking spinlocks and calling into firmware +code, from an NMI handler. I don't want to touch that with a bargepole. + +Signed-off-by: David Woodhouse +Reviewed-by: Thomas Gleixner +Cc: Linus Torvalds +Cc: Peter Zijlstra +Cc: arjan.van.de.ven@intel.com +Cc: bp@alien8.de +Cc: dave.hansen@intel.com +Cc: jmattson@google.com +Cc: karahmed@amazon.de +Cc: kvm@vger.kernel.org +Cc: pbonzini@redhat.com +Cc: rkrcmar@redhat.com +Link: http://lkml.kernel.org/r/1519037457-7643-2-git-send-email-dwmw@amazon.co.uk +Signed-off-by: Ingo Molnar +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/include/asm/apm.h | 6 +++++ + arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/efi.h | 17 +++++++++++++-- + arch/x86/include/asm/nospec-branch.h | 39 ++++++++++++++++++++++++++--------- + arch/x86/kernel/cpu/bugs.c | 12 +++++++++- + 5 files changed, 63 insertions(+), 12 deletions(-) + +--- a/arch/x86/include/asm/apm.h ++++ b/arch/x86/include/asm/apm.h +@@ -6,6 +6,8 @@ + #ifndef _ASM_X86_MACH_DEFAULT_APM_H + #define _ASM_X86_MACH_DEFAULT_APM_H + ++#include ++ + #ifdef APM_ZERO_SEGS + # define APM_DO_ZERO_SEGS \ + "pushl %%ds\n\t" \ +@@ -31,6 +33,7 @@ static inline void apm_bios_call_asm(u32 + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. + */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -43,6 +46,7 @@ static inline void apm_bios_call_asm(u32 + "=S" (*esi) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + } + + static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in, +@@ -55,6 +59,7 @@ static inline bool apm_bios_call_simple_ + * N.B. We do NOT need a cld after the BIOS call + * because we always save and restore the flags. 
+ */ ++ firmware_restrict_branch_speculation_start(); + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +@@ -67,6 +72,7 @@ static inline bool apm_bios_call_simple_ + "=S" (si) + : "a" (func), "b" (ebx_in), "c" (ecx_in) + : "memory", "cc"); ++ firmware_restrict_branch_speculation_end(); + return error; + } + +--- a/arch/x86/include/asm/cpufeatures.h ++++ b/arch/x86/include/asm/cpufeatures.h +@@ -203,6 +203,7 @@ + #define X86_FEATURE_KAISER ( 7*32+31) /* CONFIG_PAGE_TABLE_ISOLATION w/o nokaiser */ + + #define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */ ++#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */ + + /* Virtualization flags: Linux defined, word 8 */ + #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ +--- a/arch/x86/include/asm/efi.h ++++ b/arch/x86/include/asm/efi.h +@@ -5,6 +5,7 @@ + #include + #include + #include ++#include + + /* + * We map the EFI regions needed for runtime services non-contiguously, +@@ -35,8 +36,18 @@ + + extern unsigned long asmlinkage efi_call_phys(void *, ...); + +-#define arch_efi_call_virt_setup() kernel_fpu_begin() +-#define arch_efi_call_virt_teardown() kernel_fpu_end() ++#define arch_efi_call_virt_setup() \ ++({ \ ++ kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ ++}) ++ ++#define arch_efi_call_virt_teardown() \ ++({ \ ++ firmware_restrict_branch_speculation_end(); \ ++ kernel_fpu_end(); \ ++}) ++ + + /* + * Wrap all the virtual calls in a way that forces the parameters on the stack. +@@ -72,6 +83,7 @@ struct efi_scratch { + efi_sync_low_kernel_mappings(); \ + preempt_disable(); \ + __kernel_fpu_begin(); \ ++ firmware_restrict_branch_speculation_start(); \ + \ + if (efi_scratch.use_pgd) { \ + efi_scratch.prev_cr3 = read_cr3(); \ +@@ -90,6 +102,7 @@ struct efi_scratch { + __flush_tlb_all(); \ + } \ + \ ++ firmware_restrict_branch_speculation_end(); \ + __kernel_fpu_end(); \ + preempt_enable(); \ + }) +--- a/arch/x86/include/asm/nospec-branch.h ++++ b/arch/x86/include/asm/nospec-branch.h +@@ -219,17 +219,38 @@ static inline void vmexit_fill_RSB(void) + #endif + } + ++#define alternative_msr_write(_msr, _val, _feature) \ ++ asm volatile(ALTERNATIVE("", \ ++ "movl %[msr], %%ecx\n\t" \ ++ "movl %[val], %%eax\n\t" \ ++ "movl $0, %%edx\n\t" \ ++ "wrmsr", \ ++ _feature) \ ++ : : [msr] "i" (_msr), [val] "i" (_val) \ ++ : "eax", "ecx", "edx", "memory") ++ + static inline void indirect_branch_prediction_barrier(void) + { +- asm volatile(ALTERNATIVE("", +- "movl %[msr], %%ecx\n\t" +- "movl %[val], %%eax\n\t" +- "movl $0, %%edx\n\t" +- "wrmsr", +- X86_FEATURE_USE_IBPB) +- : : [msr] "i" (MSR_IA32_PRED_CMD), +- [val] "i" (PRED_CMD_IBPB) +- : "eax", "ecx", "edx", "memory"); ++ alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, ++ X86_FEATURE_USE_IBPB); ++} ++ ++/* ++ * With retpoline, we must use IBRS to restrict branch prediction ++ * before calling into firmware. 
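++ *
++ * (Preemption stays disabled across the firmware call so the elevated
++ *  IBRS state cannot leak into another task, and both MSR writes are
++ *  alternatives keyed on X86_FEATURE_USE_IBRS_FW -- CPUs without the
++ *  feature bit execute no WRMSR at all.)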
++ */ ++static inline void firmware_restrict_branch_speculation_start(void) ++{ ++ preempt_disable(); ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, SPEC_CTRL_IBRS, ++ X86_FEATURE_USE_IBRS_FW); ++} ++ ++static inline void firmware_restrict_branch_speculation_end(void) ++{ ++ alternative_msr_write(MSR_IA32_SPEC_CTRL, 0, ++ X86_FEATURE_USE_IBRS_FW); ++ preempt_enable(); + } + + #endif /* __ASSEMBLY__ */ +--- a/arch/x86/kernel/cpu/bugs.c ++++ b/arch/x86/kernel/cpu/bugs.c +@@ -299,6 +299,15 @@ retpoline_auto: + setup_force_cpu_cap(X86_FEATURE_USE_IBPB); + pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); + } ++ ++ /* ++ * Retpoline means the kernel is safe because it has no indirect ++ * branches. But firmware isn't, so use IBRS to protect that. ++ */ ++ if (boot_cpu_has(X86_FEATURE_IBRS)) { ++ setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); ++ pr_info("Enabling Restricted Speculation for firmware calls\n"); ++ } + } + + #undef pr_fmt +@@ -325,8 +334,9 @@ ssize_t cpu_show_spectre_v2(struct devic + if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) + return sprintf(buf, "Not affected\n"); + +- return sprintf(buf, "%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], ++ return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], + boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", ++ boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", + spectre_v2_module_string()); + } + #endif
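
For reference, with retpoline, IBPB and the new IBRS_FW mitigation all active, the extended sysfs format string above would produce output along these lines (illustrative; the first component depends on which spectre_v2_enabled mode was selected):

  $ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
  Mitigation: Full generic retpoline, IBPB, IBRS_FW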