From stable+bounces-27546-greg=kroah.com@vger.kernel.org Tue Mar 12 23:40:59 2024
From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Date: Tue, 12 Mar 2024 15:40:50 -0700
Subject: x86/bugs: Use ALTERNATIVE() instead of mds_user_clear static key
To: stable@vger.kernel.org
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Message-ID: <20240312-delay-verw-backport-5-10-y-v2-5-ad081ccd89ca@linux.intel.com>
Content-Disposition: inline

From: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>

commit 6613d82e617dd7eb8b0c40b2fe3acea655b1d611 upstream.

The VERW mitigation at exit-to-user is enabled via a static branch
mds_user_clear. This static branch is never toggled after boot, and can
be safely replaced with an ALTERNATIVE() which is convenient to use in
asm.

Switch to ALTERNATIVE() to use the VERW mitigation late in the
exit-to-user path. Also remove the now-redundant VERW in exc_nmi() and
arch_exit_to_user_mode().

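For reference, the ALTERNATIVE()-based asm pattern this switches to
looks roughly like the following sketch (the actual CLEAR_CPU_BUFFERS
macro, together with mds_verw_sel, is added by an earlier patch in this
series):

  .macro CLEAR_CPU_BUFFERS
	/* Patched by alternatives at boot: the jmp skips the VERW
	 * unless X86_FEATURE_CLEAR_CPU_BUF was forced on, in which
	 * case the jmp is replaced with nothing and execution falls
	 * through to the VERW. */
	ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
	verw _ASM_RIP(mds_verw_sel)
  .Lskip_verw_\@:
  .endm
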
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lore.kernel.org/all/20240213-delay-verw-v8-4-a6216d83edb7%40linux.intel.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 Documentation/x86/mds.rst            |   36 +++++++++++++++++++++++++----------
 arch/x86/include/asm/entry-common.h  |    1 
 arch/x86/include/asm/nospec-branch.h |   12 -----------
 arch/x86/kernel/cpu/bugs.c           |   15 +++++---------
 arch/x86/kernel/nmi.c                |    3 --
 arch/x86/kvm/vmx/vmx.c               |    2 -
 6 files changed, 33 insertions(+), 36 deletions(-)

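As a rough C-side summary of the pattern the diff below applies at each
call site (using only interfaces that appear in the diff): the
mitigation is now recorded as a forced CPU feature bit at boot and
tested with cpu_feature_enabled(), which is itself patched via
alternatives rather than a static key:

	/* boot, in the mitigation selection paths: */
	setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

	/* run time, e.g. on the VM-entry path in vmx_vcpu_enter_exit(): */
	if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF))
		mds_clear_cpu_buffers();
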
--- a/Documentation/x86/mds.rst
+++ b/Documentation/x86/mds.rst
@@ -95,6 +95,9 @@ The kernel provides a function to invoke

 mds_clear_cpu_buffers()

+The macro CLEAR_CPU_BUFFERS can also be used in ASM late in the exit-to-user
+path. Other than CFLAGS.ZF, this macro doesn't clobber any registers.
+
 The mitigation is invoked on kernel/userspace, hypervisor/guest and C-state
 (idle) transitions.

@@ -138,17 +141,30 @@ Mitigation points

 When transitioning from kernel to user space the CPU buffers are flushed
 on affected CPUs when the mitigation is not disabled on the kernel
- command line. The migitation is enabled through the static key
- mds_user_clear.
+ command line. The mitigation is enabled through the feature flag
+ X86_FEATURE_CLEAR_CPU_BUF.

- The mitigation is invoked in prepare_exit_to_usermode() which covers
- all but one of the kernel to user space transitions. The exception
- is when we return from a Non Maskable Interrupt (NMI), which is
- handled directly in do_nmi().
-
- (The reason that NMI is special is that prepare_exit_to_usermode() can
- enable IRQs. In NMI context, NMIs are blocked, and we don't want to
- enable IRQs with NMIs blocked.)
+ The mitigation is invoked just before transitioning to userspace after
+ user registers are restored. This is done to minimize the window in
+ which kernel data could be accessed after VERW, e.g. via an NMI
+ arriving after VERW has run.
+
+ **Corner case not handled**
+ Interrupts returning to kernel don't clear CPU buffers since the
+ exit-to-user path is expected to do that anyway. But there could be
+ a case when an NMI is generated in the kernel after the exit-to-user
+ path has cleared the buffers. This case is not handled and an NMI
+ returning to kernel doesn't clear CPU buffers because:
+
+ 1. It is rare to get an NMI after VERW, but before returning to userspace.
+ 2. For an unprivileged user, there is no known way to make that NMI
+    less rare or target it.
+ 3. It would take a large number of these precisely-timed NMIs to mount
+    an actual attack. There's presumably not enough bandwidth.
+ 4. The NMI in question occurs after a VERW, i.e. when user state is
+    restored and most interesting data is already scrubbed. What's left
+    is only the data that the NMI touches, and that may or may not be of
+    any interest.


 2. C-State transition
--- a/arch/x86/include/asm/entry-common.h
+++ b/arch/x86/include/asm/entry-common.h
@@ -77,7 +77,6 @@ static inline void arch_exit_to_user_mod

 static __always_inline void arch_exit_to_user_mode(void)
 {
- mds_user_clear_cpu_buffers();
 amd_clear_divider();
 }
 #define arch_exit_to_user_mode arch_exit_to_user_mode
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -370,7 +370,6 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_
 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

-DECLARE_STATIC_KEY_FALSE(mds_user_clear);
 DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

 DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
@@ -403,17 +402,6 @@ static __always_inline void mds_clear_cp
 }

 /**
- * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
- *
- * Clear CPU buffers if the corresponding static key is enabled
- */
-static __always_inline void mds_user_clear_cpu_buffers(void)
-{
- if (static_branch_likely(&mds_user_clear))
- mds_clear_cpu_buffers();
-}
-
-/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -109,9 +109,6 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_i
 /* Control unconditional IBPB in switch_mm() */
 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

-/* Control MDS CPU buffer clear before returning to user space */
-DEFINE_STATIC_KEY_FALSE(mds_user_clear);
-EXPORT_SYMBOL_GPL(mds_user_clear);
 /* Control MDS CPU buffer clear before idling (halt, mwait) */
 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
 EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -249,7 +246,7 @@ static void __init mds_select_mitigation
 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
 mds_mitigation = MDS_MITIGATION_VMWERV;

- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
 (mds_nosmt || cpu_mitigations_auto_nosmt()))
@@ -353,7 +350,7 @@ static void __init taa_select_mitigation
 * For guests that can't determine whether the correct microcode is
 * present on host, enable the mitigation for UCODE_NEEDED as well.
 */
- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

 if (taa_nosmt || cpu_mitigations_auto_nosmt())
 cpu_smt_disable(false);
@@ -421,7 +418,7 @@ static void __init mmio_select_mitigatio
 */
 if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
 boot_cpu_has(X86_FEATURE_RTM)))
- static_branch_enable(&mds_user_clear);
+ setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
 else
 static_branch_enable(&mmio_stale_data_clear);

@@ -481,12 +478,12 @@ static void __init md_clear_update_mitig
 if (cpu_mitigations_off())
 return;

- if (!static_key_enabled(&mds_user_clear))
+ if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
 goto out;

 /*
- * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
- * mitigation, if necessary.
+ * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
+ * Stale Data mitigation, if necessary.
 */
 if (mds_mitigation == MDS_MITIGATION_OFF &&
 boot_cpu_has_bug(X86_BUG_MDS)) {
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -519,9 +519,6 @@ nmi_restart:
 write_cr2(this_cpu_read(nmi_cr2));
 if (this_cpu_dec_return(nmi_state))
 goto nmi_restart;
-
- if (user_mode(regs))
- mds_user_clear_cpu_buffers();
 }

 #if defined(CONFIG_X86_64) && IS_ENABLED(CONFIG_KVM_INTEL)
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6795,7 +6795,7 @@ static noinstr void vmx_vcpu_enter_exit(
 /* L1D Flush includes CPU buffer clear to mitigate MDS */
 if (static_branch_unlikely(&vmx_l1d_should_flush))
 vmx_l1d_flush(vcpu);
- else if (static_branch_unlikely(&mds_user_clear))
+ else if (cpu_feature_enabled(X86_FEATURE_CLEAR_CPU_BUF))
 mds_clear_cpu_buffers();
 else if (static_branch_unlikely(&mmio_stale_data_clear) &&
 kvm_arch_has_assigned_device(vcpu->kvm))