]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/blob - queue-4.9/0056-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch
4.9-stable patches
[thirdparty/kernel/stable-queue.git] / queue-4.9 / 0056-x86-speculation-mds-Clear-CPU-buffers-on-exit-to-use.patch
1 From 6c6a2fd175649b6d1ef4de775648a44693d2e58e Mon Sep 17 00:00:00 2001
2 From: Thomas Gleixner <tglx@linutronix.de>
3 Date: Mon, 18 Feb 2019 23:42:51 +0100
4 Subject: [PATCH 56/76] x86/speculation/mds: Clear CPU buffers on exit to user
5
6 commit 04dcbdb8057827b043b3c71aa397c4c63e67d086 upstream.
7
8 Add a static key which controls the invocation of the CPU buffer clear
9 mechanism on exit to user space and add the call into
10 prepare_exit_to_usermode() and do_nmi() right before actually returning.
11
12 Add documentation which kernel to user space transition this covers and
13 explain why some corner cases are not mitigated.
14
15 Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
16 Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
17 Reviewed-by: Borislav Petkov <bp@suse.de>
18 Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
19 Reviewed-by: Jon Masters <jcm@redhat.com>
20 Tested-by: Jon Masters <jcm@redhat.com>
21 Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
22 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
23 ---
24 Documentation/x86/mds.rst | 52 ++++++++++++++++++++++++++++
25 arch/x86/entry/common.c | 3 ++
26 arch/x86/include/asm/nospec-branch.h | 13 +++++++
27 arch/x86/kernel/cpu/bugs.c | 3 ++
28 arch/x86/kernel/nmi.c | 4 +++
29 arch/x86/kernel/traps.c | 8 +++++
30 6 files changed, 83 insertions(+)
31
32 diff --git a/Documentation/x86/mds.rst b/Documentation/x86/mds.rst
33 index 1096738d50f2..54d935bf283b 100644
34 --- a/Documentation/x86/mds.rst
35 +++ b/Documentation/x86/mds.rst
36 @@ -97,3 +97,55 @@ According to current knowledge additional mitigations inside the kernel
37 itself are not required because the necessary gadgets to expose the leaked
38 data cannot be controlled in a way which allows exploitation from malicious
39 user space or VM guests.
40 +
41 +Mitigation points
42 +-----------------
43 +
44 +1. Return to user space
45 +^^^^^^^^^^^^^^^^^^^^^^^
46 +
47 + When transitioning from kernel to user space the CPU buffers are flushed
48 + on affected CPUs when the mitigation is not disabled on the kernel
49 +   command line. The mitigation is enabled through the static key
50 + mds_user_clear.
51 +
52 + The mitigation is invoked in prepare_exit_to_usermode() which covers
53 + most of the kernel to user space transitions. There are a few exceptions
54 + which are not invoking prepare_exit_to_usermode() on return to user
55 + space. These exceptions use the paranoid exit code.
56 +
57 + - Non Maskable Interrupt (NMI):
58 +
59 +     Access to sensitive data like keys, credentials in the NMI context is
60 + mostly theoretical: The CPU can do prefetching or execute a
61 +     misspeculated code path and thereby fetch data which might end up
62 + leaking through a buffer.
63 +
64 + But for mounting other attacks the kernel stack address of the task is
65 + already valuable information. So in full mitigation mode, the NMI is
66 + mitigated on the return from do_nmi() to provide almost complete
67 + coverage.
68 +
69 + - Double fault (#DF):
70 +
71 + A double fault is usually fatal, but the ESPFIX workaround, which can
72 +     be triggered from user space through modify_ldt(2), is a recoverable
73 + double fault. #DF uses the paranoid exit path, so explicit mitigation
74 + in the double fault handler is required.
75 +
76 + - Machine Check Exception (#MC):
77 +
78 + Another corner case is a #MC which hits between the CPU buffer clear
79 + invocation and the actual return to user. As this still is in kernel
80 + space it takes the paranoid exit path which does not clear the CPU
81 + buffers. So the #MC handler repopulates the buffers to some
82 + extent. Machine checks are not reliably controllable and the window is
83 +     extremely small so mitigation would just tick a checkbox that this
84 + theoretical corner case is covered. To keep the amount of special
85 + cases small, ignore #MC.
86 +
87 + - Debug Exception (#DB):
88 +
89 + This takes the paranoid exit path only when the INT1 breakpoint is in
90 + kernel space. #DB on a user space address takes the regular exit path,
91 + so no extra mitigation required.
92 diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
93 index b0cd306dc527..8841d016b4a4 100644
94 --- a/arch/x86/entry/common.c
95 +++ b/arch/x86/entry/common.c
96 @@ -28,6 +28,7 @@
97 #include <asm/vdso.h>
98 #include <asm/uaccess.h>
99 #include <asm/cpufeature.h>
100 +#include <asm/nospec-branch.h>
101
102 #define CREATE_TRACE_POINTS
103 #include <trace/events/syscalls.h>
104 @@ -206,6 +207,8 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
105 #endif
106
107 user_enter_irqoff();
108 +
109 + mds_user_clear_cpu_buffers();
110 }
111
112 #define SYSCALL_EXIT_WORK_FLAGS \
113 diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
114 index 4d11e89351f1..421015ef1703 100644
115 --- a/arch/x86/include/asm/nospec-branch.h
116 +++ b/arch/x86/include/asm/nospec-branch.h
117 @@ -308,6 +308,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
118 DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
119 DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
120
121 +DECLARE_STATIC_KEY_FALSE(mds_user_clear);
122 +
123 #include <asm/segment.h>
124
125 /**
126 @@ -333,6 +335,17 @@ static inline void mds_clear_cpu_buffers(void)
127 asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
128 }
129
130 +/**
131 + * mds_user_clear_cpu_buffers - Mitigation for MDS vulnerability
132 + *
133 + * Clear CPU buffers if the corresponding static key is enabled
134 + */
135 +static inline void mds_user_clear_cpu_buffers(void)
136 +{
137 + if (static_branch_likely(&mds_user_clear))
138 + mds_clear_cpu_buffers();
139 +}
140 +
141 #endif /* __ASSEMBLY__ */
142
143 /*
144 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
145 index a7e9a93e387a..4dc04dc913c3 100644
146 --- a/arch/x86/kernel/cpu/bugs.c
147 +++ b/arch/x86/kernel/cpu/bugs.c
148 @@ -59,6 +59,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
149 /* Control unconditional IBPB in switch_mm() */
150 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
151
152 +/* Control MDS CPU buffer clear before returning to user space */
153 +DEFINE_STATIC_KEY_FALSE(mds_user_clear);
154 +
155 void __init check_bugs(void)
156 {
157 identify_boot_cpu();
158 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
159 index bfe4d6c96fbd..6b7b35d80264 100644
160 --- a/arch/x86/kernel/nmi.c
161 +++ b/arch/x86/kernel/nmi.c
162 @@ -32,6 +32,7 @@
163 #include <asm/x86_init.h>
164 #include <asm/reboot.h>
165 #include <asm/cache.h>
166 +#include <asm/nospec-branch.h>
167
168 #define CREATE_TRACE_POINTS
169 #include <trace/events/nmi.h>
170 @@ -544,6 +545,9 @@ do_nmi(struct pt_regs *regs, long error_code)
171 write_cr2(this_cpu_read(nmi_cr2));
172 if (this_cpu_dec_return(nmi_state))
173 goto nmi_restart;
174 +
175 + if (user_mode(regs))
176 + mds_user_clear_cpu_buffers();
177 }
178 NOKPROBE_SYMBOL(do_nmi);
179
180 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
181 index 5bbfa2f63b8c..ef225fa8e928 100644
182 --- a/arch/x86/kernel/traps.c
183 +++ b/arch/x86/kernel/traps.c
184 @@ -62,6 +62,7 @@
185 #include <asm/alternative.h>
186 #include <asm/fpu/xstate.h>
187 #include <asm/trace/mpx.h>
188 +#include <asm/nospec-branch.h>
189 #include <asm/mpx.h>
190 #include <asm/vm86.h>
191
192 @@ -340,6 +341,13 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
193 regs->ip = (unsigned long)general_protection;
194 regs->sp = (unsigned long)&normal_regs->orig_ax;
195
196 + /*
197 + * This situation can be triggered by userspace via
198 + * modify_ldt(2) and the return does not take the regular
199 + * user space exit, so a CPU buffer clear is required when
200 + * MDS mitigation is enabled.
201 + */
202 + mds_user_clear_cpu_buffers();
203 return;
204 }
205 #endif
206 --
207 2.21.0
208