From 621843a13c4b5c47866eba59fdbcd5a7f2900c30 Mon Sep 17 00:00:00 2001
From: Jiri Kosina <jkosina@suse.cz>
Date: Tue, 25 Sep 2018 14:38:18 +0200
Subject: [PATCH 18/76] x86/speculation: Apply IBPB more strictly to avoid
 cross-process data leak

commit dbfe2953f63c640463c630746cd5d9de8b2f63ae upstream.

Currently, IBPB is only issued in cases when switching into a non-dumpable
process, the rationale being to protect such 'important and security
sensitive' processes (such as GPG) from data leaking into a different
userspace process via spectre v2.

This is however completely insufficient to provide proper userspace-to-userspace
spectrev2 protection, as any process can poison branch buffers before being
scheduled out, and the newly scheduled process immediately becomes a spectrev2
victim.

In order to minimize the performance impact (for use cases that do require
spectrev2 protection), issue the barrier only in cases when switching between
processes where the victim can't be ptraced by the potential attacker (as in
such cases, the attacker doesn't have to bother with branch buffers at all).

[ tglx: Split up PTRACE_MODE_NOACCESS_CHK into PTRACE_MODE_SCHED and
  PTRACE_MODE_IBPB to be able to do ptrace() context tracking reasonably
  fine-grained ]

Fixes: 18bf3c3ea8 ("x86/speculation: Use Indirect Branch Prediction Barrier in context switch")
Originally-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251437340.15880@cbobk.fhfr.pm
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/mm/tlb.c      | 31 ++++++++++++++++++++-----------
 include/linux/ptrace.h | 21 +++++++++++++++++++--
 kernel/ptrace.c        | 10 ++++++++++
 3 files changed, 49 insertions(+), 13 deletions(-)

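The hunks below change the gate in switch_mm_irqs_off() from "is the next task
non-dumpable?" to "would the previous task be denied ptrace access to the next
one?". For illustration only, here is a minimal standalone C sketch of that
decision; it is not kernel code and not part of the patch to apply. struct mm,
struct task and the prev_may_ptrace_me field are simplified stand-ins, with
prev_may_ptrace_me modelling the outcome of the
ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB) check, and the
X86_FEATURE_USE_IBPB test is omitted.

/*
 * Illustrative sketch only -- hypothetical types, not kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mm {
	uint64_t ctx_id;             /* stand-in for mm->context.ctx_id */
};

struct task {
	struct mm *mm;               /* NULL for kernel threads */
	bool prev_may_ptrace_me;     /* stand-in for the PTRACE_MODE_SPEC_IBPB result */
};

/*
 * IBPB is needed only when switching to a different user address space
 * AND the previous task would be denied ptrace access to the next one.
 */
static bool ibpb_needed(const struct task *next, uint64_t last_ctx_id)
{
	return next && next->mm &&
	       next->mm->ctx_id != last_ctx_id &&
	       !next->prev_may_ptrace_me;
}

int main(void)
{
	struct mm victim_mm = { .ctx_id = 42 };
	struct task victim  = { .mm = &victim_mm, .prev_may_ptrace_me = false };
	struct task sibling = { .mm = &victim_mm, .prev_may_ptrace_me = true };
	struct task kthread = { .mm = NULL };

	/* Different mm, not ptraceable by the previous task: barrier. */
	printf("unrelated victim: IBPB %s\n", ibpb_needed(&victim, 7) ? "yes" : "no");
	/* Same mm as last time (e.g. idle and back): no barrier. */
	printf("same mm:          IBPB %s\n", ibpb_needed(&victim, 42) ? "yes" : "no");
	/* Kernel thread (no mm): no barrier. */
	printf("kernel thread:    IBPB %s\n", ibpb_needed(&kthread, 7) ? "yes" : "no");
	/* A task the previous one may ptrace anyway: no barrier. */
	printf("ptraceable:       IBPB %s\n", ibpb_needed(&sibling, 7) ? "yes" : "no");
	return 0;
}

Any C compiler will build the sketch as-is and print which of the four switches
would trigger the barrier.
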
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index eac92e2d171b..ff8f8e529317 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
+#include <linux/ptrace.h>
 
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -101,6 +102,19 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	local_irq_restore(flags);
 }
 
+static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id)
+{
+	/*
+	 * Check if the current (previous) task has access to the memory
+	 * of the @tsk (next) task. If access is denied, make sure to
+	 * issue a IBPB to stop user->user Spectre-v2 attacks.
+	 *
+	 * Note: __ptrace_may_access() returns 0 or -ERRNO.
+	 */
+	return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id &&
+		ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB));
+}
+
 void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 			struct task_struct *tsk)
 {
@@ -115,18 +129,13 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
 		 * one process from doing Spectre-v2 attacks on another.
 		 *
 		 * As an optimization, flush indirect branches only when
-		 * switching into processes that disable dumping. This
-		 * protects high value processes like gpg, without having
-		 * too high performance overhead. IBPB is *expensive*!
-		 *
-		 * This will not flush branches when switching into kernel
-		 * threads. It will also not flush if we switch to idle
-		 * thread and back to the same process. It will flush if we
-		 * switch to a different non-dumpable process.
+		 * switching into a processes that can't be ptrace by the
+		 * current one (as in such case, attacker has much more
+		 * convenient way how to tamper with the next process than
+		 * branch buffer poisoning).
 		 */
-		if (tsk && tsk->mm &&
-		    tsk->mm->context.ctx_id != last_ctx_id &&
-		    get_dumpable(tsk->mm) != SUID_DUMP_USER)
+		if (static_cpu_has(X86_FEATURE_USE_IBPB) &&
+		    ibpb_needed(tsk, last_ctx_id))
 			indirect_branch_prediction_barrier();
 
 		if (IS_ENABLED(CONFIG_VMAP_STACK)) {
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index d53a23100401..58ae371556bc 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -60,14 +60,17 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
 #define PTRACE_MODE_READ	0x01
 #define PTRACE_MODE_ATTACH	0x02
 #define PTRACE_MODE_NOAUDIT	0x04
-#define PTRACE_MODE_FSCREDS 0x08
-#define PTRACE_MODE_REALCREDS 0x10
+#define PTRACE_MODE_FSCREDS	0x08
+#define PTRACE_MODE_REALCREDS	0x10
+#define PTRACE_MODE_SCHED	0x20
+#define PTRACE_MODE_IBPB	0x40
 
 /* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
 #define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
 #define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
 #define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_SPEC_IBPB (PTRACE_MODE_ATTACH_REALCREDS | PTRACE_MODE_IBPB)
 
 /**
  * ptrace_may_access - check whether the caller is permitted to access
@@ -85,6 +88,20 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
  */
 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
 
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * Similar to ptrace_may_access(). Only to be called from context switch
+ * code. Does not call into audit and the regular LSM hooks due to locking
+ * constraints.
+ */
+extern bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode);
+
 static inline int ptrace_reparented(struct task_struct *child)
 {
 	return !same_thread_group(child->real_parent, child->parent);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f39a7be98fc1..efba851ee018 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -258,6 +258,9 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
 
 static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
 {
+	if (mode & PTRACE_MODE_SCHED)
+		return false;
+
 	if (mode & PTRACE_MODE_NOAUDIT)
 		return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
 	else
@@ -325,9 +328,16 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
 	     !ptrace_has_cap(mm->user_ns, mode)))
 	    return -EPERM;
 
+	if (mode & PTRACE_MODE_SCHED)
+		return 0;
 	return security_ptrace_access_check(task, mode);
 }
 
+bool ptrace_may_access_sched(struct task_struct *task, unsigned int mode)
+{
+	return __ptrace_may_access(task, mode | PTRACE_MODE_SCHED);
+}
+
 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
 {
 	int err;
-- 
2.21.0
