From foo@baz Tue 14 May 2019 08:29:35 PM CEST
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 25 Nov 2018 19:33:48 +0100
Subject: x86/speculation: Avoid __switch_to_xtra() calls

From: Thomas Gleixner <tglx@linutronix.de>

commit 5635d99953f04b550738f6f4c1c532667c3fd872 upstream.

The TIF_SPEC_IB bit does not need to be evaluated in the decision to invoke
__switch_to_xtra() when:

 - CONFIG_SMP is disabled

 - The conditional STIBP mode is disabled

The TIF_SPEC_IB bit still controls IBPB in both cases so the TIF work mask
checks might invoke __switch_to_xtra() for nothing if TIF_SPEC_IB is the
only set bit in the work masks.

Optimize it out by masking the bit at compile time for CONFIG_SMP=n and at
run time when the static key controlling the conditional STIBP mode is
disabled.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jon Masters <jcm@redhat.com>
Cc: Waiman Long <longman9394@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Dave Stewart <david.c.stewart@intel.com>
Cc: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20181125185005.374062201@linutronix.de
[bwh: Backported to 4.4: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
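
Editor's note, not part of the patch: the hunks below mask TIF_SPEC_IB out of
the context-switch work masks at compile time (CONFIG_SMP=n) and at run time
(conditional STIBP disabled). The following is a minimal, standalone C sketch
of that masking idea. cond_stibp_enabled stands in for the switch_to_cond_stibp
static key, the *_BIT values are made up rather than the real TIF_* layout, and
the single combined check simplifies the kernel's separate
_TIF_WORK_CTXSW_PREV/_TIF_WORK_CTXSW_NEXT masks. Build with -DSMP to exercise
the run-time path.

/* Illustrative userspace sketch, not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define TIF_IO_BITMAP_BIT	(1UL << 0)	/* made-up bit positions */
#define TIF_SSBD_BIT		(1UL << 1)
#define TIF_SPEC_IB_BIT		(1UL << 2)

/* Compile-time masking: on UP, TIF_SPEC_IB is not part of the work mask. */
#define WORK_CTXSW_BASE		(TIF_IO_BITMAP_BIT | TIF_SSBD_BIT)
#ifdef SMP
# define WORK_CTXSW		(WORK_CTXSW_BASE | TIF_SPEC_IB_BIT)
#else
# define WORK_CTXSW		(WORK_CTXSW_BASE)
#endif

static bool cond_stibp_enabled;	/* stand-in for the static key */

/* Would the slow __switch_to_xtra() path be taken for this switch? */
static bool needs_switch_to_xtra(unsigned long prev_tif, unsigned long next_tif)
{
#ifdef SMP
	/* Run-time masking: TIF_SPEC_IB only matters for conditional STIBP. */
	if (!cond_stibp_enabled) {
		prev_tif &= ~TIF_SPEC_IB_BIT;
		next_tif &= ~TIF_SPEC_IB_BIT;
	}
#endif
	return (prev_tif | next_tif) & WORK_CTXSW;
}

int main(void)
{
	/* Only TIF_SPEC_IB differs between the two tasks. */
	printf("cond STIBP off: %d\n", needs_switch_to_xtra(TIF_SPEC_IB_BIT, 0));
	cond_stibp_enabled = true;
	printf("cond STIBP on:  %d\n", needs_switch_to_xtra(TIF_SPEC_IB_BIT, 0));
	return 0;
}

With -DSMP this prints 0 then 1: the slow path is skipped while conditional
STIBP is off, which is exactly the case the patch optimizes. Without -DSMP both
checks report 0 because the bit never enters the work mask at compile time.
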
 arch/x86/include/asm/thread_info.h | 13 +++++++++++--
 arch/x86/kernel/process.h          | 15 +++++++++++++++
 2 files changed, 26 insertions(+), 2 deletions(-)

--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -150,9 +150,18 @@ struct thread_info {
 _TIF_NOHZ)

 /* flags to check in __switch_to() */
-#define _TIF_WORK_CTXSW \
+#define _TIF_WORK_CTXSW_BASE \
 (_TIF_IO_BITMAP|_TIF_NOTSC|_TIF_BLOCKSTEP| \
- _TIF_SSBD|_TIF_SPEC_IB)
+ _TIF_SSBD)
+
+/*
+ * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.
+ */
+#ifdef CONFIG_SMP
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB)
+#else
+# define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE)
+#endif

 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
--- a/arch/x86/kernel/process.h
+++ b/arch/x86/kernel/process.h
@@ -2,6 +2,8 @@
 //
 // Code shared between 32 and 64 bit

+#include <asm/spec-ctrl.h>
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);

 /*
@@ -14,6 +16,19 @@ static inline void switch_to_extra(struc
 	unsigned long next_tif = task_thread_info(next)->flags;
 	unsigned long prev_tif = task_thread_info(prev)->flags;

+	if (IS_ENABLED(CONFIG_SMP)) {
+		/*
+		 * Avoid __switch_to_xtra() invocation when conditional
+		 * STIBP is disabled and the only different bit is
+		 * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not
+		 * in the TIF_WORK_CTXSW masks.
+		 */
+		if (!static_branch_likely(&switch_to_cond_stibp)) {
+			prev_tif &= ~_TIF_SPEC_IB;
+			next_tif &= ~_TIF_SPEC_IB;
+		}
+	}
+
 	/*
 	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
 	 * speculation mitigations etc.