From 453a26f77acf7654dd2c25f4a08290776d66605d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sun, 25 Nov 2018 19:33:47 +0100
Subject: [PATCH 39/76] x86/process: Consolidate and simplify switch_to_xtra()
 code

commit ff16701a29cba3aafa0bd1656d766813b2d0a811 upstream.

Move the conditional invocation of __switch_to_xtra() into an inline
function so the logic can be shared between the 32-bit and 64-bit code.
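
[ed: for illustration only -- condensed from the hunk below that adds
 arch/x86/kernel/process.h, the shared helper reduces to:

	static inline void switch_to_extra(struct task_struct *prev,
					   struct task_struct *next)
	{
		/* Common case: no extra work, skip __switch_to_xtra() */
		if (unlikely(task_thread_info(next)->flags & _TIF_WORK_CTXSW_NEXT ||
			     task_thread_info(prev)->flags & _TIF_WORK_CTXSW_PREV))
			__switch_to_xtra(prev, next);
	}

 so both the 32-bit and 64-bit __switch_to() call it instead of
 open-coding the flag check.]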

Remove the TSS pointer hand-through and retrieve the pointer directly in
the bitmap handling function. Use this_cpu_ptr() instead of the per_cpu()
indirection.
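
[ed: a sketch of the difference, not part of the patch itself:

	/* old pattern: index the per-CPU area through the CPU number */
	struct tss_struct *tss = &per_cpu(cpu_tss, smp_processor_id());

	/* new pattern: address the local CPU's copy directly */
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss);

 this_cpu_ptr() resolves through the per-CPU base and avoids the
 explicit CPU-number lookup.]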

This is a preparatory change so integration of conditional indirect branch
speculation optimization happens only in one place.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: Tim Chen <tim.c.chen@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Casey Schaufler <casey.schaufler@intel.com>
Cc: Asit Mallick <asit.k.mallick@intel.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Jon Masters <jcm@redhat.com>
Cc: Waiman Long <longman9394@gmail.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: Dave Stewart <david.c.stewart@intel.com>
Cc: Kees Cook <keescook@chromium.org>
Link: https://lkml.kernel.org/r/20181125185005.280855518@linutronix.de
[bwh: Backported to 4.9:
 - Use cpu_tss instead of cpu_tss_rw
 - __switch_to() still uses the tss variable, so don't delete it
 - Adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 arch/x86/include/asm/switch_to.h |  3 ---
 arch/x86/kernel/process.c        | 12 +++++++-----
 arch/x86/kernel/process.h        | 24 ++++++++++++++++++++++++
 arch/x86/kernel/process_32.c     |  9 +++------
 arch/x86/kernel/process_64.c     |  9 +++------
 5 files changed, 37 insertions(+), 20 deletions(-)
 create mode 100644 arch/x86/kernel/process.h

diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436acd463..676e84f521ba 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -8,9 +8,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev,
 
 __visible struct task_struct *__switch_to(struct task_struct *prev,
					   struct task_struct *next);
-struct tss_struct;
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss);
 
 /* This runs runs on the previous thread's stack. */
 static inline void prepare_switch_to(struct task_struct *prev,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5111e107a902..b1abe87c5f4d 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -35,6 +35,8 @@
 #include <asm/switch_to.h>
 #include <asm/spec-ctrl.h>
 
+#include "process.h"
+
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -183,11 +185,12 @@ int set_tsc_mode(unsigned int val)
 	return 0;
 }
 
-static inline void switch_to_bitmap(struct tss_struct *tss,
-				    struct thread_struct *prev,
+static inline void switch_to_bitmap(struct thread_struct *prev,
 				    struct thread_struct *next,
 				    unsigned long tifp, unsigned long tifn)
 {
+	struct tss_struct *tss = this_cpu_ptr(&cpu_tss);
+
 	if (tifn & _TIF_IO_BITMAP) {
 		/*
 		 * Copy the relevant range of the IO bitmap.
@@ -374,8 +377,7 @@ void speculation_ctrl_update(unsigned long tif)
 	preempt_enable();
 }
 
-void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
-		      struct tss_struct *tss)
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 {
 	struct thread_struct *prev, *next;
 	unsigned long tifp, tifn;
@@ -385,7 +387,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
 	tifn = READ_ONCE(task_thread_info(next_p)->flags);
 	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
-	switch_to_bitmap(tss, prev, next, tifp, tifn);
+	switch_to_bitmap(prev, next, tifp, tifn);
 
 	propagate_user_return_notify(prev_p, next_p);
 
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h
new file mode 100644
index 000000000000..020fbfac3a27
--- /dev/null
+++ b/arch/x86/kernel/process.h
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Code shared between 32 and 64 bit
+
+void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p);
+
+/*
+ * This needs to be inline to optimize for the common case where no extra
+ * work needs to be done.
+ */
+static inline void switch_to_extra(struct task_struct *prev,
+				   struct task_struct *next)
+{
+	unsigned long next_tif = task_thread_info(next)->flags;
+	unsigned long prev_tif = task_thread_info(prev)->flags;
+
+	/*
+	 * __switch_to_xtra() handles debug registers, i/o bitmaps,
+	 * speculation mitigations etc.
+	 */
+	if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT ||
+		     prev_tif & _TIF_WORK_CTXSW_PREV))
+		__switch_to_xtra(prev, next);
+}
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index bd7be8efdc4c..912246fd6cd9 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,6 +55,8 @@
 #include <asm/switch_to.h>
 #include <asm/vm86.h>
 
+#include "process.h"
+
 void __show_regs(struct pt_regs *regs, int all)
 {
 	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
@@ -264,12 +266,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
 
-	/*
-	 * Now maybe handle debug registers and/or IO bitmaps
-	 */
-	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
-		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index a2661814bde0..81eec65fe053 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -51,6 +51,8 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/vdso.h>
 
+#include "process.h"
+
 __visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
 
 /* Prints also some state that isn't saved in the pt_regs */
@@ -454,12 +456,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	/* Reload esp0 and ss1. This changes current_thread_info(). */
 	load_sp0(tss, next);
 
-	/*
-	 * Now maybe reload the debug registers and handle I/O bitmaps
-	 */
-	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
-		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
-		__switch_to_xtra(prev_p, next_p, tss);
+	switch_to_extra(prev_p, next_p);
 
 #ifdef CONFIG_XEN
 	/*
-- 
2.21.0
