From 5834343942fb3b5abe4e1a97def98504d96ae4ac Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 14 Dec 2017 13:19:07 -0800
Subject: x86/power: Make restore_processor_context() sane

[ Upstream commit 7ee18d677989e99635027cee04c878950e0752b9 ]

My previous attempt to fix a couple of bugs in __restore_processor_context():

  5b06bbcfc2c6 ("x86/power: Fix some ordering bugs in __restore_processor_context()")

... introduced yet another bug, breaking suspend-resume.

Rather than trying to come up with a minimal fix, let's try to clean it up
for real. This patch fixes quite a few things:

 - The old code saved a nonsensical subset of segment registers.
   The only registers that need to be saved are those that contain
   userspace state or those that can't be trivially restored without
   percpu access working. (On x86_32, we can restore percpu access
   by writing __KERNEL_PERCPU to %fs. On x86_64, it's easier to
   save and restore the kernel's GSBASE.) With this patch, we
   restore hardcoded values to the kernel state where applicable and
   explicitly restore the user state after fixing all the descriptor
   tables.

 - We used to use an unholy mix of inline asm and C helpers for
   segment register access. Let's get rid of the inline asm.

This fixes the reported s2ram hangs and makes the code all around
more logical.

Analyzed-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Reported-by: Pavel Machek <pavel@ucw.cz>
Tested-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Tested-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Zhang Rui <rui.zhang@intel.com>
Fixes: 5b06bbcfc2c6 ("x86/power: Fix some ordering bugs in __restore_processor_context()")
Link: http://lkml.kernel.org/r/398ee68e5c0f766425a7b746becfc810840770ff.1513286253.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/include/asm/suspend_32.h |  8 +++-
 arch/x86/include/asm/suspend_64.h | 16 ++++++-
 arch/x86/power/cpu.c              | 79 ++++++++++++++++---------------
 3 files changed, 62 insertions(+), 41 deletions(-)

diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h
index 982c325dad33..8be6afb58471 100644
--- a/arch/x86/include/asm/suspend_32.h
+++ b/arch/x86/include/asm/suspend_32.h
@@ -12,7 +12,13 @@
 
 /* image of the saved processor state */
 struct saved_context {
-	u16 es, fs, gs, ss;
+	/*
+	 * On x86_32, all segment registers, with the possible exception of
+	 * gs, are saved at kernel entry in pt_regs.
+	 */
+#ifdef CONFIG_X86_32_LAZY_GS
+	u16 gs;
+#endif
 	unsigned long cr0, cr2, cr3, cr4;
 	u64 misc_enable;
 	bool misc_enable_saved;
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h
index 600e9e0aea51..a7af9f53c0cb 100644
--- a/arch/x86/include/asm/suspend_64.h
+++ b/arch/x86/include/asm/suspend_64.h
@@ -20,8 +20,20 @@
  */
 struct saved_context {
 	struct pt_regs regs;
-	u16 ds, es, fs, gs, ss;
-	unsigned long gs_base, gs_kernel_base, fs_base;
+
+	/*
+	 * User CS and SS are saved in current_pt_regs(). The rest of the
+	 * segment selectors need to be saved and restored here.
+	 */
+	u16 ds, es, fs, gs;
+
+	/*
+	 * Usermode FSBASE and GSBASE may not match the fs and gs selectors,
+	 * so we save them separately. We save the kernelmode GSBASE to
+	 * restore percpu access after resume.
+	 */
+	unsigned long kernelmode_gs_base, usermode_gs_base, fs_base;
+
 	unsigned long cr0, cr2, cr3, cr4, cr8;
 	u64 misc_enable;
 	bool misc_enable_saved;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 8e1668470b23..a7d966964c6f 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -99,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * segment registers
 	 */
-#ifdef CONFIG_X86_32
-	savesegment(es, ctxt->es);
-	savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
 	savesegment(gs, ctxt->gs);
-	savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+	savesegment(gs, ctxt->gs);
+	savesegment(fs, ctxt->fs);
+	savesegment(ds, ctxt->ds);
+	savesegment(es, ctxt->es);
 
 	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
 	mtrr_save_fixed_ranges(NULL);
 
 	rdmsrl(MSR_EFER, ctxt->efer);
@@ -191,9 +187,12 @@ static void fix_processor_context(void)
 }
 
 /**
- * __restore_processor_state - restore the contents of CPU registers saved
- *	by __save_processor_state()
- * @ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ *	by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
@@ -216,46 +215,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);
 
+	/* Restore the IDT. */
+	load_idt(&ctxt->idt);
+
 	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
+	 * Just in case the asm code got us here with the SS, DS, or ES
+	 * out of sync with the GDT, update them.
 	 */
-	load_idt(&ctxt->idt);
+	loadsegment(ss, __KERNEL_DS);
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);
 
-#ifdef CONFIG_X86_64
 	/*
-	 * We need GSBASE restored before percpu access can work.
-	 * percpu access can happen in exception handlers or in complicated
-	 * helpers like load_gs_index().
+	 * Restore percpu access. Percpu access can happen in exception
+	 * handlers or in complicated helpers like load_gs_index().
 	 */
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+#else
+	loadsegment(fs, __KERNEL_PERCPU);
+	loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif
 
+	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
 	fix_processor_context();
 
 	/*
-	 * Restore segment registers. This happens after restoring the GDT
-	 * and LDT, which happen in fix_processor_context().
+	 * Now that we have descriptor tables fully restored and working
+	 * exception handling, restore the usermode segments.
 	 */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+	loadsegment(ds, ctxt->es);
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
-	loadsegment(gs, ctxt->gs);
-	loadsegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
 	load_gs_index(ctxt->gs);
-	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
 
 	/*
-	 * Restore FSBASE and user GSBASE after reloading the respective
-	 * segment selectors.
+	 * Restore FSBASE and GSBASE after restoring the selectors, since
+	 * restoring the selectors clobbers the bases. Keep in mind
+	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
 	 */
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+	loadsegment(gs, ctxt->gs);
 #endif
 
 	do_fpu_end();
-- 
2.19.1
