x86/power: Fix some ordering bugs in __restore_processor_context()
arch/x86/power/cpu.c
/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <linux/dmi.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
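
/*
 * msr_save_context() reads every MSR listed in ctxt->saved_msrs with
 * rdmsrl_safe() and marks an entry valid only if the read succeeded, so
 * MSRs that are not implemented on the running CPU are simply skipped.
 * msr_restore_context() writes back only the entries marked valid.
 */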
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 * __save_processor_state - save CPU registers before creating a
 * hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the registers contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (i.e. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (i.e. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function.  In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
#ifdef CONFIG_X86_64
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

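/*
 * fix_processor_context() reloads the per-CPU state that cannot simply be
 * written back from saved_context: it rewrites the TSS descriptor in the
 * GDT (on 64-bit the descriptor type is explicitly reset to "available",
 * since loading TR from a busy TSS descriptor would fault), re-runs
 * syscall_init() on 64-bit to reprogram the SYSCALL MSRs, reloads TR and
 * the LDT, reinitializes the TLB state, resumes the FPU and finally
 * switches back to the fixmap GDT.
 */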
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif
	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; it should not be
				 * necessary.  But...  This is necessary, because
				 * 386 hardware has the concept of a busy TSS or
				 * some similar stupidity.
				 */

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 * by __save_processor_state()
 * @ctxt - structure to load the registers contents from
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

#ifdef CONFIG_X86_64
	/*
	 * We need GSBASE restored before percpu access can work.
	 * percpu access can happen in exception handlers or in complicated
	 * helpers like load_gs_index().
	 */
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
#endif

	fix_processor_context();

	/*
	 * Restore segment registers.  This happens after restoring the GDT
	 * and LDT, which happen in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	/*
	 * Restore FSBASE and user GSBASE after reloading the respective
	 * segment selectors.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
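/*
 * Hibernation-specific "play dead": park the CPU in a plain HLT loop
 * via hlt_play_dead() instead of the default smp_ops.play_dead(); see the
 * comment in hibernate_resume_nonboot_cpu_disable() below for why
 * MONITOR/MWAIT must be avoided during image restoration.
 */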
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will actually be written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may no longer be possible to
	 * resolve at that point (the page tables it used previously may have
	 * been overwritten by hibernate image data).
	 */
	smp_ops.play_dead = resume_play_dead;
	ret = disable_nonboot_cpus();
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already.  So it's unnecessary to handle race conditions
 * between the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

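/*
 * PM notifier: refuse to enter suspend or hibernation if the boot CPU
 * (CPU0) is offline.  The PM_RESTORE_PREPARE and PM_POST_RESTORE cases
 * below only exist for the CONFIG_DEBUG_HOTPLUG_CPU0 debug mode.
 */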
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0 because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time.  So we just
		 * call _debug_hotplug_cpu() to restore CPU0 to its state prior
		 * to preparing the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device.  But hopefully this is not a case when
		 * the user is debugging CPU0 hotplug.  Even if users hit this
		 * case, they can easily bring CPU0 back online.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case.  Otherwise we would need to remember CPU0's state,
		 * restore it afterwards, resolve race conditions, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Set this bsp_pm_callback to a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is called
	 * earlier and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);
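
/*
 * msr_init_context() registers the list of MSRs to be saved and restored
 * across suspend: it allocates the saved_msr array, records the MSR
 * numbers and marks every entry invalid.  The actual values are read
 * later by msr_save_context() and written back by msr_restore_context().
 * Only one quirk may install such a list; a second call fails with
 * -EINVAL.
 */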
static int msr_init_context(const u32 *msr_id, const int total_num)
{
	int i = 0;
	struct saved_msr *msr_array;

	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
		return -EINVAL;
	}

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Cannot allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	for (i = 0; i < total_num; i++) {
		msr_array[i].info.msr_no = msr_id[i];
		msr_array[i].valid = false;
		msr_array[i].info.reg.q = 0;
	}
	saved_context.saved_msrs.num = total_num;
	saved_context.saved_msrs.array = msr_array;

	return 0;
}

/*
 * The following section is a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOS after the system has been
 * suspended to RAM, and this might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume in order
 * to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n", d->ident);
	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
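
/*
 * A further quirk would follow the same pattern.  The sketch below is
 * purely illustrative: the function name is made up and it reuses
 * MSR_IA32_THERM_CONTROL only as an example MSR; it does not describe an
 * actual supported platform.
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n",
 *			d->ident);
 *		return msr_init_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * together with a matching entry (.callback, .ident and DMI_MATCH()
 * strings) in msr_save_dmi_table below.
 */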
static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	return 0;
}

device_initcall(pm_check_save_msr);