From d9a9ae47f7af9df371456fb77b29ad2c12bf4dbc Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 14 Dec 2017 13:19:06 -0800
Subject: x86/power/32: Move SYSENTER MSR restoration to
 fix_processor_context()

[ Upstream commit 896c80bef4d3b357814a476663158aaf669d0fb3 ]

x86_64 restores system call MSRs in fix_processor_context(), and
x86_32 restored them along with segment registers.  The 64-bit
variant makes more sense, so move the 32-bit code to match the
64-bit code.

No side effects are expected to runtime behavior.

Tested-by: Jarkko Nikula <jarkko.nikula@linux.intel.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Zhang Rui <rui.zhang@intel.com>
Link: http://lkml.kernel.org/r/65158f8d7ee64dd6bbc6c1c83b3b34aaa854e3ae.1513286253.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/x86/power/cpu.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index cba2e2c3f89e..8e1668470b23 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -176,6 +176,9 @@ static void fix_processor_context(void)
 	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
 
 	syscall_init();				/* This sets MSR_*STAR and related */
+#else
+	if (boot_cpu_has(X86_FEATURE_SEP))
+		enable_sep_cpu();
 #endif
 	load_TR_desc();				/* This does ltr */
 	load_mm_ldt(current->active_mm);	/* This does lldt */
@@ -239,12 +242,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	loadsegment(fs, ctxt->fs);
 	loadsegment(gs, ctxt->gs);
 	loadsegment(ss, ctxt->ss);
-
-	/*
-	 * sysenter MSRs
-	 */
-	if (boot_cpu_has(X86_FEATURE_SEP))
-		enable_sep_cpu();
 #else
 	/* CONFIG_X86_64 */
 	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-- 
2.19.1
