From 4040cf5c5817d989bf691411009f7d82b6df8b20 Mon Sep 17 00:00:00 2001
From: Sasha Levin
Date: Mon, 8 Apr 2019 19:16:07 -0400
Subject: [PATCH] fixes for 4.9

Signed-off-by: Sasha Levin
---
 ...-tm-code-inside-ppc_transactional_me.patch |  73 ++++++
 queue-4.9/series                              |   5 +
 ...e-sysenter-msr-restoration-to-fix_pr.patch |  63 +++++
 ...-struct-desc_ptr-for-the-idt-in-stru.patch |  83 +++++++
 ...me-ordering-bugs-in-__restore_proces.patch | 128 ++++++++++
 ...-make-restore_processor_context-sane.patch | 229 ++++++++++++++++++
 6 files changed, 581 insertions(+)
 create mode 100644 queue-4.9/powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch
 create mode 100644 queue-4.9/series
 create mode 100644 queue-4.9/x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch
 create mode 100644 queue-4.9/x86-power-64-use-struct-desc_ptr-for-the-idt-in-stru.patch
 create mode 100644 queue-4.9/x86-power-fix-some-ordering-bugs-in-__restore_proces.patch
 create mode 100644 queue-4.9/x86-power-make-restore_processor_context-sane.patch

diff --git a/queue-4.9/powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch b/queue-4.9/powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch
new file mode 100644
index 0000000000..2a11ff61c0
--- /dev/null
+++ b/queue-4.9/powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch
@@ -0,0 +1,73 @@
+From 774cf9c608b3f6a08fb347fc94dbc45504af920e Mon Sep 17 00:00:00 2001
+From: Breno Leitao
+Date: Mon, 8 Apr 2019 16:32:38 +1000
+Subject: powerpc/tm: Limit TM code inside PPC_TRANSACTIONAL_MEM
+
+[ Upstream commit 897bc3df8c5aebb54c32d831f917592e873d0559 ]
+
+Commit e1c3743e1a20 ("powerpc/tm: Set MSR[TS] just prior to recheckpoint")
+moved a code block around, and this block uses the 'msr' variable outside
+of the CONFIG_PPC_TRANSACTIONAL_MEM block; however, the 'msr' variable is
+declared inside a CONFIG_PPC_TRANSACTIONAL_MEM block, causing a possible
+error when CONFIG_PPC_TRANSACTIONAL_MEM is not defined.
+
+	error: 'msr' undeclared (first use in this function)
+
+This is not causing a compilation error in the mainline kernel, because
+'msr' is being used as an argument of MSR_TM_ACTIVE(), which is defined as
+the following when CONFIG_PPC_TRANSACTIONAL_MEM is *not* set:
+
+	#define MSR_TM_ACTIVE(x) 0
+
+This patch fixes the issue by avoiding any use of the 'msr' variable
+outside the CONFIG_PPC_TRANSACTIONAL_MEM block, rather than trusting the
+MSR_TM_ACTIVE() definition.
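+
+To make the failure mode concrete, here is a standalone sketch -- not the
+kernel code itself; the mask value and the get_user_msr() helper are
+illustrative stand-ins -- of how a use outside the guard can still compile:
+
+	#define MSR_TS_MASK	(3ULL << 33)	/* illustrative value */
+
+	#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	# define MSR_TM_ACTIVE(x) (((x) & MSR_TS_MASK) != 0)
+	#else
+	# define MSR_TM_ACTIVE(x) 0	/* argument never evaluated */
+	#endif
+
+	/* Hypothetical stand-in for reading the user context MSR. */
+	unsigned long get_user_msr(void) { return 0; }
+
+	int tm_needs_restore(void)
+	{
+	#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+		unsigned long msr = get_user_msr();
+	#endif
+		/*
+		 * With CONFIG_PPC_TRANSACTIONAL_MEM unset, 'msr' is never
+		 * declared, yet this still compiles because MSR_TM_ACTIVE()
+		 * expands to 0 and discards its argument.
+		 */
+		return MSR_TM_ACTIVE(msr);
+	}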
+
+Cc: stable@vger.kernel.org
+Reported-by: Christoph Biedl
+Fixes: e1c3743e1a20 ("powerpc/tm: Set MSR[TS] just prior to recheckpoint")
+Signed-off-by: Breno Leitao
+Signed-off-by: Michael Ellerman
+Signed-off-by: Sasha Levin
+---
+ arch/powerpc/kernel/signal_64.c | 23 ++++++++++++++++++-----
+ 1 file changed, 18 insertions(+), 5 deletions(-)
+
+diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
+index d929afab7b24..bdf2f7b995bb 100644
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -746,12 +746,25 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
+ 	if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
+ 				   &uc_transact->uc_mcontext))
+ 		goto badframe;
+-	}
+-	else
+-	/* Fall through, for non-TM restore */
++	} else
+ #endif
+-	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+-		goto badframe;
++	{
++		/*
++		 * Fall through, for non-TM restore
++		 *
++		 * Unset MSR[TS] on the thread regs since MSR from user
++		 * context does not have MSR active, and recheckpoint was
++		 * not called since restore_tm_sigcontexts() was not called
++		 * also.
++		 *
++		 * If not unsetting it, the code can RFID to userspace with
++		 * MSR[TS] set, but without CPU in the proper state,
++		 * causing a TM bad thing.
++		 */
++		current->thread.regs->msr &= ~MSR_TS_MASK;
++		if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
++			goto badframe;
++	}
+ 
+ 	if (restore_altstack(&uc->uc_stack))
+ 		goto badframe;
+-- 
+2.19.1
+
diff --git a/queue-4.9/series b/queue-4.9/series
new file mode 100644
index 0000000000..4ae4144692
--- /dev/null
+++ b/queue-4.9/series
@@ -0,0 +1,5 @@
+x86-power-fix-some-ordering-bugs-in-__restore_proces.patch
+x86-power-64-use-struct-desc_ptr-for-the-idt-in-stru.patch
+x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch
+x86-power-make-restore_processor_context-sane.patch
+powerpc-tm-limit-tm-code-inside-ppc_transactional_me.patch
diff --git a/queue-4.9/x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch b/queue-4.9/x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch
new file mode 100644
index 0000000000..bf9427b3c8
--- /dev/null
+++ b/queue-4.9/x86-power-32-move-sysenter-msr-restoration-to-fix_pr.patch
@@ -0,0 +1,63 @@
+From 69e9fadb18c4e28fa564be525f2c7da2a513a043 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski
+Date: Thu, 14 Dec 2017 13:19:06 -0800
+Subject: x86/power/32: Move SYSENTER MSR restoration to
+ fix_processor_context()
+
+[ Upstream commit 896c80bef4d3b357814a476663158aaf669d0fb3 ]
+
+x86_64 restores system call MSRs in fix_processor_context(), and
+x86_32 restored them along with segment registers. The 64-bit
+variant makes more sense, so move the 32-bit code to match the
+64-bit code.
+
+No change in runtime behavior is expected.
+
+Tested-by: Jarkko Nikula
+Signed-off-by: Andy Lutomirski
+Acked-by: Rafael J. Wysocki
+Acked-by: Thomas Gleixner
+Cc: Borislav Petkov
+Cc: Josh Poimboeuf
+Cc: Linus Torvalds
+Cc: Pavel Machek
+Cc: Peter Zijlstra
+Cc: Rafael J. 
Wysocki +Cc: Zhang Rui +Link: http://lkml.kernel.org/r/65158f8d7ee64dd6bbc6c1c83b3b34aaa854e3ae.1513286253.git.luto@kernel.org +Signed-off-by: Ingo Molnar +Signed-off-by: Sasha Levin +--- + arch/x86/power/cpu.c | 9 +++------ + 1 file changed, 3 insertions(+), 6 deletions(-) + +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index ec923a1cdaf0..2335e8beb0cf 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -174,6 +174,9 @@ static void fix_processor_context(void) + write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); + + syscall_init(); /* This sets MSR_*STAR and related */ ++#else ++ if (boot_cpu_has(X86_FEATURE_SEP)) ++ enable_sep_cpu(); + #endif + load_TR_desc(); /* This does ltr */ + load_mm_ldt(current->active_mm); /* This does lldt */ +@@ -233,12 +236,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) + loadsegment(fs, ctxt->fs); + loadsegment(gs, ctxt->gs); + loadsegment(ss, ctxt->ss); +- +- /* +- * sysenter MSRs +- */ +- if (boot_cpu_has(X86_FEATURE_SEP)) +- enable_sep_cpu(); + #else + /* CONFIG_X86_64 */ + asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); +-- +2.19.1 + diff --git a/queue-4.9/x86-power-64-use-struct-desc_ptr-for-the-idt-in-stru.patch b/queue-4.9/x86-power-64-use-struct-desc_ptr-for-the-idt-in-stru.patch new file mode 100644 index 0000000000..2688eeefd9 --- /dev/null +++ b/queue-4.9/x86-power-64-use-struct-desc_ptr-for-the-idt-in-stru.patch @@ -0,0 +1,83 @@ +From b90c81310fae869c591381416efac589a5b9d74c Mon Sep 17 00:00:00 2001 +From: Andy Lutomirski +Date: Thu, 14 Dec 2017 13:19:05 -0800 +Subject: x86/power/64: Use struct desc_ptr for the IDT in struct saved_context + +[ Upstream commit 090edbe23ff57940fca7f57d9165ce57a826bd7a ] + +x86_64's saved_context nonsensically used separate idt_limit and +idt_base fields and then cast &idt_limit to struct desc_ptr *. + +This was correct (with -fno-strict-aliasing), but it's confusing, +served no purpose, and required #ifdeffery. Simplify this by +using struct desc_ptr directly. + +No change in functionality. + +Tested-by: Jarkko Nikula +Signed-off-by: Andy Lutomirski +Acked-by: Rafael J. Wysocki +Acked-by: Thomas Gleixner +Cc: Borislav Petkov +Cc: Josh Poimboeuf +Cc: Linus Torvalds +Cc: Pavel Machek +Cc: Peter Zijlstra +Cc: Rafael J. Wysocki +Cc: Zhang Rui +Link: http://lkml.kernel.org/r/967909ce38d341b01d45eff53e278e2728a3a93a.1513286253.git.luto@kernel.org +Signed-off-by: Ingo Molnar +Signed-off-by: Sasha Levin +--- + arch/x86/include/asm/suspend_64.h | 3 +-- + arch/x86/power/cpu.c | 11 +---------- + 2 files changed, 2 insertions(+), 12 deletions(-) + +diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h +index 2bd96b4df140..ab899e5f3a85 100644 +--- a/arch/x86/include/asm/suspend_64.h ++++ b/arch/x86/include/asm/suspend_64.h +@@ -29,8 +29,7 @@ struct saved_context { + u16 gdt_pad; /* Unused */ + struct desc_ptr gdt_desc; + u16 idt_pad; +- u16 idt_limit; +- unsigned long idt_base; ++ struct desc_ptr idt; + u16 ldt; + u16 tss; + unsigned long tr; +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index 73063dfed476..ec923a1cdaf0 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt) + /* + * descriptor tables + */ +-#ifdef CONFIG_X86_32 + store_idt(&ctxt->idt); +-#else +-/* CONFIG_X86_64 */ +- store_idt((struct desc_ptr *)&ctxt->idt_limit); +-#endif ++ + /* + * We save it here, but restore it only in the hibernate case. 
+ * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
+@@ -215,12 +211,7 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ 	 * now restore the descriptor tables to their proper values
+ 	 * ltr is done i fix_processor_context().
+ 	 */
+-#ifdef CONFIG_X86_32
+ 	load_idt(&ctxt->idt);
+-#else
+-/* CONFIG_X86_64 */
+-	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+-#endif
+ 
+ #ifdef CONFIG_X86_64
+ 	/*
+-- 
+2.19.1
+
diff --git a/queue-4.9/x86-power-fix-some-ordering-bugs-in-__restore_proces.patch b/queue-4.9/x86-power-fix-some-ordering-bugs-in-__restore_proces.patch
new file mode 100644
index 0000000000..7a39fafe8b
--- /dev/null
+++ b/queue-4.9/x86-power-fix-some-ordering-bugs-in-__restore_proces.patch
@@ -0,0 +1,128 @@
+From ebd5404a5e0055b9fee2e43589417dada01ac26f Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski
+Date: Thu, 30 Nov 2017 07:57:57 -0800
+Subject: x86/power: Fix some ordering bugs in __restore_processor_context()
+
+[ Upstream commit 5b06bbcfc2c621da3009da8decb7511500c293ed ]
+
+__restore_processor_context() had a couple of ordering bugs. It
+restored GSBASE after calling load_gs_index(), and the latter can
+call into tracing code. It also tried to restore segment registers
+before restoring the LDT, which is straight-up wrong.
+
+Reorder the code so that we restore GSBASE, then the descriptor
+tables, then the segments.
+
+This fixes two bugs. First, it fixes a regression that broke resume
+under certain configurations due to irqflag tracing in
+native_load_gs_index(). Second, it fixes resume when the userspace
+process that initiated suspend had funny segments. The latter can be
+reproduced by compiling this:
+
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ldt_echo.c - Echo argv[1] while using an LDT segment
+ */
+
+int main(int argc, char **argv)
+{
+	int ret;
+	size_t len;
+	char *buf;
+
+	const struct user_desc desc = {
+		.entry_number = 0,
+		.base_addr = 0,
+		.limit = 0xfffff,
+		.seg_32bit = 1,
+		.contents = 0, /* Data, grow-up */
+		.read_exec_only = 0,
+		.limit_in_pages = 1,
+		.seg_not_present = 0,
+		.useable = 0
+	};
+
+	if (argc != 2)
+		errx(1, "Usage: %s STRING", argv[0]);
+
+	len = asprintf(&buf, "%s\n", argv[1]);
+	if (len < 0)
+		errx(1, "Out of memory");
+
+	ret = syscall(SYS_modify_ldt, 1, &desc, sizeof(desc));
+	if (ret < -1)
+		errno = -ret;
+	if (ret)
+		err(1, "modify_ldt");
+
+	asm volatile ("movw %0, %%es" :: "rm" ((unsigned short)7));
+	write(1, buf, len);
+	return 0;
+}
+
+and running ldt_echo mem >/sys/power/state
+
+Without the fix, the latter causes a triple fault on resume.
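+
+A note on the reproducer as quoted: it omits its #include lines, and
+'len' is declared as size_t, so the 'len < 0' check can never fire
+(asprintf() returns int). A buildable copy would start with a header
+block along these lines -- this list is a reconstruction, not part of
+the original program:
+
+	#define _GNU_SOURCE		/* for asprintf() */
+	#include <err.h>		/* err(), errx() */
+	#include <errno.h>		/* errno */
+	#include <stdio.h>		/* asprintf() */
+	#include <unistd.h>		/* syscall(), write() */
+	#include <sys/syscall.h>	/* SYS_modify_ldt */
+	#include <asm/ldt.h>		/* struct user_desc */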
+
+Fixes: ca37e57bbe0c ("x86/entry/64: Add missing irqflags tracing to native_load_gs_index()")
+Reported-by: Jarkko Nikula
+Signed-off-by: Andy Lutomirski
+Signed-off-by: Thomas Gleixner
+Tested-by: Jarkko Nikula
+Cc: Peter Zijlstra
+Cc: Borislav Petkov
+Cc: Linus Torvalds
+Link: https://lkml.kernel.org/r/6b31721ea92f51ea839e79bd97ade4a75b1eeea2.1512057304.git.luto@kernel.org
+Signed-off-by: Ingo Molnar
+Signed-off-by: Sasha Levin
+---
+ arch/x86/power/cpu.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
+index 53cace2ec0e2..73063dfed476 100644
+--- a/arch/x86/power/cpu.c
++++ b/arch/x86/power/cpu.c
+@@ -222,8 +222,20 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ 	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
+ #endif
+ 
++#ifdef CONFIG_X86_64
+ 	/*
+-	 * segment registers
++	 * We need GSBASE restored before percpu access can work.
++	 * percpu access can happen in exception handlers or in complicated
++	 * helpers like load_gs_index().
++	 */
++	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
++#endif
++
++	fix_processor_context();
++
++	/*
++	 * Restore segment registers. This happens after restoring the GDT
++	 * and LDT, which happen in fix_processor_context().
+ 	 */
+ #ifdef CONFIG_X86_32
+ 	loadsegment(es, ctxt->es);
+@@ -244,13 +256,14 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
+ 	load_gs_index(ctxt->gs);
+ 	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+ 
++	/*
++	 * Restore FSBASE and user GSBASE after reloading the respective
++	 * segment selectors.
++	 */
+ 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
+-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+ 	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+ #endif
+ 
+-	fix_processor_context();
+-
+ 	do_fpu_end();
+ 	x86_platform.restore_sched_clock_state();
+ 	mtrr_bp_restore();
+-- 
+2.19.1
+
diff --git a/queue-4.9/x86-power-make-restore_processor_context-sane.patch b/queue-4.9/x86-power-make-restore_processor_context-sane.patch
new file mode 100644
index 0000000000..a7bca31b95
--- /dev/null
+++ b/queue-4.9/x86-power-make-restore_processor_context-sane.patch
@@ -0,0 +1,229 @@
+From 019413726dee24c756f3a9af4d1f6bdcb4191b68 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski
+Date: Thu, 14 Dec 2017 13:19:07 -0800
+Subject: x86/power: Make restore_processor_context() sane
+
+[ Upstream commit 7ee18d677989e99635027cee04c878950e0752b9 ]
+
+My previous attempt to fix a couple of bugs in __restore_processor_context():
+
+  5b06bbcfc2c6 ("x86/power: Fix some ordering bugs in __restore_processor_context()")
+
+... introduced yet another bug, breaking suspend-resume.
+
+Rather than trying to come up with a minimal fix, let's try to clean it up
+for real. This patch fixes quite a few things:
+
+ - The old code saved a nonsensical subset of segment registers.
+   The only registers that need to be saved are those that contain
+   userspace state or those that can't be trivially restored without
+   percpu access working. (On x86_32, we can restore percpu access
+   by writing __KERNEL_PERCPU to %fs. On x86_64, it's easier to
+   save and restore the kernel's GSBASE.) With this patch, we
+   restore hardcoded values to the kernel state where applicable and
+   explicitly restore the user state after fixing all the descriptor
+   tables.
+
+ - We used to use an unholy mix of inline asm and C helpers for
+   segment register access. Let's get rid of the inline asm.
+
+This fixes the reported s2ram hangs and makes the code all around
+more logical.
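+
+For reference, the restore order that the diff below converges on can be
+summarized as follows (a paraphrase of the new code, not a verbatim
+excerpt):
+
+	/*
+	 * 1. CRn registers, then the IDT.
+	 * 2. Known-good kernel data segments: ss = __KERNEL_DS,
+	 *    ds/es = __USER_DS.
+	 * 3. Percpu access: MSR_GS_BASE on 64-bit, or fs = __KERNEL_PERCPU
+	 *    and gs = __KERNEL_STACK_CANARY on 32-bit.
+	 * 4. fix_processor_context(): TSS, GDT, LDT and the syscall MSRs.
+	 * 5. Usermode selectors, then MSR_FS_BASE/MSR_KERNEL_GS_BASE,
+	 *    since writing the selectors clobbers the bases.
+	 */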
+ +Analyzed-by: Linus Torvalds +Reported-by: Jarkko Nikula +Reported-by: Pavel Machek +Tested-by: Jarkko Nikula +Tested-by: Pavel Machek +Signed-off-by: Andy Lutomirski +Acked-by: Rafael J. Wysocki +Acked-by: Thomas Gleixner +Cc: Borislav Petkov +Cc: Josh Poimboeuf +Cc: Peter Zijlstra +Cc: Rafael J. Wysocki +Cc: Zhang Rui +Fixes: 5b06bbcfc2c6 ("x86/power: Fix some ordering bugs in __restore_processor_context()") +Link: http://lkml.kernel.org/r/398ee68e5c0f766425a7b746becfc810840770ff.1513286253.git.luto@kernel.org +Signed-off-by: Ingo Molnar +Signed-off-by: Sasha Levin +--- + arch/x86/include/asm/suspend_32.h | 8 +++- + arch/x86/include/asm/suspend_64.h | 16 ++++++- + arch/x86/power/cpu.c | 79 ++++++++++++++++--------------- + 3 files changed, 62 insertions(+), 41 deletions(-) + +diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h +index 8e9dbe7b73a1..5cc2ce4ab8a3 100644 +--- a/arch/x86/include/asm/suspend_32.h ++++ b/arch/x86/include/asm/suspend_32.h +@@ -11,7 +11,13 @@ + + /* image of the saved processor state */ + struct saved_context { +- u16 es, fs, gs, ss; ++ /* ++ * On x86_32, all segment registers, with the possible exception of ++ * gs, are saved at kernel entry in pt_regs. ++ */ ++#ifdef CONFIG_X86_32_LAZY_GS ++ u16 gs; ++#endif + unsigned long cr0, cr2, cr3, cr4; + u64 misc_enable; + bool misc_enable_saved; +diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h +index ab899e5f3a85..701751918921 100644 +--- a/arch/x86/include/asm/suspend_64.h ++++ b/arch/x86/include/asm/suspend_64.h +@@ -19,8 +19,20 @@ + */ + struct saved_context { + struct pt_regs regs; +- u16 ds, es, fs, gs, ss; +- unsigned long gs_base, gs_kernel_base, fs_base; ++ ++ /* ++ * User CS and SS are saved in current_pt_regs(). The rest of the ++ * segment selectors need to be saved and restored here. ++ */ ++ u16 ds, es, fs, gs; ++ ++ /* ++ * Usermode FSBASE and GSBASE may not match the fs and gs selectors, ++ * so we save them separately. We save the kernelmode GSBASE to ++ * restore percpu access after resume. 
++ */ ++ unsigned long kernelmode_gs_base, usermode_gs_base, fs_base; ++ + unsigned long cr0, cr2, cr3, cr4, cr8; + u64 misc_enable; + bool misc_enable_saved; +diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c +index 2335e8beb0cf..054e27671df9 100644 +--- a/arch/x86/power/cpu.c ++++ b/arch/x86/power/cpu.c +@@ -99,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt) + /* + * segment registers + */ +-#ifdef CONFIG_X86_32 +- savesegment(es, ctxt->es); +- savesegment(fs, ctxt->fs); ++#ifdef CONFIG_X86_32_LAZY_GS + savesegment(gs, ctxt->gs); +- savesegment(ss, ctxt->ss); +-#else +-/* CONFIG_X86_64 */ +- asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); +- asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); +- asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); +- asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs)); +- asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss)); ++#endif ++#ifdef CONFIG_X86_64 ++ savesegment(gs, ctxt->gs); ++ savesegment(fs, ctxt->fs); ++ savesegment(ds, ctxt->ds); ++ savesegment(es, ctxt->es); + + rdmsrl(MSR_FS_BASE, ctxt->fs_base); +- rdmsrl(MSR_GS_BASE, ctxt->gs_base); +- rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); ++ rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); ++ rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); + mtrr_save_fixed_ranges(NULL); + + rdmsrl(MSR_EFER, ctxt->efer); +@@ -185,9 +181,12 @@ static void fix_processor_context(void) + } + + /** +- * __restore_processor_state - restore the contents of CPU registers saved +- * by __save_processor_state() +- * @ctxt - structure to load the registers contents from ++ * __restore_processor_state - restore the contents of CPU registers saved ++ * by __save_processor_state() ++ * @ctxt - structure to load the registers contents from ++ * ++ * The asm code that gets us here will have restored a usable GDT, although ++ * it will be pointing to the wrong alias. + */ + static void notrace __restore_processor_state(struct saved_context *ctxt) + { +@@ -210,46 +209,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) + write_cr2(ctxt->cr2); + write_cr0(ctxt->cr0); + ++ /* Restore the IDT. */ ++ load_idt(&ctxt->idt); ++ + /* +- * now restore the descriptor tables to their proper values +- * ltr is done i fix_processor_context(). ++ * Just in case the asm code got us here with the SS, DS, or ES ++ * out of sync with the GDT, update them. + */ +- load_idt(&ctxt->idt); ++ loadsegment(ss, __KERNEL_DS); ++ loadsegment(ds, __USER_DS); ++ loadsegment(es, __USER_DS); + +-#ifdef CONFIG_X86_64 + /* +- * We need GSBASE restored before percpu access can work. +- * percpu access can happen in exception handlers or in complicated +- * helpers like load_gs_index(). ++ * Restore percpu access. Percpu access can happen in exception ++ * handlers or in complicated helpers like load_gs_index(). + */ +- wrmsrl(MSR_GS_BASE, ctxt->gs_base); ++#ifdef CONFIG_X86_64 ++ wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); ++#else ++ loadsegment(fs, __KERNEL_PERCPU); ++ loadsegment(gs, __KERNEL_STACK_CANARY); + #endif + ++ /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ + fix_processor_context(); + + /* +- * Restore segment registers. This happens after restoring the GDT +- * and LDT, which happen in fix_processor_context(). ++ * Now that we have descriptor tables fully restored and working ++ * exception handling, restore the usermode segments. 
+ */ +-#ifdef CONFIG_X86_32 ++#ifdef CONFIG_X86_64 ++ loadsegment(ds, ctxt->es); + loadsegment(es, ctxt->es); + loadsegment(fs, ctxt->fs); +- loadsegment(gs, ctxt->gs); +- loadsegment(ss, ctxt->ss); +-#else +-/* CONFIG_X86_64 */ +- asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); +- asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); +- asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); + load_gs_index(ctxt->gs); +- asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss)); + + /* +- * Restore FSBASE and user GSBASE after reloading the respective +- * segment selectors. ++ * Restore FSBASE and GSBASE after restoring the selectors, since ++ * restoring the selectors clobbers the bases. Keep in mind ++ * that MSR_KERNEL_GS_BASE is horribly misnamed. + */ + wrmsrl(MSR_FS_BASE, ctxt->fs_base); +- wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); ++ wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); ++#elif defined(CONFIG_X86_32_LAZY_GS) ++ loadsegment(gs, ctxt->gs); + #endif + + do_fpu_end(); +-- +2.19.1 + -- 2.39.2