1 From 6690e86be83ac75832e461c141055b5d601c0a6d Mon Sep 17 00:00:00 2001
2 From: Peter Zijlstra <peterz@infradead.org>
3 Date: Thu, 14 Feb 2019 10:30:52 +0100
4 Subject: sched/x86: Save [ER]FLAGS on context switch
6 From: Peter Zijlstra <peterz@infradead.org>
8 commit 6690e86be83ac75832e461c141055b5d601c0a6d upstream.
10 Effectively reverts commit:
12 2c7577a75837 ("sched/x86_64: Don't save flags on context switch")
14 Specifically because SMAP uses FLAGS.AC which invalidates the claim
15 that the kernel has clean flags.
17 In particular; while preemption from interrupt return is fine (the
18 IRET frame on the exception stack contains FLAGS) it breaks any code
19 that does synchronous scheduling, including preempt_enable().
21 This has become a significant issue ever since commit:
23 5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")
25 provided for means of having 'normal' C code between STAC / CLAC,
26 exposing the FLAGS.AC state. So far this hasn't led to trouble,
27 however fix it before it comes apart.
29 Reported-by: Julien Thierry <julien.thierry@arm.com>
30 Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
31 Acked-by: Andy Lutomirski <luto@amacapital.net>
32 Cc: Borislav Petkov <bp@alien8.de>
33 Cc: Josh Poimboeuf <jpoimboe@redhat.com>
34 Cc: Linus Torvalds <torvalds@linux-foundation.org>
35 Cc: Peter Zijlstra <peterz@infradead.org>
36 Cc: Thomas Gleixner <tglx@linutronix.de>
38 Fixes: 5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")
39 Signed-off-by: Ingo Molnar <mingo@kernel.org>
40 Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
43 arch/x86/entry/entry_32.S | 2 ++
44 arch/x86/entry/entry_64.S | 2 ++
45 arch/x86/include/asm/switch_to.h | 1 +
46 arch/x86/kernel/process_32.c | 7 +++++++
47 arch/x86/kernel/process_64.c | 8 ++++++++
48 5 files changed, 20 insertions(+)
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -648,6 +648,7 @@ ENTRY(__switch_to_asm)
 	pushl	%ebx
 	pushl	%edi
 	pushl	%esi
+	pushfl
 
 	/* switch stack */
 	movl	%esp, TASK_threadsp(%eax)
@@ -670,6 +671,7 @@ ENTRY(__switch_to_asm)
 #endif
 
+	popfl
 	/* restore callee-saved registers */
 	popl	%esi
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -352,6 +352,7 @@ ENTRY(__switch_to_asm)
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
+	pushfq
 
 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -374,6 +375,7 @@ ENTRY(__switch_to_asm)
 #endif
 
+	popfq
 	/* restore callee-saved registers */
 	popq	%r15
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
+	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -130,6 +130,13 @@ int copy_thread_tls(unsigned long clone_
 	struct task_struct *tsk;
 	int err;
 
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -300,6 +300,14 @@ int copy_thread_tls(unsigned long clone_
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
+
+	/*
+	 * For a new task use the RESET flags value since there is no before.
+	 * All the status flags are zero; DF and all the system flags must also
+	 * be 0, specifically IF must be 0 because we context switch to the new
+	 * task with interrupts disabled.
+	 */
+	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;