From aaee8c3c5cce2d9107310dd9f3026b4f901d441c Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Tue, 24 May 2016 15:54:04 -0700
Subject: x86/entry/traps: Don't force in_interrupt() to return true in IST handlers

From: Andy Lutomirski <luto@kernel.org>

commit aaee8c3c5cce2d9107310dd9f3026b4f901d441c upstream.

Forcing in_interrupt() to return true if we're not in a bona fide
interrupt confuses the softirq code. This fixes warnings like:

  NOHZ: local_softirq_pending 282

... which can happen when running things like selftests/x86.

This will change perf's static percpu buffer usage in IST context.
I think this is okay, and it's changing the behavior to match
historical (pre-4.0) behavior.
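
For context, in_interrupt() reports true whenever any of the hardirq,
softirq or NMI fields of preempt_count is nonzero, while
preempt_disable() only bumps the low preempt-disable field, so
scheduling stays forbidden without pretending to be in an interrupt.
A minimal user-space sketch of that bit layout (the constants mirror
include/linux/preempt.h; the standalone in_interrupt() below is
illustrative, not the kernel's implementation):

  #include <stdio.h>

  /* preempt_count bit layout, as in include/linux/preempt.h */
  #define PREEMPT_OFFSET  (1UL << 0)   /* preempt-disable depth: bits 0-7   */
  #define SOFTIRQ_OFFSET  (1UL << 8)   /* softirq depth:         bits 8-15  */
  #define HARDIRQ_OFFSET  (1UL << 16)  /* hardirq depth:         bits 16-19 */
  #define NMI_OFFSET      (1UL << 20)  /* NMI flag:              bit  20    */

  #define SOFTIRQ_MASK    (0xffUL << 8)
  #define HARDIRQ_MASK    (0xfUL << 16)
  #define NMI_MASK        (0x1UL << 20)

  static unsigned long preempt_count;

  /* in_interrupt() ignores the preempt-disable field entirely */
  static int in_interrupt(void)
  {
          return !!(preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK | NMI_MASK));
  }

  int main(void)
  {
          preempt_count += HARDIRQ_OFFSET;  /* old ist_enter() */
          printf("HARDIRQ_OFFSET: in_interrupt() = %d\n", in_interrupt());
          preempt_count -= HARDIRQ_OFFSET;

          preempt_count += PREEMPT_OFFSET;  /* new ist_enter(): preempt_disable() */
          printf("PREEMPT_OFFSET: in_interrupt() = %d\n", in_interrupt());
          return 0;
  }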

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 959274753857 ("x86, traps: Track entry into and exit from IST context")
Link: http://lkml.kernel.org/r/cdc215f94d118d691d73df35275022331156fb45.1464130360.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 arch/x86/kernel/traps.c |   20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -109,6 +109,12 @@ static inline void preempt_conditional_c
 	preempt_count_dec();
 }
 
+/*
+ * In IST context, we explicitly disable preemption. This serves two
+ * purposes: it makes it much less likely that we would accidentally
+ * schedule in IST context and it will force a warning if we somehow
+ * manage to schedule by accident.
+ */
 void ist_enter(struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
@@ -123,13 +129,7 @@ void ist_enter(struct pt_regs *regs)
 		rcu_nmi_enter();
 	}
 
-	/*
-	 * We are atomic because we're on the IST stack; or we're on
-	 * x86_32, in which case we still shouldn't schedule; or we're
-	 * on x86_64 and entered from user mode, in which case we're
-	 * still atomic unless ist_begin_non_atomic is called.
-	 */
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 
 	/* This code is a bit fragile. Test it. */
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
@@ -137,7 +137,7 @@ void ist_enter(struct pt_regs *regs)
 
 void ist_exit(struct pt_regs *regs)
 {
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 
 	if (!user_mode(regs))
 		rcu_nmi_exit();
@@ -168,7 +168,7 @@ void ist_begin_non_atomic(struct pt_regs
 	BUG_ON((unsigned long)(current_top_of_stack() -
 	       current_stack_pointer()) >= THREAD_SIZE);
 
-	preempt_count_sub(HARDIRQ_OFFSET);
+	preempt_enable_no_resched();
 }
 
 /**
@@ -178,7 +178,7 @@ void ist_begin_non_atomic(struct pt_regs
  */
 void ist_end_non_atomic(void)
 {
-	preempt_count_add(HARDIRQ_OFFSET);
+	preempt_disable();
 }
 
 static nokprobe_inline int