From cb9d7fd51d9fbb329d182423bd7b92d0f8cb0e01 Mon Sep 17 00:00:00 2001
From: Vincent Whitchurch <vincent.whitchurch@axis.com>
Date: Tue, 21 Aug 2018 17:25:07 +0200
Subject: watchdog: Mark watchdog touch functions as notrace

From: Vincent Whitchurch <vincent.whitchurch@axis.com>

commit cb9d7fd51d9fbb329d182423bd7b92d0f8cb0e01 upstream.

Some architectures need to use stop_machine() to patch functions for
ftrace, and the assumption is that the stopped CPUs do not make function
calls to traceable functions when they are in the stopped state.

Commit ce4f06dcbb5d ("stop_machine: Touch_nmi_watchdog() after
MULTI_STOP_PREPARE") added calls to the watchdog touch functions from
the stopped CPUs and those functions lack notrace annotations. This
leads to crashes when enabling/disabling ftrace on ARM kernels built
with the Thumb-2 instruction set.

Fix it by adding the necessary notrace annotations.

Fixes: ce4f06dcbb5d ("stop_machine: Touch_nmi_watchdog() after MULTI_STOP_PREPARE")
Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: oleg@redhat.com
Cc: tj@kernel.org
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180821152507.18313-1-vincent.whitchurch@axis.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

---
 kernel/watchdog.c     |    4 ++--
 kernel/watchdog_hld.c |    2 +-
 kernel/workqueue.c    |    2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

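As background for the annotation itself (an illustrative sketch, not part of
the upstream change): notrace is taken here to expand to the compiler's
no_instrument_function attribute (as in include/linux/compiler_types.h on most
configurations), which stops the compiler from emitting the mcount/__fentry__
profiling call that ftrace patches at runtime. A function without that call
site can never be redirected by ftrace, so it stays safe to call from a CPU
parked by stop_machine(). The file name and build command below are
illustrative assumptions, not part of the kernel build.

/* notrace-demo.c -- user-space sketch of the mechanism.
 * Build with:  gcc -pg -S notrace-demo.c
 * In the generated assembly, traced() should contain a call to
 * mcount/__fentry__, while untraced() should not.
 */
#define notrace __attribute__((no_instrument_function))

/* Gets a profiling call inserted at its entry when built with -pg,
 * which is the hook ftrace would later patch. */
void traced(void)
{
}

/* No profiling call is inserted, so there is nothing for ftrace to
 * patch -- the effect the watchdog touch functions need here. */
notrace void untraced(void)
{
}

int main(void)
{
	traced();
	untraced();
	return 0;
}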
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -266,7 +266,7 @@ static void __touch_watchdog(void)
  * entering idle state. This should only be used for scheduler events.
  * Use touch_softlockup_watchdog() for everything else.
  */
-void touch_softlockup_watchdog_sched(void)
+notrace void touch_softlockup_watchdog_sched(void)
 {
 	/*
 	 * Preemption can be enabled. It doesn't matter which CPU's timestamp
@@ -275,7 +275,7 @@ void touch_softlockup_watchdog_sched(voi
 	raw_cpu_write(watchdog_touch_ts, 0);
 }
 
-void touch_softlockup_watchdog(void)
+notrace void touch_softlockup_watchdog(void)
 {
 	touch_softlockup_watchdog_sched();
 	wq_watchdog_touch(raw_smp_processor_id());
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -29,7 +29,7 @@ static struct cpumask dead_events_mask;
 static unsigned long hardlockup_allcpu_dumped;
 static atomic_t watchdog_cpus = ATOMIC_INIT(0);
 
-void arch_touch_nmi_watchdog(void)
+notrace void arch_touch_nmi_watchdog(void)
 {
 	/*
 	 * Using __raw here because some code paths have
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5559,7 +5559,7 @@ static void wq_watchdog_timer_fn(struct
 	mod_timer(&wq_watchdog_timer, jiffies + thresh);
 }
 
-void wq_watchdog_touch(int cpu)
+notrace void wq_watchdog_touch(int cpu)
 {
 	if (cpu >= 0)
 		per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;