3.13-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2014 17:56:52 +0000 (09:56 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 11 Feb 2014 17:56:52 +0000 (09:56 -0800)
added patches:
3.13.y-timekeeping-fix-clock_set-clock_was_set-think-o.patch
timekeeping-avoid-possible-deadlock-from-clock_was_set_delayed.patch

queue-3.13/3.13.y-timekeeping-fix-clock_set-clock_was_set-think-o.patch [new file with mode: 0644]
queue-3.13/series
queue-3.13/timekeeping-avoid-possible-deadlock-from-clock_was_set_delayed.patch [new file with mode: 0644]

diff --git a/queue-3.13/3.13.y-timekeeping-fix-clock_set-clock_was_set-think-o.patch b/queue-3.13/3.13.y-timekeeping-fix-clock_set-clock_was_set-think-o.patch
new file mode 100644 (file)
index 0000000..becaa96
--- /dev/null
@@ -0,0 +1,44 @@
+From john.stultz@linaro.org  Tue Feb 11 09:56:02 2014
+From: John Stultz <john.stultz@linaro.org>
+Date: Mon, 10 Feb 2014 13:07:21 -0800
+Subject: 3.13.y: timekeeping: Fix clock_set/clock_was_set think-o
+To: stable <stable@vger.kernel.org>
+Cc: John Stultz <john.stultz@linaro.org>, Thomas Gleixner <tglx@linutronix.de>, Prarit Bhargava <prarit@redhat.com>, Richard Cochran <richardcochran@gmail.com>, Ingo Molnar <mingo@kernel.org>, Sasha Levin <sasha.levin@oracle.com>
+Message-ID: <1392066444-4940-5-git-send-email-john.stultz@linaro.org>
+
+From: John Stultz <john.stultz@linaro.org>
+
+In backporting 6fdda9a9c5db367130cf32df5d6618d08b89f46a
+(timekeeping: Avoid possible deadlock from clock_was_set_delayed),
+I realized the patch had a think-o: instead of checking
+clock_set I accidentally typed clock_was_set (which is a function,
+so the conditional is always true).
+
+Upstream this was resolved in the immediately following patch
+47a1b796306356f358e515149d86baf0cc6bf007 (tick/timekeeping: Call
+update_wall_time outside the jiffies lock). But since that patch
+really isn't -stable material, this patch only pulls in
+the name change.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/time/timekeeping.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1441,7 +1441,7 @@ static void update_wall_time(void)
+       write_seqcount_end(&timekeeper_seq);
+ out:
+       raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
+-      if (clock_was_set) {
++      if (clock_set) {
+               /*
+                * XXX -  I'd rather we just call clock_was_set(), but
+                * since we're currently holding the jiffies lock, calling
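To see why the original conditional was always true, consider a small standalone C sketch (illustrative only; the names mirror the kernel's, but this is not kernel code): a bare function name used as a condition decays to a non-NULL function pointer, so the branch is taken unconditionally, while testing the intended flag behaves as expected.

    #include <stdio.h>

    /* stand-in for the kernel's clock_was_set() notifier; illustrative only */
    static void clock_was_set(void)
    {
            printf("clock_was_set() called\n");
    }

    int main(void)
    {
            int clock_set = 0;      /* the flag the code intended to test */

            /* BUG: tests the function's address (always non-NULL), so always true */
            if (clock_was_set)
                    printf("branch taken unconditionally\n");

            /* intended check: only runs when the flag was actually set */
            if (clock_set)
                    printf("this line is never printed\n");

            return 0;
    }

Modern compilers flag this pattern with warnings such as -Waddress ("the address of 'clock_was_set' will always evaluate as 'true'"), which is one way such think-os get caught.
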
diff --git a/queue-3.13/series b/queue-3.13/series
index cf266a19e69789144c1c95509d8c2d704c542189..47ceb6f40fe111de58b651cf9e3952b4119d2188 100644 (file)
@@ -111,3 +111,5 @@ powerpc-thp-fix-crash-on-mremap.patch
 powerpc-mm-fix-compile-error-of-pgtable-ppc64.h.patch
 timekeeping-fix-lost-updates-to-tai-adjustment.patch
 timekeeping-fix-potential-lost-pv-notification-of-time-change.patch
+timekeeping-avoid-possible-deadlock-from-clock_was_set_delayed.patch
+3.13.y-timekeeping-fix-clock_set-clock_was_set-think-o.patch
diff --git a/queue-3.13/timekeeping-avoid-possible-deadlock-from-clock_was_set_delayed.patch b/queue-3.13/timekeeping-avoid-possible-deadlock-from-clock_was_set_delayed.patch
new file mode 100644 (file)
index 0000000..4a4965f
--- /dev/null
@@ -0,0 +1,157 @@
+From 6fdda9a9c5db367130cf32df5d6618d08b89f46a Mon Sep 17 00:00:00 2001
+From: John Stultz <john.stultz@linaro.org>
+Date: Tue, 10 Dec 2013 17:18:18 -0800
+Subject: timekeeping: Avoid possible deadlock from clock_was_set_delayed
+
+From: John Stultz <john.stultz@linaro.org>
+
+commit 6fdda9a9c5db367130cf32df5d6618d08b89f46a upstream.
+
+As part of normal operations, the hrtimer subsystem frequently calls
+into the timekeeping code, creating a locking order of
+  hrtimer locks -> timekeeping locks
+
+clock_was_set_delayed() was supposed to allow us to avoid deadlocks
+between the timekeeping and hrtimer subsystems, so that we could
+notify the hrtimer subsystem the time had changed while holding
+the timekeeping locks. This was done by scheduling delayed work
+that would run later once we were out of the timekeeping code.
+
+But unfortunately the lock chains are complex enough that in
+scheduling delayed work, we end up eventually trying to grab
+an hrtimer lock.
+
+Sasha Levin noticed this in testing when the new seqlock lockdep
+enablement triggered the following (somewhat abbreviated) message:
+
+[  251.100221] ======================================================
+[  251.100221] [ INFO: possible circular locking dependency detected ]
+[  251.100221] 3.13.0-rc2-next-20131206-sasha-00005-g8be2375-dirty #4053 Not tainted
+[  251.101967] -------------------------------------------------------
+[  251.101967] kworker/10:1/4506 is trying to acquire lock:
+[  251.101967]  (timekeeper_seq){----..}, at: [<ffffffff81160e96>] retrigger_next_event+0x56/0x70
+[  251.101967]
+[  251.101967] but task is already holding lock:
+[  251.101967]  (hrtimer_bases.lock#11){-.-...}, at: [<ffffffff81160e7c>] retrigger_next_event+0x3c/0x70
+[  251.101967]
+[  251.101967] which lock already depends on the new lock.
+[  251.101967]
+[  251.101967]
+[  251.101967] the existing dependency chain (in reverse order) is:
+[  251.101967]
+-> #5 (hrtimer_bases.lock#11){-.-...}:
+[snipped]
+-> #4 (&rt_b->rt_runtime_lock){-.-...}:
+[snipped]
+-> #3 (&rq->lock){-.-.-.}:
+[snipped]
+-> #2 (&p->pi_lock){-.-.-.}:
+[snipped]
+-> #1 (&(&pool->lock)->rlock){-.-...}:
+[  251.101967]        [<ffffffff81194803>] validate_chain+0x6c3/0x7b0
+[  251.101967]        [<ffffffff81194d9d>] __lock_acquire+0x4ad/0x580
+[  251.101967]        [<ffffffff81194ff2>] lock_acquire+0x182/0x1d0
+[  251.101967]        [<ffffffff84398500>] _raw_spin_lock+0x40/0x80
+[  251.101967]        [<ffffffff81153e69>] __queue_work+0x1a9/0x3f0
+[  251.101967]        [<ffffffff81154168>] queue_work_on+0x98/0x120
+[  251.101967]        [<ffffffff81161351>] clock_was_set_delayed+0x21/0x30
+[  251.101967]        [<ffffffff811c4bd1>] do_adjtimex+0x111/0x160
+[  251.101967]        [<ffffffff811e2711>] compat_sys_adjtimex+0x41/0x70
+[  251.101967]        [<ffffffff843a4b49>] ia32_sysret+0x0/0x5
+[  251.101967]
+-> #0 (timekeeper_seq){----..}:
+[snipped]
+[  251.101967] other info that might help us debug this:
+[  251.101967]
+[  251.101967] Chain exists of:
+  timekeeper_seq --> &rt_b->rt_runtime_lock --> hrtimer_bases.lock#11
+
+[  251.101967]  Possible unsafe locking scenario:
+[  251.101967]
+[  251.101967]        CPU0                    CPU1
+[  251.101967]        ----                    ----
+[  251.101967]   lock(hrtimer_bases.lock#11);
+[  251.101967]                                lock(&rt_b->rt_runtime_lock);
+[  251.101967]                                lock(hrtimer_bases.lock#11);
+[  251.101967]   lock(timekeeper_seq);
+[  251.101967]
+[  251.101967]  *** DEADLOCK ***
+[  251.101967]
+[  251.101967] 3 locks held by kworker/10:1/4506:
+[  251.101967]  #0:  (events){.+.+.+}, at: [<ffffffff81154960>] process_one_work+0x200/0x530
+[  251.101967]  #1:  (hrtimer_work){+.+...}, at: [<ffffffff81154960>] process_one_work+0x200/0x530
+[  251.101967]  #2:  (hrtimer_bases.lock#11){-.-...}, at: [<ffffffff81160e7c>] retrigger_next_event+0x3c/0x70
+[  251.101967]
+[  251.101967] stack backtrace:
+[  251.101967] CPU: 10 PID: 4506 Comm: kworker/10:1 Not tainted 3.13.0-rc2-next-20131206-sasha-00005-g8be2375-dirty #4053
+[  251.101967] Workqueue: events clock_was_set_work
+
+So the best solution is to avoid calling clock_was_set_delayed() while
+holding the timekeeping lock, and instead use a flag variable to
+decide if we should call clock_was_set() once we've released the locks.
+
+This works for the case here, where do_adjtimex() was the deadlock
+trigger point. Unfortunately, in update_wall_time() we still hold
+the jiffies lock, which would deadlock with the IPI triggered by
+clock_was_set(), preventing us from calling it even after we drop the
+timekeeping lock. So instead call clock_was_set_delayed() at that point.
+
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Prarit Bhargava <prarit@redhat.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/timekeeping.c |   18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1278,7 +1278,6 @@ static inline unsigned int accumulate_ns
+                       __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
+-                      clock_was_set_delayed();
+                       clock_set = TK_CLOCK_WAS_SET;
+               }
+       }
+@@ -1442,6 +1441,19 @@ static void update_wall_time(void)
+       write_seqcount_end(&timekeeper_seq);
+ out:
+       raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
++      if (clock_was_set) {
++              /*
++               * XXX -  I'd rather we just call clock_was_set(), but
++               * since we're currently holding the jiffies lock, calling
++               * clock_was_set would trigger an ipi which would then grab
++               * the jiffies lock and we'd deadlock. :(
++               * The right solution should probably be droping
++               * the jiffies lock before calling update_wall_time
++               * but that requires some rework of the tick sched
++               * code.
++               */
++              clock_was_set_delayed();
++      }
+ }
+ /**
+@@ -1702,11 +1714,13 @@ int do_adjtimex(struct timex *txc)
+       if (tai != orig_tai) {
+               __timekeeping_set_tai_offset(tk, tai);
+               timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
+-              clock_was_set_delayed();
+       }
+       write_seqcount_end(&timekeeper_seq);
+       raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
++      if (tai != orig_tai)
++              clock_was_set();
++
+       ntp_notify_cmos_timer();
+       return ret;
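
Both hunks above follow the same pattern: note the clock change in a local flag while the timekeeping locks are held, then perform the notification only after the locks have been released (or defer it via clock_was_set_delayed() when, as in update_wall_time(), the jiffies lock is still held). The following standalone sketch illustrates that pattern in userspace C with pthreads; notify_clock_change() is a hypothetical stand-in for clock_was_set(), not the kernel API.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t timekeeper_lock = PTHREAD_MUTEX_INITIALIZER;

    /* hypothetical stand-in for clock_was_set(); may take other locks,
     * so it must not be called while timekeeper_lock is held */
    static void notify_clock_change(void)
    {
            printf("clock change notified\n");
    }

    static void adjust_time(bool tai_changed)
    {
            bool clock_set = false;

            pthread_mutex_lock(&timekeeper_lock);
            if (tai_changed)
                    clock_set = true;       /* record the event; do NOT notify here */
            pthread_mutex_unlock(&timekeeper_lock);

            if (clock_set)                  /* safe: no timekeeping lock held */
                    notify_clock_change();
    }

    int main(void)
    {
            adjust_time(true);
            return 0;
    }

Deferring the notification until after the unlock keeps the lock order one-directional (timekeeping locks are never held while acquiring notification-side locks), which is exactly what the lockdep report above was complaining about.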