3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Dec 2013 17:48:25 +0000 (09:48 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Dec 2013 17:48:25 +0000 (09:48 -0800)
added patches:
aio-restore-locking-of-ioctx-list-on-removal.patch
clockevents-add-module-refcount.patch
clockevents-get-rid-of-the-notifier-chain.patch
clockevents-prefer-cpu-local-devices-over-global-devices.patch
clockevents-split-out-selection-logic.patch
mmc-block-fix-a-bug-of-error-handling-in-mmc-driver.patch
xfs-add-capability-check-to-free-eofblocks-ioctl.patch

queue-3.10/aio-restore-locking-of-ioctx-list-on-removal.patch [new file with mode: 0644]
queue-3.10/clockevents-add-module-refcount.patch [new file with mode: 0644]
queue-3.10/clockevents-get-rid-of-the-notifier-chain.patch [new file with mode: 0644]
queue-3.10/clockevents-prefer-cpu-local-devices-over-global-devices.patch [new file with mode: 0644]
queue-3.10/clockevents-split-out-selection-logic.patch [new file with mode: 0644]
queue-3.10/mmc-block-fix-a-bug-of-error-handling-in-mmc-driver.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/xfs-add-capability-check-to-free-eofblocks-ioctl.patch [new file with mode: 0644]

diff --git a/queue-3.10/aio-restore-locking-of-ioctx-list-on-removal.patch b/queue-3.10/aio-restore-locking-of-ioctx-list-on-removal.patch
new file mode 100644 (file)
index 0000000..9a18194
--- /dev/null
@@ -0,0 +1,74 @@
+From mguzik@redhat.com  Fri Dec  6 09:24:03 2013
+From: Mateusz Guzik <mguzik@redhat.com>
+Date: Thu,  5 Dec 2013 11:09:02 +0100
+Subject: aio: restore locking of ioctx list on removal
+To: stable@vger.kernel.org
+Cc: Eryu Guan <eguan@redhat.com>, Jeff Moyer <jmoyer@redhat.com>, Kent Overstreet <kmo@daterainc.com>, linux-aio@kvack.org, linux-kernel@vger.kernel.org
+Message-ID: <1386238142-21792-1-git-send-email-mguzik@redhat.com>
+
+From: Mateusz Guzik <mguzik@redhat.com>
+
+Commit 36f5588905c10a8c4568a210d601fe8c3c27e0f0
+"aio: refcounting cleanup" resulted in ioctx_lock not being held
+during ctx removal, leaving the list susceptible to corruption.
+
+In the mainline kernel the issue went away as a side effect of
+db446a08c23d5475e6b08c87acca79ebb20f283c "aio: convert the ioctx list to
+table lookup v3".
+
+Fix the problem by restoring appropriate locking.
+
+Signed-off-by: Mateusz Guzik <mguzik@redhat.com>
+Reported-by: Eryu Guan <eguan@redhat.com>
+Cc: Jeff Moyer <jmoyer@redhat.com>
+Cc: Kent Overstreet <kmo@daterainc.com>
+Acked-by: Benjamin LaHaise <bcrl@kvack.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c |   10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -423,10 +423,12 @@ static void kill_ioctx_rcu(struct rcu_he
+  *    when the processes owning a context have all exited to encourage
+  *    the rapid destruction of the kioctx.
+  */
+-static void kill_ioctx(struct kioctx *ctx)
++static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+ {
+       if (!atomic_xchg(&ctx->dead, 1)) {
++              spin_lock(&mm->ioctx_lock);
+               hlist_del_rcu(&ctx->list);
++              spin_unlock(&mm->ioctx_lock);
+               /*
+                * It'd be more correct to do this in free_ioctx(), after all
+@@ -494,7 +496,7 @@ void exit_aio(struct mm_struct *mm)
+                */
+               ctx->mmap_size = 0;
+-              kill_ioctx(ctx);
++              kill_ioctx(mm, ctx);
+       }
+ }
+@@ -852,7 +854,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e
+       if (!IS_ERR(ioctx)) {
+               ret = put_user(ioctx->user_id, ctxp);
+               if (ret)
+-                      kill_ioctx(ioctx);
++                      kill_ioctx(current->mm, ioctx);
+               put_ioctx(ioctx);
+       }
+@@ -870,7 +872,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_
+ {
+       struct kioctx *ioctx = lookup_ioctx(ctx);
+       if (likely(NULL != ioctx)) {
+-              kill_ioctx(ioctx);
++              kill_ioctx(current->mm, ioctx);
+               put_ioctx(ioctx);
+               return 0;
+       }
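
The hunks above restore a standard RCU-list writer rule: lookups may walk the ioctx hlist locklessly under RCU, but every removal still has to take the per-mm spinlock so concurrent writers cannot corrupt the list. A minimal sketch of that pattern follows; my_mm, my_ctx and my_remove_ctx are hypothetical stand-ins for illustration, not the real fs/aio.c code.

    #include <linux/spinlock.h>
    #include <linux/rculist.h>

    struct my_ctx {
            struct hlist_node list;
    };

    struct my_mm {
            spinlock_t        ioctx_lock;   /* serializes writers of ioctx_list */
            struct hlist_head ioctx_list;   /* traversed locklessly under RCU   */
    };

    static void my_remove_ctx(struct my_mm *mm, struct my_ctx *ctx)
    {
            spin_lock(&mm->ioctx_lock);     /* exclude other list writers       */
            hlist_del_rcu(&ctx->list);      /* readers may still see the node   */
            spin_unlock(&mm->ioctx_lock);
            /* freeing ctx must still wait for an RCU grace period (call_rcu)   */
    }
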
diff --git a/queue-3.10/clockevents-add-module-refcount.patch b/queue-3.10/clockevents-add-module-refcount.patch
new file mode 100644 (file)
index 0000000..9ac09d3
--- /dev/null
@@ -0,0 +1,103 @@
+From ccf33d6880f39a35158fff66db13000ae4943fac Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 25 Apr 2013 20:31:49 +0000
+Subject: clockevents: Add module refcount
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit ccf33d6880f39a35158fff66db13000ae4943fac upstream.
+
+We want to be able to remove clockevent modules as well. Add a
+refcount so we don't remove a module with an active clock event
+device.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Magnus Damm <magnus.damm@gmail.com>
+Link: http://lkml.kernel.org/r/20130425143436.307435149@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kim Phillips <kim.phillips@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/clockchips.h   |    3 +++
+ kernel/time/clockevents.c    |    1 +
+ kernel/time/tick-broadcast.c |    3 +++
+ kernel/time/tick-common.c    |    4 ++++
+ 4 files changed, 11 insertions(+)
+
+--- a/include/linux/clockchips.h
++++ b/include/linux/clockchips.h
+@@ -30,6 +30,7 @@ enum clock_event_nofitiers {
+ #include <linux/notifier.h>
+ struct clock_event_device;
++struct module;
+ /* Clock event mode commands */
+ enum clock_event_mode {
+@@ -83,6 +84,7 @@ enum clock_event_mode {
+  * @irq:              IRQ number (only for non CPU local devices)
+  * @cpumask:          cpumask to indicate for which CPUs this device works
+  * @list:             list head for the management code
++ * @owner:            module reference
+  */
+ struct clock_event_device {
+       void                    (*event_handler)(struct clock_event_device *);
+@@ -112,6 +114,7 @@ struct clock_event_device {
+       int                     irq;
+       const struct cpumask    *cpumask;
+       struct list_head        list;
++      struct module           *owner;
+ } ____cacheline_aligned;
+ /*
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -392,6 +392,7 @@ void clockevents_exchange_device(struct
+        * released list and do a notify add later.
+        */
+       if (old) {
++              module_put(old->owner);
+               clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
+               list_del(&old->list);
+               list_add(&old->list, &clockevents_released);
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -19,6 +19,7 @@
+ #include <linux/profile.h>
+ #include <linux/sched.h>
+ #include <linux/smp.h>
++#include <linux/module.h>
+ #include "tick-internal.h"
+@@ -74,6 +75,8 @@ void tick_install_broadcast_device(struc
+            tick_broadcast_device.evtdev->rating >= dev->rating) ||
+            (dev->features & CLOCK_EVT_FEAT_C3STOP))
+               return;
++      if (!try_module_get(dev->owner))
++              return;
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
+       if (cur)
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -18,6 +18,7 @@
+ #include <linux/percpu.h>
+ #include <linux/profile.h>
+ #include <linux/sched.h>
++#include <linux/module.h>
+ #include <asm/irq_regs.h>
+@@ -261,6 +262,9 @@ void tick_check_new_device(struct clock_
+                       goto out_bc;
+       }
++      if (!try_module_get(newdev->owner))
++              return;
++
+       /*
+        * Replace the eventually existing device by the new
+        * device. If the current device is the broadcast device, do
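
The refcounting added above is the usual try_module_get()/module_put() pairing: a clockevent device is only installed if a reference on its providing module can be taken, and the reference held for the previously active device is dropped when that device is exchanged away. A rough sketch of the pairing, using a hypothetical my_dev/my_install rather than the actual clockevents structures:

    #include <linux/module.h>

    struct my_dev {
            struct module *owner;           /* module that provides this device */
    };

    static bool my_install(struct my_dev *newdev, struct my_dev **active)
    {
            if (!try_module_get(newdev->owner))
                    return false;           /* provider is being unloaded */
            if (*active)
                    module_put((*active)->owner);   /* release the old provider */
            *active = newdev;
            return true;
    }

try_module_get() on a NULL owner (a built-in driver) simply succeeds, so built-in clockevent devices are unaffected by the new check.
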
diff --git a/queue-3.10/clockevents-get-rid-of-the-notifier-chain.patch b/queue-3.10/clockevents-get-rid-of-the-notifier-chain.patch
new file mode 100644 (file)
index 0000000..57c87aa
--- /dev/null
@@ -0,0 +1,253 @@
+From 7172a286ced0c1f4f239a0fa09db54ed37d3ead2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 25 Apr 2013 20:31:47 +0000
+Subject: clockevents: Get rid of the notifier chain
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 7172a286ced0c1f4f239a0fa09db54ed37d3ead2 upstream.
+
+7+ years and still a single user. Kill it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Magnus Damm <magnus.damm@gmail.com>
+Link: http://lkml.kernel.org/r/20130425143436.098520211@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kim Phillips <kim.phillips@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/clockchips.h   |    1 -
+ kernel/time/clockevents.c    |   35 +++--------------------------------
+ kernel/time/tick-broadcast.c |    5 ++---
+ kernel/time/tick-common.c    |   30 +++++-------------------------
+ kernel/time/tick-internal.h  |    7 ++++---
+ 5 files changed, 14 insertions(+), 64 deletions(-)
+
+--- a/include/linux/clockchips.h
++++ b/include/linux/clockchips.h
+@@ -150,7 +150,6 @@ extern void clockevents_exchange_device(
+                                       struct clock_event_device *new);
+ extern void clockevents_set_mode(struct clock_event_device *dev,
+                                enum clock_event_mode mode);
+-extern int clockevents_register_notifier(struct notifier_block *nb);
+ extern int clockevents_program_event(struct clock_event_device *dev,
+                                    ktime_t expires, bool force);
+--- a/kernel/time/clockevents.c
++++ b/kernel/time/clockevents.c
+@@ -15,7 +15,6 @@
+ #include <linux/hrtimer.h>
+ #include <linux/init.h>
+ #include <linux/module.h>
+-#include <linux/notifier.h>
+ #include <linux/smp.h>
+ #include "tick-internal.h"
+@@ -23,10 +22,6 @@
+ /* The registered clock event devices */
+ static LIST_HEAD(clockevent_devices);
+ static LIST_HEAD(clockevents_released);
+-
+-/* Notification for clock events */
+-static RAW_NOTIFIER_HEAD(clockevents_chain);
+-
+ /* Protection for the above */
+ static DEFINE_RAW_SPINLOCK(clockevents_lock);
+@@ -267,30 +262,6 @@ int clockevents_program_event(struct clo
+       return (rc && force) ? clockevents_program_min_delta(dev) : rc;
+ }
+-/**
+- * clockevents_register_notifier - register a clock events change listener
+- */
+-int clockevents_register_notifier(struct notifier_block *nb)
+-{
+-      unsigned long flags;
+-      int ret;
+-
+-      raw_spin_lock_irqsave(&clockevents_lock, flags);
+-      ret = raw_notifier_chain_register(&clockevents_chain, nb);
+-      raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+-
+-      return ret;
+-}
+-
+-/*
+- * Notify about a clock event change. Called with clockevents_lock
+- * held.
+- */
+-static void clockevents_do_notify(unsigned long reason, void *dev)
+-{
+-      raw_notifier_call_chain(&clockevents_chain, reason, dev);
+-}
+-
+ /*
+  * Called after a notify add to make devices available which were
+  * released from the notifier call.
+@@ -304,7 +275,7 @@ static void clockevents_notify_released(
+                                struct clock_event_device, list);
+               list_del(&dev->list);
+               list_add(&dev->list, &clockevent_devices);
+-              clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
++              tick_check_new_device(dev);
+       }
+ }
+@@ -325,7 +296,7 @@ void clockevents_register_device(struct
+       raw_spin_lock_irqsave(&clockevents_lock, flags);
+       list_add(&dev->list, &clockevent_devices);
+-      clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
++      tick_check_new_device(dev);
+       clockevents_notify_released();
+       raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+@@ -468,7 +439,7 @@ void clockevents_notify(unsigned long re
+       int cpu;
+       raw_spin_lock_irqsave(&clockevents_lock, flags);
+-      clockevents_do_notify(reason, arg);
++      tick_notify(reason, arg);
+       switch (reason) {
+       case CLOCK_EVT_NOTIFY_CPU_DEAD:
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -65,7 +65,7 @@ static void tick_broadcast_start_periodi
+ /*
+  * Check, if the device can be utilized as broadcast device:
+  */
+-int tick_check_broadcast_device(struct clock_event_device *dev)
++void tick_install_broadcast_device(struct clock_event_device *dev)
+ {
+       struct clock_event_device *cur = tick_broadcast_device.evtdev;
+@@ -73,7 +73,7 @@ int tick_check_broadcast_device(struct c
+           (tick_broadcast_device.evtdev &&
+            tick_broadcast_device.evtdev->rating >= dev->rating) ||
+            (dev->features & CLOCK_EVT_FEAT_C3STOP))
+-              return 0;
++              return;
+       clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
+       if (cur)
+@@ -91,7 +91,6 @@ int tick_check_broadcast_device(struct c
+        */
+       if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
+               tick_clock_notify();
+-      return 1;
+ }
+ /*
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -209,11 +209,11 @@ static void tick_setup_device(struct tic
+ /*
+  * Check, if the new registered device should be used.
+  */
+-static int tick_check_new_device(struct clock_event_device *newdev)
++void tick_check_new_device(struct clock_event_device *newdev)
+ {
+       struct clock_event_device *curdev;
+       struct tick_device *td;
+-      int cpu, ret = NOTIFY_OK;
++      int cpu;
+       unsigned long flags;
+       raw_spin_lock_irqsave(&tick_device_lock, flags);
+@@ -276,18 +276,14 @@ static int tick_check_new_device(struct
+               tick_oneshot_notify();
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+-      return NOTIFY_STOP;
++      return;
+ out_bc:
+       /*
+        * Can the new device be used as a broadcast device ?
+        */
+-      if (tick_check_broadcast_device(newdev))
+-              ret = NOTIFY_STOP;
+-
++      tick_install_broadcast_device(newdev);
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+-
+-      return ret;
+ }
+ /*
+@@ -361,17 +357,10 @@ static void tick_resume(void)
+       raw_spin_unlock_irqrestore(&tick_device_lock, flags);
+ }
+-/*
+- * Notification about clock event devices
+- */
+-static int tick_notify(struct notifier_block *nb, unsigned long reason,
+-                             void *dev)
++void tick_notify(unsigned long reason, void *dev)
+ {
+       switch (reason) {
+-      case CLOCK_EVT_NOTIFY_ADD:
+-              return tick_check_new_device(dev);
+-
+       case CLOCK_EVT_NOTIFY_BROADCAST_ON:
+       case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
+       case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+@@ -405,21 +394,12 @@ static int tick_notify(struct notifier_b
+       default:
+               break;
+       }
+-
+-      return NOTIFY_OK;
+ }
+-static struct notifier_block tick_notifier = {
+-      .notifier_call = tick_notify,
+-};
+-
+ /**
+  * tick_init - initialize the tick control
+- *
+- * Register the notifier with the clockevents framework
+  */
+ void __init tick_init(void)
+ {
+-      clockevents_register_notifier(&tick_notifier);
+       tick_broadcast_init();
+ }
+--- a/kernel/time/tick-internal.h
++++ b/kernel/time/tick-internal.h
+@@ -18,6 +18,8 @@ extern int tick_do_timer_cpu __read_most
+ extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
+ extern void tick_handle_periodic(struct clock_event_device *dev);
++extern void tick_notify(unsigned long reason, void *dev);
++extern void tick_check_new_device(struct clock_event_device *dev);
+ extern void clockevents_shutdown(struct clock_event_device *dev);
+@@ -90,7 +92,7 @@ static inline bool tick_broadcast_onesho
+  */
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu);
+-extern int tick_check_broadcast_device(struct clock_event_device *dev);
++extern void tick_install_broadcast_device(struct clock_event_device *dev);
+ extern int tick_is_broadcast_device(struct clock_event_device *dev);
+ extern void tick_broadcast_on_off(unsigned long reason, int *oncpu);
+ extern void tick_shutdown_broadcast(unsigned int *cpup);
+@@ -102,9 +104,8 @@ tick_set_periodic_handler(struct clock_e
+ #else /* !BROADCAST */
+-static inline int tick_check_broadcast_device(struct clock_event_device *dev)
++static inline void tick_install_broadcast_device(struct clock_event_device *dev)
+ {
+-      return 0;
+ }
+ static inline int tick_is_broadcast_device(struct clock_event_device *dev)
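
With the chain gone, registration reaches the tick layer through a plain function call instead of raw_notifier_call_chain() fanning out to a single listener. A condensed sketch of the new shape; my_dev, my_lock, my_devices and my_check_new_device() are hypothetical stand-ins for illustration only:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_dev {
            struct list_head list;
    };

    static DEFINE_RAW_SPINLOCK(my_lock);
    static LIST_HEAD(my_devices);

    static void my_check_new_device(struct my_dev *dev)
    {
            /* decide whether dev becomes a per-cpu tick or broadcast device */
    }

    static void my_register_device(struct my_dev *dev)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&my_lock, flags);
            list_add(&dev->list, &my_devices);
            my_check_new_device(dev);       /* direct call; was a notifier hop */
            raw_spin_unlock_irqrestore(&my_lock, flags);
    }
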
diff --git a/queue-3.10/clockevents-prefer-cpu-local-devices-over-global-devices.patch b/queue-3.10/clockevents-prefer-cpu-local-devices-over-global-devices.patch
new file mode 100644 (file)
index 0000000..155cb35
--- /dev/null
@@ -0,0 +1,63 @@
+From 70e5975d3a04be5479a28eec4a2fb10f98ad2785 Mon Sep 17 00:00:00 2001
+From: Stephen Boyd <sboyd@codeaurora.org>
+Date: Thu, 13 Jun 2013 11:39:50 -0700
+Subject: clockevents: Prefer CPU local devices over global devices
+
+From: Stephen Boyd <sboyd@codeaurora.org>
+
+commit 70e5975d3a04be5479a28eec4a2fb10f98ad2785 upstream.
+
+On an SMP system with only one global clockevent and a dummy
+clockevent per CPU we run into problems. We want the dummy
+clockevents to be registered as the per CPU tick devices, but
+we can only achieve that if we register the dummy clockevents
+before the global clockevent or if we artificially inflate the
+rating of the dummy clockevents to be higher than the rating
+of the global clockevent. Failure to do so leads to boot
+hangs when the dummy timers are registered on all CPUs other
+than the one that accepted the global clockevent as its tick
+device, and there is no broadcast timer to poke the dummy
+devices.
+
+If we're registering multiple clockevents and one clockevent is
+global and the other is local to a particular CPU we should
+choose to use the local clockevent regardless of the rating of
+the device. This way, if the clockevent is a dummy it will take
+the tick device duty as long as there isn't a higher rated tick
+device and any global clockevent will be bumped out into
+broadcast mode, fixing the problem described above.
+
+Reported-and-tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
+Tested-by: soren.brinkmann@xilinx.com
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: John Stultz <john.stultz@linaro.org>
+Link: http://lkml.kernel.org/r/20130613183950.GA32061@codeaurora.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kim Phillips <kim.phillips@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/tick-common.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -234,8 +234,13 @@ static bool tick_check_preferred(struct
+                       return false;
+       }
+-      /* Use the higher rated one */
+-      return !curdev || newdev->rating > curdev->rating;
++      /*
++       * Use the higher rated one, but prefer a CPU local device with a lower
++       * rating than a non-CPU local device
++       */
++      return !curdev ||
++              newdev->rating > curdev->rating ||
++             !cpumask_equal(curdev->cpumask, newdev->cpumask);
+ }
+ /*
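
The replacement return expression encodes the whole preference rule: with no current device the new one wins, a higher rating wins, and a device whose cpumask differs from that of the currently installed (global) device wins even with a lower rating, pushing the global device out to broadcast duty. The same logic, restated as a standalone predicate purely for readability (not a proposed further change):

    static bool prefers_newdev(const struct clock_event_device *curdev,
                               const struct clock_event_device *newdev)
    {
            return !curdev ||                               /* nothing installed yet  */
                   newdev->rating > curdev->rating ||       /* strictly better rating */
                   !cpumask_equal(curdev->cpumask,
                                  newdev->cpumask);         /* cpu-local beats global */
    }
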
diff --git a/queue-3.10/clockevents-split-out-selection-logic.patch b/queue-3.10/clockevents-split-out-selection-logic.patch
new file mode 100644 (file)
index 0000000..9daa439
--- /dev/null
@@ -0,0 +1,153 @@
+From 45cb8e01b2ecef1c2afb18333e95793fa1a90281 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 25 Apr 2013 20:31:50 +0000
+Subject: clockevents: Split out selection logic
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 45cb8e01b2ecef1c2afb18333e95793fa1a90281 upstream.
+
+Split out the clockevent device selection logic. Preparatory patch to
+allow unbinding active clockevent devices.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Magnus Damm <magnus.damm@gmail.com>
+Link: http://lkml.kernel.org/r/20130425143436.431796247@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kim Phillips <kim.phillips@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/time/tick-broadcast.c |   25 ++++++++++++---
+ kernel/time/tick-common.c    |   69 ++++++++++++++++++++++---------------------
+ 2 files changed, 56 insertions(+), 38 deletions(-)
+
+--- a/kernel/time/tick-broadcast.c
++++ b/kernel/time/tick-broadcast.c
+@@ -66,19 +66,34 @@ static void tick_broadcast_start_periodi
+ /*
+  * Check, if the device can be utilized as broadcast device:
+  */
++static bool tick_check_broadcast_device(struct clock_event_device *curdev,
++                                      struct clock_event_device *newdev)
++{
++      if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
++          (newdev->features & CLOCK_EVT_FEAT_C3STOP))
++              return false;
++
++      if (tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT &&
++          !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
++              return false;
++
++      return !curdev || newdev->rating > curdev->rating;
++}
++
++/*
++ * Conditionally install/replace broadcast device
++ */
+ void tick_install_broadcast_device(struct clock_event_device *dev)
+ {
+       struct clock_event_device *cur = tick_broadcast_device.evtdev;
+-      if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
+-          (tick_broadcast_device.evtdev &&
+-           tick_broadcast_device.evtdev->rating >= dev->rating) ||
+-           (dev->features & CLOCK_EVT_FEAT_C3STOP))
++      if (!tick_check_broadcast_device(cur, dev))
+               return;
++
+       if (!try_module_get(dev->owner))
+               return;
+-      clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
++      clockevents_exchange_device(cur, dev);
+       if (cur)
+               cur->event_handler = clockevents_handle_noop;
+       tick_broadcast_device.evtdev = dev;
+--- a/kernel/time/tick-common.c
++++ b/kernel/time/tick-common.c
+@@ -207,6 +207,37 @@ static void tick_setup_device(struct tic
+               tick_setup_oneshot(newdev, handler, next_event);
+ }
++static bool tick_check_percpu(struct clock_event_device *curdev,
++                            struct clock_event_device *newdev, int cpu)
++{
++      if (!cpumask_test_cpu(cpu, newdev->cpumask))
++              return false;
++      if (cpumask_equal(newdev->cpumask, cpumask_of(cpu)))
++              return true;
++      /* Check if irq affinity can be set */
++      if (newdev->irq >= 0 && !irq_can_set_affinity(newdev->irq))
++              return false;
++      /* Prefer an existing cpu local device */
++      if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
++              return false;
++      return true;
++}
++
++static bool tick_check_preferred(struct clock_event_device *curdev,
++                               struct clock_event_device *newdev)
++{
++      /* Prefer oneshot capable device */
++      if (!(newdev->features & CLOCK_EVT_FEAT_ONESHOT)) {
++              if (curdev && (curdev->features & CLOCK_EVT_FEAT_ONESHOT))
++                      return false;
++              if (tick_oneshot_mode_active())
++                      return false;
++      }
++
++      /* Use the higher rated one */
++      return !curdev || newdev->rating > curdev->rating;
++}
++
+ /*
+  * Check, if the new registered device should be used.
+  */
+@@ -227,40 +258,12 @@ void tick_check_new_device(struct clock_
+       curdev = td->evtdev;
+       /* cpu local device ? */
+-      if (!cpumask_equal(newdev->cpumask, cpumask_of(cpu))) {
+-
+-              /*
+-               * If the cpu affinity of the device interrupt can not
+-               * be set, ignore it.
+-               */
+-              if (!irq_can_set_affinity(newdev->irq))
+-                      goto out_bc;
+-
+-              /*
+-               * If we have a cpu local device already, do not replace it
+-               * by a non cpu local device
+-               */
+-              if (curdev && cpumask_equal(curdev->cpumask, cpumask_of(cpu)))
+-                      goto out_bc;
+-      }
++      if (!tick_check_percpu(curdev, newdev, cpu))
++              goto out_bc;
+-      /*
+-       * If we have an active device, then check the rating and the oneshot
+-       * feature.
+-       */
+-      if (curdev) {
+-              /*
+-               * Prefer one shot capable devices !
+-               */
+-              if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
+-                  !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
+-                      goto out_bc;
+-              /*
+-               * Check the rating
+-               */
+-              if (curdev->rating >= newdev->rating)
+-                      goto out_bc;
+-      }
++      /* Preference decision */
++      if (!tick_check_preferred(curdev, newdev))
++              goto out_bc;
+       if (!try_module_get(newdev->owner))
+               return;
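
After the split, the caller reads as three questions asked in order: is the device usable on this CPU at all, is it preferable to the currently installed one, and can its providing module be pinned; anything rejected by the first two falls through to the broadcast path. A condensed sketch of that call shape (helper names as in the hunks above; the tick_device lookup, locking and the actual install step are omitted):

    static void my_check_new_device(struct clock_event_device *curdev,
                                    struct clock_event_device *newdev, int cpu)
    {
            if (!tick_check_percpu(curdev, newdev, cpu))    /* wrong CPU / affinity */
                    goto out_bc;
            if (!tick_check_preferred(curdev, newdev))      /* rating / oneshot     */
                    goto out_bc;
            if (!try_module_get(newdev->owner))             /* provider going away  */
                    return;
            /* ... exchange curdev for newdev and set it up as the tick device ... */
            return;
    out_bc:
            tick_install_broadcast_device(newdev);          /* maybe use as broadcast */
    }
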
diff --git a/queue-3.10/mmc-block-fix-a-bug-of-error-handling-in-mmc-driver.patch b/queue-3.10/mmc-block-fix-a-bug-of-error-handling-in-mmc-driver.patch
new file mode 100644 (file)
index 0000000..3f5cbb5
--- /dev/null
@@ -0,0 +1,134 @@
+From c8760069627ad3b0dbbea170f0c4c58b16e18d3d Mon Sep 17 00:00:00 2001
+From: KOBAYASHI Yoshitake <yoshitake.kobayashi@toshiba.co.jp>
+Date: Sun, 7 Jul 2013 07:35:45 +0900
+Subject: mmc: block: fix a bug of error handling in MMC driver
+
+From: KOBAYASHI Yoshitake <yoshitake.kobayashi@toshiba.co.jp>
+
+commit c8760069627ad3b0dbbea170f0c4c58b16e18d3d upstream.
+
+The current MMC driver doesn't handle the generic error bit (bit 19 of
+the device status) in the write sequence. As a result, write data is
+lost when a generic error occurs. For example, a generic error while
+updating filesystem management information causes a loss of write data
+and corrupts the filesystem. In the worst case, the system will no
+longer boot.
+
+This patch adds the following functionality:
+  1. Error checking of the responses to CMD12 and CMD13 in the
+     write command sequence
+  2. Retrying the write sequence when a generic error occurs
+
+For v2, messages are added to show what occurred.
+
+Signed-off-by: KOBAYASHI Yoshitake <yoshitake.kobayashi@toshiba.co.jp>
+Signed-off-by: Chris Ball <cjb@laptop.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/mmc/card/block.c |   45 ++++++++++++++++++++++++++++++++++++++++++---
+ 1 file changed, 42 insertions(+), 3 deletions(-)
+
+--- a/drivers/mmc/card/block.c
++++ b/drivers/mmc/card/block.c
+@@ -769,7 +769,7 @@ static int mmc_blk_cmd_error(struct requ
+  * Otherwise we don't understand what happened, so abort.
+  */
+ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+-      struct mmc_blk_request *brq, int *ecc_err)
++      struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+ {
+       bool prev_cmd_status_valid = true;
+       u32 status, stop_status = 0;
+@@ -807,6 +807,16 @@ static int mmc_blk_cmd_recovery(struct m
+           (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+               *ecc_err = 1;
++      /* Flag General errors */
++      if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++              if ((status & R1_ERROR) ||
++                      (brq->stop.resp[0] & R1_ERROR)) {
++                      pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
++                             req->rq_disk->disk_name, __func__,
++                             brq->stop.resp[0], status);
++                      *gen_err = 1;
++              }
++
+       /*
+        * Check the current card state.  If it is in some data transfer
+        * mode, tell it to stop (and hopefully transition back to TRAN.)
+@@ -826,6 +836,13 @@ static int mmc_blk_cmd_recovery(struct m
+                       return ERR_ABORT;
+               if (stop_status & R1_CARD_ECC_FAILED)
+                       *ecc_err = 1;
++              if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
++                      if (stop_status & R1_ERROR) {
++                              pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++                                     req->rq_disk->disk_name, __func__,
++                                     stop_status);
++                              *gen_err = 1;
++                      }
+       }
+       /* Check for set block count errors */
+@@ -1069,7 +1086,7 @@ static int mmc_blk_err_check(struct mmc_
+                                                   mmc_active);
+       struct mmc_blk_request *brq = &mq_mrq->brq;
+       struct request *req = mq_mrq->req;
+-      int ecc_err = 0;
++      int ecc_err = 0, gen_err = 0;
+       /*
+        * sbc.error indicates a problem with the set block count
+@@ -1083,7 +1100,7 @@ static int mmc_blk_err_check(struct mmc_
+        */
+       if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+           brq->data.error) {
+-              switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
++              switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+               case ERR_RETRY:
+                       return MMC_BLK_RETRY;
+               case ERR_ABORT:
+@@ -1115,6 +1132,14 @@ static int mmc_blk_err_check(struct mmc_
+               u32 status;
+               unsigned long timeout;
++              /* Check stop command response */
++              if (brq->stop.resp[0] & R1_ERROR) {
++                      pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
++                             req->rq_disk->disk_name, __func__,
++                             brq->stop.resp[0]);
++                      gen_err = 1;
++              }
++
+               timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
+               do {
+                       int err = get_card_status(card, &status, 5);
+@@ -1124,6 +1149,13 @@ static int mmc_blk_err_check(struct mmc_
+                               return MMC_BLK_CMD_ERR;
+                       }
++                      if (status & R1_ERROR) {
++                              pr_err("%s: %s: general error sending status command, card status %#x\n",
++                                     req->rq_disk->disk_name, __func__,
++                                     status);
++                              gen_err = 1;
++                      }
++
+                       /* Timeout if the device never becomes ready for data
+                        * and never leaves the program state.
+                        */
+@@ -1143,6 +1175,13 @@ static int mmc_blk_err_check(struct mmc_
+                        (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+       }
++      /* if general error occurs, retry the write operation. */
++      if (gen_err) {
++              pr_warn("%s: retrying write for general error\n",
++                              req->rq_disk->disk_name);
++              return MMC_BLK_RETRY;
++      }
++
+       if (brq->data.error) {
+               pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+                      req->rq_disk->disk_name, brq->data.error,
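
Each of the added hunks performs the same test: if R1_ERROR, the "generic error" bit (bit 19) of the card status, shows up in a CMD12 or CMD13 response during a write, it is recorded and the request is retried instead of being reported as successful. A minimal sketch of that test with a hypothetical helper name (not the actual driver function):

    #include <linux/types.h>
    #include <linux/printk.h>
    #include <linux/mmc/mmc.h>              /* R1_ERROR */

    /* Record the generic-error bit from a stop/status response on a write. */
    static bool my_check_gen_err(const char *name, u32 status, int *gen_err)
    {
            if (!(status & R1_ERROR))
                    return false;
            pr_err("%s: general error, card status %#x\n", name, status);
            *gen_err = 1;                   /* caller turns this into MMC_BLK_RETRY */
            return true;
    }
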
diff --git a/queue-3.10/series b/queue-3.10/series
index ae9bc57c8a4e5e8cd53062860ca4d73c4b44bf03..8d89e850db1d5e4156dffde67ded57635a7cbb10 100644 (file)
@@ -39,3 +39,10 @@ inet-fix-possible-seqlock-deadlocks.patch
 ipv6-fix-possible-seqlock-deadlock-in-ip6_finish_output2.patch
 pktgen-xfrm-update-ipv4-header-total-len-and-checksum-after-tranformation.patch
 tcp-gso-fix-truesize-tracking.patch
+xfs-add-capability-check-to-free-eofblocks-ioctl.patch
+mmc-block-fix-a-bug-of-error-handling-in-mmc-driver.patch
+aio-restore-locking-of-ioctx-list-on-removal.patch
+clockevents-get-rid-of-the-notifier-chain.patch
+clockevents-add-module-refcount.patch
+clockevents-split-out-selection-logic.patch
+clockevents-prefer-cpu-local-devices-over-global-devices.patch
diff --git a/queue-3.10/xfs-add-capability-check-to-free-eofblocks-ioctl.patch b/queue-3.10/xfs-add-capability-check-to-free-eofblocks-ioctl.patch
new file mode 100644 (file)
index 0000000..e396a55
--- /dev/null
@@ -0,0 +1,45 @@
+From 8c567a7fab6e086a0284eee2db82348521e7120c Mon Sep 17 00:00:00 2001
+From: Dwight Engen <dwight.engen@oracle.com>
+Date: Thu, 15 Aug 2013 14:08:03 -0400
+Subject: xfs: add capability check to free eofblocks ioctl
+
+From: Dwight Engen <dwight.engen@oracle.com>
+
+commit 8c567a7fab6e086a0284eee2db82348521e7120c upstream.
+
+Check for CAP_SYS_ADMIN since the caller can truncate preallocated
+blocks from files they neither own nor have write access to. A more
+fine-grained access check was considered: require the caller to
+specify their own uid/gid and use inode_permission to check for
+write access, but this would not catch the case of an inode not
+reachable via path traversal from the caller's mount namespace.
+
+Add a check for a read-only filesystem to the free eofblocks ioctl.
+
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Reviewed-by: Gao feng <gaofeng@cn.fujitsu.com>
+Signed-off-by: Dwight Engen <dwight.engen@oracle.com>
+Signed-off-by: Ben Myers <bpm@sgi.com>
+Cc: Kees Cook <keescook@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/xfs/xfs_ioctl.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/fs/xfs/xfs_ioctl.c
++++ b/fs/xfs/xfs_ioctl.c
+@@ -1612,6 +1612,12 @@ xfs_file_ioctl(
+       case XFS_IOC_FREE_EOFBLOCKS: {
+               struct xfs_eofblocks eofb;
++              if (!capable(CAP_SYS_ADMIN))
++                      return -EPERM;
++
++              if (mp->m_flags & XFS_MOUNT_RDONLY)
++                      return -XFS_ERROR(EROFS);
++
+               if (copy_from_user(&eofb, arg, sizeof(eofb)))
+                       return -XFS_ERROR(EFAULT);
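
Both added guards run before anything is copied from userspace: the privilege check first, then the read-only mount check. A sketch of that order factored into a hypothetical helper (assuming the surrounding xfs_ioctl.c context for xfs_mount, XFS_MOUNT_RDONLY and XFS_ERROR); it is illustrative only, not part of the queued patch:

    static int my_free_eofblocks_checks(struct xfs_mount *mp)
    {
            if (!capable(CAP_SYS_ADMIN))            /* may strip blocks from others' files */
                    return -EPERM;
            if (mp->m_flags & XFS_MOUNT_RDONLY)     /* never modify a read-only fs */
                    return -XFS_ERROR(EROFS);
            return 0;       /* safe to copy in the xfs_eofblocks argument and proceed */
    }
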