git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.13-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Nov 2017 07:56:28 +0000 (08:56 +0100)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 6 Nov 2017 07:56:28 +0000 (08:56 +0100)
added patches:
irqchip-irq-mvebu-gicp-add-missing-spin_lock-init.patch
x86-mcelog-get-rid-of-rcu-remnants.patch

queue-4.13/irqchip-irq-mvebu-gicp-add-missing-spin_lock-init.patch [new file with mode: 0644]
queue-4.13/series
queue-4.13/x86-mcelog-get-rid-of-rcu-remnants.patch [new file with mode: 0644]

diff --git a/queue-4.13/irqchip-irq-mvebu-gicp-add-missing-spin_lock-init.patch b/queue-4.13/irqchip-irq-mvebu-gicp-add-missing-spin_lock-init.patch
new file mode 100644 (file)
index 0000000..44def4f
--- /dev/null
@@ -0,0 +1,42 @@
+From c9bb86338a6bb91e4d32db04feb6b8d423e04d06 Mon Sep 17 00:00:00 2001
+From: Antoine Tenart <antoine.tenart@free-electrons.com>
+Date: Wed, 25 Oct 2017 09:23:26 +0200
+Subject: irqchip/irq-mvebu-gicp: Add missing spin_lock init
+
+From: Antoine Tenart <antoine.tenart@free-electrons.com>
+
+commit c9bb86338a6bb91e4d32db04feb6b8d423e04d06 upstream.
+
+A spin lock is used in the irq-mvebu-gicp driver, but it is never
+initialized. This patch adds the missing spin_lock_init() call in the
+driver's probe function.
+
+Fixes: a68a63cb4dfc ("irqchip/irq-mvebu-gicp: Add new driver for Marvell GICP")
+Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: gregory.clement@free-electrons.com
+Acked-by: marc.zyngier@arm.com
+Cc: thomas.petazzoni@free-electrons.com
+Cc: andrew@lunn.ch
+Cc: jason@lakedaemon.net
+Cc: nadavh@marvell.com
+Cc: miquel.raynal@free-electrons.com
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: sebastian.hesselbarth@gmail.com
+Link: https://lkml.kernel.org/r/20171025072326.21030-1-antoine.tenart@free-electrons.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/irqchip/irq-mvebu-gicp.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/irqchip/irq-mvebu-gicp.c
++++ b/drivers/irqchip/irq-mvebu-gicp.c
+@@ -194,6 +194,7 @@ static int mvebu_gicp_probe(struct platf
+               return -ENOMEM;
+ 
+       gicp->dev = &pdev->dev;
++      spin_lock_init(&gicp->spi_lock);
+ 
+       gicp->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!gicp->res)
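For context, a minimal sketch of the pattern the patch above restores (illustrative only; the struct and function names below are made up, not the actual irq-mvebu-gicp code): a spinlock embedded in driver-private data must be initialized with spin_lock_init() before its first use, typically in the driver's probe function, because zeroed memory is not a properly initialized, lockdep-registered lock.

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_priv {
        struct device *dev;
        spinlock_t lock;        /* protects the driver's shared state */
};

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->dev = &pdev->dev;

        /*
         * devm_kzalloc() zeroes the structure, but an all-zero spinlock_t
         * is not an initialized lock: with lockdep/CONFIG_DEBUG_SPINLOCK
         * enabled, the first spin_lock() on it triggers a warning, which
         * is the class of report the one-line fix above addresses.
         */
        spin_lock_init(&priv->lock);

        platform_set_drvdata(pdev, priv);
        return 0;
}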
diff --git a/queue-4.13/series b/queue-4.13/series
index 0936d04b1d1325cad0352b6b2681697929389b7d..afcf4b89277ab5e44bd8ca5256fbf85f85b28f49 100644 (file)
@@ -32,3 +32,5 @@ x86-cpu-fix-up-cpu-mhz-in-proc-cpuinfo.patch
 powerpc-kprobes-dereference-function-pointers-only-if-the-address-does-not-belong-to-kernel-text.patch
 futex-fix-more-put_pi_state-vs.-exit_pi_state_list-races.patch
 perf-cgroup-fix-perf-cgroup-hierarchy-support.patch
+x86-mcelog-get-rid-of-rcu-remnants.patch
+irqchip-irq-mvebu-gicp-add-missing-spin_lock-init.patch
diff --git a/queue-4.13/x86-mcelog-get-rid-of-rcu-remnants.patch b/queue-4.13/x86-mcelog-get-rid-of-rcu-remnants.patch
new file mode 100644 (file)
index 0000000..e0802bc
--- /dev/null
@@ -0,0 +1,216 @@
+From 7298f08ea8870d44d36c7d6cd07dd0303faef6c2 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@suse.de>
+Date: Wed, 1 Nov 2017 17:47:54 +0100
+Subject: x86/mcelog: Get rid of RCU remnants
+
+From: Borislav Petkov <bp@suse.de>
+
+commit 7298f08ea8870d44d36c7d6cd07dd0303faef6c2 upstream.
+
+Jeremy reported a suspicious RCU usage warning in mcelog.
+
+/dev/mcelog is called in process context now as part of the notifier
+chain and doesn't need any of the fancy RCU and lockless accesses which
+it did in atomic context.
+
+Axe it all in favor of a simple mutex synchronization which cures the
+problem reported.
+
+Fixes: 5de97c9f6d85 ("x86/mce: Factor out and deprecate the /dev/mcelog driver")
+Reported-by: Jeremy Cline <jcline@redhat.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-and-tested-by: Tony Luck <tony.luck@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: linux-edac@vger.kernel.org
+Cc: Laura Abbott <labbott@redhat.com>
+Link: https://lkml.kernel.org/r/20171101164754.xzzmskl4ngrqc5br@pd.tnic
+Link: https://bugzilla.redhat.com/show_bug.cgi?id=1498969
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/mcheck/dev-mcelog.c |  119 ++++++--------------------------
+ 1 file changed, 26 insertions(+), 93 deletions(-)
+
+--- a/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
++++ b/arch/x86/kernel/cpu/mcheck/dev-mcelog.c
+@@ -24,14 +24,6 @@ static DEFINE_MUTEX(mce_chrdev_read_mute
+ static char mce_helper[128];
+ static char *mce_helper_argv[2] = { mce_helper, NULL };
+-#define mce_log_get_idx_check(p) \
+-({ \
+-      RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \
+-                       !lockdep_is_held(&mce_chrdev_read_mutex), \
+-                       "suspicious mce_log_get_idx_check() usage"); \
+-      smp_load_acquire(&(p)); \
+-})
+-
+ /*
+  * Lockless MCE logging infrastructure.
+  * This avoids deadlocks on printk locks without having to break locks. Also
+@@ -53,43 +45,32 @@ static int dev_mce_log(struct notifier_b
+                               void *data)
+ {
+       struct mce *mce = (struct mce *)data;
+-      unsigned int next, entry;
++      unsigned int entry;
++
++      mutex_lock(&mce_chrdev_read_mutex);
+-      wmb();
+-      for (;;) {
+-              entry = mce_log_get_idx_check(mcelog.next);
+-              for (;;) {
+-
+-                      /*
+-                       * When the buffer fills up discard new entries.
+-                       * Assume that the earlier errors are the more
+-                       * interesting ones:
+-                       */
+-                      if (entry >= MCE_LOG_LEN) {
+-                              set_bit(MCE_OVERFLOW,
+-                                      (unsigned long *)&mcelog.flags);
+-                              return NOTIFY_OK;
+-                      }
+-                      /* Old left over entry. Skip: */
+-                      if (mcelog.entry[entry].finished) {
+-                              entry++;
+-                              continue;
+-                      }
+-                      break;
+-              }
+-              smp_rmb();
+-              next = entry + 1;
+-              if (cmpxchg(&mcelog.next, entry, next) == entry)
+-                      break;
++      entry = mcelog.next;
++
++      /*
++       * When the buffer fills up discard new entries. Assume that the
++       * earlier errors are the more interesting ones:
++       */
++      if (entry >= MCE_LOG_LEN) {
++              set_bit(MCE_OVERFLOW, (unsigned long *)&mcelog.flags);
++              goto unlock;
+       }
++
++      mcelog.next = entry + 1;
++
+       memcpy(mcelog.entry + entry, mce, sizeof(struct mce));
+-      wmb();
+       mcelog.entry[entry].finished = 1;
+-      wmb();
+       /* wake processes polling /dev/mcelog */
+       wake_up_interruptible(&mce_chrdev_wait);
++unlock:
++      mutex_unlock(&mce_chrdev_read_mutex);
++
+       return NOTIFY_OK;
+ }
+@@ -177,13 +158,6 @@ static int mce_chrdev_release(struct ino
+       return 0;
+ }
+-static void collect_tscs(void *data)
+-{
+-      unsigned long *cpu_tsc = (unsigned long *)data;
+-
+-      cpu_tsc[smp_processor_id()] = rdtsc();
+-}
+-
+ static int mce_apei_read_done;
+ /* Collect MCE record of previous boot in persistent storage via APEI ERST. */
+@@ -231,14 +205,9 @@ static ssize_t mce_chrdev_read(struct fi
+                               size_t usize, loff_t *off)
+ {
+       char __user *buf = ubuf;
+-      unsigned long *cpu_tsc;
+-      unsigned prev, next;
++      unsigned next;
+       int i, err;
+-      cpu_tsc = kmalloc(nr_cpu_ids * sizeof(long), GFP_KERNEL);
+-      if (!cpu_tsc)
+-              return -ENOMEM;
+-
+       mutex_lock(&mce_chrdev_read_mutex);
+       if (!mce_apei_read_done) {
+@@ -247,65 +216,29 @@ static ssize_t mce_chrdev_read(struct fi
+                       goto out;
+       }
+-      next = mce_log_get_idx_check(mcelog.next);
+-
+       /* Only supports full reads right now */
+       err = -EINVAL;
+       if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce))
+               goto out;
++      next = mcelog.next;
+       err = 0;
+-      prev = 0;
+-      do {
+-              for (i = prev; i < next; i++) {
+-                      unsigned long start = jiffies;
+-                      struct mce *m = &mcelog.entry[i];
+-
+-                      while (!m->finished) {
+-                              if (time_after_eq(jiffies, start + 2)) {
+-                                      memset(m, 0, sizeof(*m));
+-                                      goto timeout;
+-                              }
+-                              cpu_relax();
+-                      }
+-                      smp_rmb();
+-                      err |= copy_to_user(buf, m, sizeof(*m));
+-                      buf += sizeof(*m);
+-timeout:
+-                      ;
+-              }
+-
+-              memset(mcelog.entry + prev, 0,
+-                     (next - prev) * sizeof(struct mce));
+-              prev = next;
+-              next = cmpxchg(&mcelog.next, prev, 0);
+-      } while (next != prev);
+-      synchronize_sched();
+-
+-      /*
+-       * Collect entries that were still getting written before the
+-       * synchronize.
+-       */
+-      on_each_cpu(collect_tscs, cpu_tsc, 1);
+-
+-      for (i = next; i < MCE_LOG_LEN; i++) {
++      for (i = 0; i < next; i++) {
+               struct mce *m = &mcelog.entry[i];
+-              if (m->finished && m->tsc < cpu_tsc[m->cpu]) {
+-                      err |= copy_to_user(buf, m, sizeof(*m));
+-                      smp_rmb();
+-                      buf += sizeof(*m);
+-                      memset(m, 0, sizeof(*m));
+-              }
++              err |= copy_to_user(buf, m, sizeof(*m));
++              buf += sizeof(*m);
+       }
++      memset(mcelog.entry, 0, next * sizeof(struct mce));
++      mcelog.next = 0;
++
+       if (err)
+               err = -EFAULT;
+ out:
+       mutex_unlock(&mce_chrdev_read_mutex);
+-      kfree(cpu_tsc);
+       return err ? err : buf - ubuf;
+ }
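For context, a minimal sketch of the locking scheme this patch switches to (illustrative only; the names and sizes below are made up, not the actual mcelog code): once both the producer (the notifier callback) and the reader run in process context and may sleep, a single mutex around a fixed-size record buffer replaces the lockless cmpxchg/memory-barrier scheme that was only needed while the logger ran in atomic context.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_LOG_LEN 32

struct demo_record {
        u64 status;
        u64 addr;
};

static struct {
        unsigned int next;                      /* first free slot */
        unsigned int overflow;
        struct demo_record entry[DEMO_LOG_LEN];
} demo_log;

static DEFINE_MUTEX(demo_log_mutex);

/* Producer: called from a blocking notifier, i.e. process context. */
static int demo_log_record(const struct demo_record *rec)
{
        int ret = 0;

        mutex_lock(&demo_log_mutex);

        if (demo_log.next >= DEMO_LOG_LEN) {
                /* Buffer full: keep the earlier, more interesting entries. */
                demo_log.overflow = 1;
                ret = -ENOSPC;
                goto unlock;
        }

        demo_log.entry[demo_log.next++] = *rec;
unlock:
        mutex_unlock(&demo_log_mutex);
        return ret;
}

/*
 * Reader: copies out every logged record under the same mutex and resets
 * the buffer. @out must have room for DEMO_LOG_LEN records.
 */
static unsigned int demo_log_drain(struct demo_record *out)
{
        unsigned int i, n;

        mutex_lock(&demo_log_mutex);

        n = demo_log.next;
        for (i = 0; i < n; i++)
                out[i] = demo_log.entry[i];

        memset(demo_log.entry, 0, n * sizeof(demo_log.entry[0]));
        demo_log.next = 0;
        demo_log.overflow = 0;

        mutex_unlock(&demo_log_mutex);
        return n;
}

Because everything runs under the mutex, no wmb()/smp_rmb() pairs, no per-entry finished flags and no cmpxchg() retry loops are needed; correctness follows from mutual exclusion alone, which is the simplification the patch makes.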