3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Jun 2014 00:23:37 +0000 (17:23 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sat, 28 Jun 2014 00:23:37 +0000 (17:23 -0700)
added patches:
genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
x86-x32-use-compat-shims-for-io_-setup-submit.patch

queue-3.10/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch [new file with mode: 0644]
queue-3.10/series
queue-3.10/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch [new file with mode: 0644]
queue-3.10/x86-x32-use-compat-shims-for-io_-setup-submit.patch [new file with mode: 0644]

diff --git a/queue-3.10/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch b/queue-3.10/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
new file mode 100644
index 0000000..d0501bc
--- /dev/null
@@ -0,0 +1,215 @@
+From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 7 Mar 2013 14:53:45 +0100
+Subject: genirq: Sanitize spurious interrupt detection of threaded irqs
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c upstream.
+
+Till reported that the spurious interrupt detection of threaded
+interrupts is broken in two ways:
+
+- note_interrupt() is called for each action thread of a shared
+  interrupt line. That's wrong, as we are only interested in whether
+  none of the device drivers felt responsible for the interrupt, but
+  by calling it multiple times for a single interrupt line we account
+  IRQ_NONE even if one of the drivers felt responsible.
+
+- note_interrupt() when called from the thread handler is not
+  serialized. That leaves the members of irq_desc which are used for
+  the spurious detection unprotected.
+
+To solve this we need to defer the spurious detection of a threaded
+interrupt to the next hardware interrupt context where we have
+implicit serialization.
+
+If note_interrupt is called with action_ret == IRQ_WAKE_THREAD, we
+check whether the previous interrupt requested a deferred check. If
+not, we request a deferred check for the next hardware interrupt and
+return.
+
+If set, we check whether one of the interrupt threads signaled
+success. Depending on this information we feed the result into the
+spurious detector.
+
+If one primary handler of a shared interrupt returns IRQ_HANDLED, we
+disable the deferred check of irq threads on the same line, as we have
+found at least one device driver that cared.
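+
+For orientation, a minimal sketch of the driver-side pattern this
+accounting observes; the handler names and the "demo" device are
+hypothetical. The primary handler's IRQ_WAKE_THREAD is what triggers
+the deferral described above, and the thread function's IRQ_HANDLED
+is what feeds the new threads_handled counter:
+
+    #include <linux/interrupt.h>
+
+    /* Primary handler: runs in hard-irq context, defers the real work. */
+    static irqreturn_t demo_hardirq(int irq, void *dev)
+    {
+            return IRQ_WAKE_THREAD; /* note_interrupt() defers its verdict */
+    }
+
+    /* Thread function: success here increments desc->threads_handled,
+     * which the next hard interrupt compares against
+     * threads_handled_last. */
+    static irqreturn_t demo_thread_fn(int irq, void *dev)
+    {
+            return IRQ_HANDLED;
+    }
+
+    /* Registration, e.g. from a probe routine:
+     *     request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
+     *                          IRQF_SHARED, "demo", dev);
+     */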
+
+Reported-by: Till Straumann <strauman@slac.stanford.edu>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Austin Schuh <austin@peloton-tech.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Wolfgang Grandegger <wg@grandegger.com>
+Cc: Pavel Pisa <pisa@cmp.felk.cvut.cz>
+Cc: Marc Kleine-Budde <mkl@pengutronix.de>
+Cc: linux-can@vger.kernel.org
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/irqdesc.h |    4 +
+ kernel/irq/manage.c     |    4 -
+ kernel/irq/spurious.c   |  106 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 108 insertions(+), 6 deletions(-)
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct irq_desc;
+  * @irq_count:                stats field to detect stalled irqs
+  * @last_unhandled:   aging timer for unhandled count
+  * @irqs_unhandled:   stats field for spurious unhandled interrupts
++ * @threads_handled:  stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
+  * @lock:             locking for SMP
+  * @affinity_hint:    hint to user space for preferred irq affinity
+  * @affinity_notify:  context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+       unsigned int            irq_count;      /* For detecting broken IRQs */
+       unsigned long           last_unhandled; /* Aging timer for unhandled count */
+       unsigned int            irqs_unhandled;
++      atomic_t                threads_handled;
++      int                     threads_handled_last;
+       raw_spinlock_t          lock;
+       struct cpumask          *percpu_enabled;
+ #ifdef CONFIG_SMP
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -861,8 +861,8 @@ static int irq_thread(void *data)
+               irq_thread_check_affinity(desc, action);
+               action_ret = handler_fn(desc, action);
+-              if (!noirqdebug)
+-                      note_interrupt(action->irq, desc, action_ret);
++              if (action_ret == IRQ_HANDLED)
++                      atomic_inc(&desc->threads_handled);
+               wake_threads_waitq(desc);
+       }
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, stru
+       return action && (action->flags & IRQF_IRQPOLL);
+ }
++#define SPURIOUS_DEFERRED     0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+                   irqreturn_t action_ret)
+ {
+       if (desc->istate & IRQS_POLL_INPROGRESS)
+               return;
+-      /* we get here again via the threaded handler */
+-      if (action_ret == IRQ_WAKE_THREAD)
+-              return;
+-
+       if (bad_action_ret(action_ret)) {
+               report_bad_irq(irq, desc, action_ret);
+               return;
+       }
++      /*
++       * We cannot call note_interrupt from the threaded handler
++       * because we need to look at the compound of all handlers
++       * (primary and threaded). Aside from that, in the threaded
++       * shared case we have no serialization against an incoming
++       * hardware interrupt while we are dealing with a threaded
++       * result.
++       *
++       * So in case a thread is woken, we just note the fact and
++       * defer the analysis to the next hardware interrupt.
++       *
++       * The threaded handlers store whether they successfully
++       * handled an interrupt and we check whether that number
++       * changed versus the last invocation.
++       *
++       * We could handle all interrupts with the delayed-by-one
++       * mechanism, but for the non-forced threaded case we'd just
++       * add pointless overhead to the straight hardirq interrupts
++       * for the sake of a few lines less code.
++       */
++      if (action_ret & IRQ_WAKE_THREAD) {
++              /*
++               * There is a thread woken. Check whether one of the
++               * shared primary handlers returned IRQ_HANDLED. If
++               * not we defer the spurious detection to the next
++               * interrupt.
++               */
++              if (action_ret == IRQ_WAKE_THREAD) {
++                      int handled;
++                      /*
++                       * We use bit 31 of threads_handled_last to
++                       * denote that deferred spurious detection is
++                       * active. No locking is necessary as
++                       * threads_handled_last is only accessed here
++                       * and we have the guarantee that hard
++                       * interrupts are not reentrant.
++                       */
++                      if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++                              desc->threads_handled_last |= SPURIOUS_DEFERRED;
++                              return;
++                      }
++                      /*
++                       * Check whether one of the threaded handlers
++                       * returned IRQ_HANDLED since the last
++                       * interrupt happened.
++                       *
++                       * For simplicity we just set bit 31, as it is
++                       * set in threads_handled_last as well. So we
++                       * avoid extra masking. And we really do not
++                       * care about the high bits of the handled
++                       * count. We just care about the count being
++                       * different than the one we saw before.
++                       */
++                      handled = atomic_read(&desc->threads_handled);
++                      handled |= SPURIOUS_DEFERRED;
++                      if (handled != desc->threads_handled_last) {
++                              action_ret = IRQ_HANDLED;
++                              /*
++                               * Note: We keep the SPURIOUS_DEFERRED
++                               * bit set. We are handling the
++                               * previous invocation right now.
++                               * Keep it for the current one, so the
++                               * next hardware interrupt will
++                               * account for it.
++                               */
++                              desc->threads_handled_last = handled;
++                      } else {
++                              /*
++                               * None of the threaded handlers felt
++                               * responsible for the last interrupt.
++                               *
++                               * We keep the SPURIOUS_DEFERRED bit
++                               * set in threads_handled_last as we
++                               * need to account for the current
++                               * interrupt as well.
++                               */
++                              action_ret = IRQ_NONE;
++                      }
++              } else {
++                      /*
++                       * One of the primary handlers returned
++                       * IRQ_HANDLED. So we don't care about the
++                       * threaded handlers on the same line. Clear
++                       * the deferred detection bit.
++                       *
++                       * In theory we could/should check whether the
++                       * deferred bit is set and take the result of
++                       * the previous run into account here as
++                       * well. But it's really not worth the
++                       * trouble. If every other interrupt is
++                       * handled we never trigger the spurious
++                       * detector. And if this is just the one out
++                       * of 100k unhandled ones which is handled
++                       * then we merely delay the spurious detection
++                       * by one hard interrupt. Not a real problem.
++                       */
++                      desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++              }
++      }
++
+       if (unlikely(action_ret == IRQ_NONE)) {
+               /*
+                * If we are seeing only the odd spurious IRQ caused by
diff --git a/queue-3.10/series b/queue-3.10/series
index db85729f37884e4f30dc0a987dbb5c48f31dd7c0..a6d3877defccf4c84ec04ddb1cd7c5a4d89a6ec3 100644
@@ -54,3 +54,6 @@ target-use-complete_all-for-se_cmd-t_transport_stop_comp.patch
 iscsi-target-fix-abort_task-connection-reset-iscsi_queue_req-memory-leak.patch
 target-report-correct-response-length-for-some-commands.patch
 target-explicitly-clear-ramdisk_mcp-backend-pages.patch
+x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
+x86-x32-use-compat-shims-for-io_-setup-submit.patch
+genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
diff --git a/queue-3.10/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch b/queue-3.10/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
new file mode 100644
index 0000000..76753fc
--- /dev/null
@@ -0,0 +1,37 @@
+From 246f2d2ee1d715e1077fc47d61c394569c8ee692 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+Date: Wed, 30 Apr 2014 14:03:25 -0700
+Subject: x86-32, espfix: Remove filter for espfix32 due to race
+
+From: "H. Peter Anvin" <hpa@linux.intel.com>
+
+commit 246f2d2ee1d715e1077fc47d61c394569c8ee692 upstream.
+
+It is not safe to use LAR to filter when to go down the espfix path,
+because the LDT is per-process (rather than per-thread) and another
+thread might change the descriptors behind our back.  Fortunately it
+is always *safe* (if a bit slow) to go down the espfix path, and a
+32-bit LDT stack segment is extremely rare.
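+
+As a rough illustration of the race (not part of the patch), the
+sketch below shows how a sibling thread can rewrite a shared LDT
+descriptor at any time; the entry number and the endless toggle are
+hypothetical, and a real trigger would additionally have to return to
+the LDT stack segment at exactly the racy moment:
+
+    #include <asm/ldt.h>
+    #include <pthread.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    /* Sibling thread: keeps flipping LDT entry 0 between a 16-bit and
+     * a 32-bit stack segment. The LDT is per-process, so any check
+     * another thread's kernel exit path did via LAR is already stale. */
+    static void *flip_ldt(void *arg)
+    {
+            struct user_desc d = {
+                    .entry_number   = 0,        /* hypothetical entry */
+                    .base_addr      = 0,
+                    .limit          = 0xfffff,
+                    .limit_in_pages = 1,
+            };
+            for (;;) {
+                    d.seg_32bit ^= 1;   /* changes behind the filter's back */
+                    syscall(SYS_modify_ldt, 1, &d, sizeof(d));
+            }
+            return NULL;
+    }
+
+    int main(void)
+    {
+            pthread_t t;
+            pthread_create(&t, NULL, flip_ldt, NULL);
+            /* A real exploit would IRET to the LDT stack segment here;
+             * the point is only that the descriptor is never stable. */
+            pause();
+            return 0;
+    }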
+
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/entry_32.S |    5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -554,11 +554,6 @@ ENTRY(iret_exc)
+       CFI_RESTORE_STATE
+ ldt_ss:
+-      larl PT_OLDSS(%esp), %eax
+-      jnz restore_nocheck
+-      testl $0x00400000, %eax         # returning to 32bit stack?
+-      jnz restore_nocheck             # allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+       /*
+        * The kernel can't run on a non-flat stack if paravirt mode
diff --git a/queue-3.10/x86-x32-use-compat-shims-for-io_-setup-submit.patch b/queue-3.10/x86-x32-use-compat-shims-for-io_-setup-submit.patch
new file mode 100644
index 0000000..f7e3904
--- /dev/null
@@ -0,0 +1,57 @@
+From 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger <vapier@gentoo.org>
+Date: Sun, 4 May 2014 20:43:15 -0400
+Subject: x86, x32: Use compat shims for io_{setup,submit}
+
+From: Mike Frysinger <vapier@gentoo.org>
+
+commit 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 upstream.
+
+The io_setup syscall takes a pointer to a context id of type aio_context_t.
+This in turn is typed to a __kernel_ulong_t.  We could tweak the
+exported headers to define this as a 64bit quantity for specific
+ABIs, but since we already have a 32bit compat shim for the x86 ABI,
+let's just re-use that logic.  The libaio package is also written to
+expect this as a pointer type, so a compat shim would simplify that.
+
+The io_submit function operates on an array of pointers to iocb structs.
+Padding out the array to be 64bit aligned is a huge pain, so convert
+it over to the existing compat shim too.
+
+We don't convert io_getevents to the compat function, as its only
+purpose is to handle the timespec struct, and the x32 ABI uses 64bit
+times.
+
+With this change, the libaio package can now pass its testsuite when
+built for the x32 ABI.
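+
+A minimal userspace sketch, assuming a raw-syscall caller rather than
+libaio, of the two calls the shims now cover: io_setup() writes the
+context id through a user pointer, and io_submit() reads an array of
+user pointers to struct iocb, both of which are pointer-width-sensitive
+on x32. The file path and buffer here are illustrative only:
+
+    #include <fcntl.h>
+    #include <linux/aio_abi.h>
+    #include <string.h>
+    #include <sys/syscall.h>
+    #include <unistd.h>
+
+    int main(void)
+    {
+            aio_context_t ctx = 0;          /* kernel stores the id here */
+            char buf[512];
+            struct iocb cb;
+            struct iocb *cbs[1] = { &cb };  /* pointer array: the part
+                                               the compat shim re-widens */
+            int fd = open("/tmp/aio-demo", O_RDWR | O_CREAT, 0600);
+
+            memset(&cb, 0, sizeof(cb));
+            cb.aio_fildes     = fd;
+            cb.aio_lio_opcode = IOCB_CMD_PWRITE;
+            cb.aio_buf        = (unsigned long)buf;
+            cb.aio_nbytes     = sizeof(buf);
+
+            if (syscall(SYS_io_setup, 1, &ctx))  /* ctx passed by pointer */
+                    return 1;
+            syscall(SYS_io_submit, ctx, 1, cbs); /* iocb pointer array */
+            syscall(SYS_io_destroy, ctx);
+            return 0;
+    }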
+
+Signed-off-by: Mike Frysinger <vapier@gentoo.org>
+Link: http://lkml.kernel.org/r/1399250595-5005-1-git-send-email-vapier@gentoo.org
+Cc: H.J. Lu <hjl.tools@gmail.com>
+Signed-off-by: H. Peter Anvin <hpa@zytor.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/syscalls/syscall_64.tbl |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203   common  sched_setaffinity       sys_sched_setaffinity
+ 204   common  sched_getaffinity       sys_sched_getaffinity
+ 205   64      set_thread_area
+-206   common  io_setup                sys_io_setup
++206   64      io_setup                sys_io_setup
+ 207   common  io_destroy              sys_io_destroy
+ 208   common  io_getevents            sys_io_getevents
+-209   common  io_submit               sys_io_submit
++209   64      io_submit               sys_io_submit
+ 210   common  io_cancel               sys_io_cancel
+ 211   64      get_thread_area
+ 212   common  lookup_dcookie          sys_lookup_dcookie
+@@ -356,3 +356,5 @@
+ 540   x32     process_vm_writev       compat_sys_process_vm_writev
+ 541   x32     setsockopt              compat_sys_setsockopt
+ 542   x32     getsockopt              compat_sys_getsockopt
++543   x32     io_setup                compat_sys_io_setup
++544   x32     io_submit               compat_sys_io_submit