From 5540fca7b08bd8bafa89a573aa3281ae321658aa Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Fri, 27 Jun 2014 17:38:34 -0700
Subject: [PATCH] 3.4-stable patches

added patches:
	genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
	x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
	x86-x32-use-compat-shims-for-io_-setup-submit.patch
---
 ...interrupt-detection-of-threaded-irqs.patch | 215 ++++++++++++++++++
 queue-3.4/series                              |   3 +
 ...move-filter-for-espfix32-due-to-race.patch |  37 +++
 ...se-compat-shims-for-io_-setup-submit.patch |  57 +++++
 4 files changed, 312 insertions(+)
 create mode 100644 queue-3.4/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
 create mode 100644 queue-3.4/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
 create mode 100644 queue-3.4/x86-x32-use-compat-shims-for-io_-setup-submit.patch

diff --git a/queue-3.4/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch b/queue-3.4/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
new file mode 100644
index 00000000000..12ba0cdbbfc
--- /dev/null
+++ b/queue-3.4/genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
@@ -0,0 +1,215 @@
+From 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner
+Date: Thu, 7 Mar 2013 14:53:45 +0100
+Subject: genirq: Sanitize spurious interrupt detection of threaded irqs
+
+From: Thomas Gleixner
+
+commit 1e77d0a1ed7417d2a5a52a7b8d32aea1833faa6c upstream.
+
+Till reported that the spurious interrupt detection of threaded
+interrupts is broken in two ways:
+
+- note_interrupt() is called for each action thread of a shared
+  interrupt line. That's wrong as we are only interested in whether
+  none of the device drivers felt responsible for the interrupt, but
+  by calling it multiple times for a single interrupt line we account
+  IRQ_NONE even if one of the drivers felt responsible.
+
+- note_interrupt() when called from the thread handler is not
+  serialized. That leaves the members of irq_desc which are used for
+  the spurious detection unprotected.
+
+To solve this we need to defer the spurious detection of a threaded
+interrupt to the next hardware interrupt context where we have
+implicit serialization.
+
+If note_interrupt is called with action_ret == IRQ_WAKE_THREAD, we
+check whether the previous interrupt requested a deferred check. If
+not, we request a deferred check for the next hardware interrupt and
+return.
+
+If set, we check whether one of the interrupt threads signaled
+success. Depending on this information we feed the result into the
+spurious detector.
+
+If one primary handler of a shared interrupt returns IRQ_HANDLED we
+disable the deferred check of irq threads on the same line, as we
+have found at least one device driver who cared.
+
+Reported-by: Till Straumann
+Signed-off-by: Thomas Gleixner
+Tested-by: Austin Schuh
+Cc: Oliver Hartkopp
+Cc: Wolfgang Grandegger
+Cc: Pavel Pisa
+Cc: Marc Kleine-Budde
+Cc: linux-can@vger.kernel.org
+Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1303071450130.22263@ionos
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ include/linux/irqdesc.h |    4 +
+ kernel/irq/manage.c     |    4 -
+ kernel/irq/spurious.c   |  106 ++++++++++++++++++++++++++++++++++++++++++++++--
+ 3 files changed, 108 insertions(+), 6 deletions(-)
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -27,6 +27,8 @@ struct module;
+  * @irq_count:		stats field to detect stalled irqs
+  * @last_unhandled:	aging timer for unhandled count
+  * @irqs_unhandled:	stats field for spurious unhandled interrupts
++ * @threads_handled:	stats field for deferred spurious detection of threaded handlers
++ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
+  * @lock:		locking for SMP
+  * @affinity_hint:	hint to user space for preferred irq affinity
+  * @affinity_notify:	context for notification of affinity changes
+@@ -52,6 +54,8 @@ struct irq_desc {
+ 	unsigned int		irq_count;	/* For detecting broken IRQs */
+ 	unsigned long		last_unhandled;	/* Aging timer for unhandled count */
+ 	unsigned int		irqs_unhandled;
++	atomic_t		threads_handled;
++	int			threads_handled_last;
+ 	raw_spinlock_t		lock;
+ 	struct cpumask		*percpu_enabled;
+ #ifdef CONFIG_SMP
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -809,8 +809,8 @@ static int irq_thread(void *data)
+ 		irq_thread_check_affinity(desc, action);
+ 
+ 		action_ret = handler_fn(desc, action);
+-		if (!noirqdebug)
+-			note_interrupt(action->irq, desc, action_ret);
++		if (action_ret == IRQ_HANDLED)
++			atomic_inc(&desc->threads_handled);
+ 
+ 		wake_threads_waitq(desc);
+ 	}
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, stru
+ 	return action && (action->flags & IRQF_IRQPOLL);
+ }
+ 
++#define SPURIOUS_DEFERRED	0x80000000
++
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ 		    irqreturn_t action_ret)
+ {
+ 	if (desc->istate & IRQS_POLL_INPROGRESS)
+ 		return;
+ 
+-	/* we get here again via the threaded handler */
+-	if (action_ret == IRQ_WAKE_THREAD)
+-		return;
+-
+ 	if (bad_action_ret(action_ret)) {
+ 		report_bad_irq(irq, desc, action_ret);
+ 		return;
+ 	}
+ 
++	/*
++	 * We cannot call note_interrupt from the threaded handler
++	 * because we need to look at the compound of all handlers
++	 * (primary and threaded). Aside from that in the threaded
++	 * shared case we have no serialization against an incoming
++	 * hardware interrupt while we are dealing with a threaded
++	 * result.
++	 *
++	 * So in case a thread is woken, we just note the fact and
++	 * defer the analysis to the next hardware interrupt.
++	 *
++	 * The threaded handlers store whether they successfully
++	 * handled an interrupt and we check whether that number
++	 * changed versus the last invocation.
++	 *
++	 * We could handle all interrupts with the delayed-by-one
++	 * mechanism, but for the non-forced threaded case we'd just
++	 * add pointless overhead to the straight hardirq interrupts
++	 * for the sake of a few lines less code.
++	 */
++	if (action_ret & IRQ_WAKE_THREAD) {
++		/*
++		 * There is a thread woken. Check whether one of the
++		 * shared primary handlers returned IRQ_HANDLED. If
++		 * not we defer the spurious detection to the next
++		 * interrupt.
++		 */
++		if (action_ret == IRQ_WAKE_THREAD) {
++			int handled;
++			/*
++			 * We use bit 31 of thread_handled_last to
++			 * denote the deferred spurious detection
++			 * active. No locking necessary as
++			 * thread_handled_last is only accessed here
++			 * and we have the guarantee that hard
++			 * interrupts are not reentrant.
++			 */
++			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
++				desc->threads_handled_last |= SPURIOUS_DEFERRED;
++				return;
++			}
++			/*
++			 * Check whether one of the threaded handlers
++			 * returned IRQ_HANDLED since the last
++			 * interrupt happened.
++			 *
++			 * For simplicity we just set bit 31, as it is
++			 * set in threads_handled_last as well. So we
++			 * avoid extra masking. And we really do not
++			 * care about the high bits of the handled
++			 * count. We just care about the count being
++			 * different than the one we saw before.
++			 */
++			handled = atomic_read(&desc->threads_handled);
++			handled |= SPURIOUS_DEFERRED;
++			if (handled != desc->threads_handled_last) {
++				action_ret = IRQ_HANDLED;
++				/*
++				 * Note: We keep the SPURIOUS_DEFERRED
++				 * bit set. We are handling the
++				 * previous invocation right now.
++				 * Keep it for the current one, so the
++				 * next hardware interrupt will
++				 * account for it.
++				 */
++				desc->threads_handled_last = handled;
++			} else {
++				/*
++				 * None of the threaded handlers felt
++				 * responsible for the last interrupt.
++				 *
++				 * We keep the SPURIOUS_DEFERRED bit
++				 * set in threads_handled_last as we
++				 * need to account for the current
++				 * interrupt as well.
++				 */
++				action_ret = IRQ_NONE;
++			}
++		} else {
++			/*
++			 * One of the primary handlers returned
++			 * IRQ_HANDLED. So we don't care about the
++			 * threaded handlers on the same line. Clear
++			 * the deferred detection bit.
++			 *
++			 * In theory we could/should check whether the
++			 * deferred bit is set and take the result of
++			 * the previous run into account here as
++			 * well. But it's really not worth the
++			 * trouble. If every other interrupt is
++			 * handled we never trigger the spurious
++			 * detector. And if this is just the one out
++			 * of 100k unhandled ones which is handled
++			 * then we merely delay the spurious detection
++			 * by one hard interrupt. Not a real problem.
++			 */
++			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
++		}
++	}
++
+ 	if (unlikely(action_ret == IRQ_NONE)) {
+ 		/*
+ 		 * If we are seeing only the odd spurious IRQ caused by
diff --git a/queue-3.4/series b/queue-3.4/series
index 22cef92db70..e4848faf4d1 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -29,3 +29,6 @@ usb-sierra-fix-remote-wakeup.patch
 acpi-fix-conflict-between-customized-dsdt-and-dsdt-local-copy.patch
 arm-stacktrace-avoid-listing-stacktrace-functions-in-stacktrace.patch
 target-explicitly-clear-ramdisk_mcp-backend-pages.patch
+x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
+x86-x32-use-compat-shims-for-io_-setup-submit.patch
+genirq-sanitize-spurious-interrupt-detection-of-threaded-irqs.patch
diff --git a/queue-3.4/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch b/queue-3.4/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
new file mode 100644
index 00000000000..d5681ddf06f
--- /dev/null
+++ b/queue-3.4/x86-32-espfix-remove-filter-for-espfix32-due-to-race.patch
@@ -0,0 +1,37 @@
+From 246f2d2ee1d715e1077fc47d61c394569c8ee692 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin"
+Date: Wed, 30 Apr 2014 14:03:25 -0700
+Subject: x86-32, espfix: Remove filter for espfix32 due to race
+
+From: "H. Peter Anvin"
+
+commit 246f2d2ee1d715e1077fc47d61c394569c8ee692 upstream.
+
+It is not safe to use LAR to filter when to go down the espfix path,
+because the LDT is per-process (rather than per-thread) and another
+thread might change the descriptors behind our back. Fortunately it
+is always *safe* (if a bit slow) to go down the espfix path, and a
+32-bit LDT stack segment is extremely rare.
+
+Signed-off-by: H. Peter Anvin
+Link: http://lkml.kernel.org/r/1398816946-3351-1-git-send-email-hpa@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/entry_32.S |    5 -----
+ 1 file changed, 5 deletions(-)
+
+--- a/arch/x86/kernel/entry_32.S
++++ b/arch/x86/kernel/entry_32.S
+@@ -550,11 +550,6 @@ ENTRY(iret_exc)
+ 
+ 	CFI_RESTORE_STATE
+ ldt_ss:
+-	larl PT_OLDSS(%esp), %eax
+-	jnz restore_nocheck
+-	testl $0x00400000, %eax		# returning to 32bit stack?
+-	jnz restore_nocheck		# allright, normal return
+-
+ #ifdef CONFIG_PARAVIRT
+ 	/*
+ 	 * The kernel can't run on a non-flat stack if paravirt mode
diff --git a/queue-3.4/x86-x32-use-compat-shims-for-io_-setup-submit.patch b/queue-3.4/x86-x32-use-compat-shims-for-io_-setup-submit.patch
new file mode 100644
index 00000000000..01a0fac4db2
--- /dev/null
+++ b/queue-3.4/x86-x32-use-compat-shims-for-io_-setup-submit.patch
@@ -0,0 +1,57 @@
+From 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 Mon Sep 17 00:00:00 2001
+From: Mike Frysinger
+Date: Sun, 4 May 2014 20:43:15 -0400
+Subject: x86, x32: Use compat shims for io_{setup,submit}
+
+From: Mike Frysinger
+
+commit 7fd44dacdd803c0bbf38bf478d51d280902bb0f1 upstream.
+
+The io_setup takes a pointer to a context id of type aio_context_t.
+This in turn is typed to a __kernel_ulong_t. We could tweak the
+exported headers to define this as a 64bit quantity for specific
+ABIs, but since we already have a 32bit compat shim for the x86 ABI,
+let's just re-use that logic. The libaio package is also written to
+expect this as a pointer type, so a compat shim would simplify that.
+
+The io_submit func operates on an array of pointers to iocb structs.
+Padding out the array to be 64bit aligned is a huge pain, so convert
+it over to the existing compat shim too.
+
+We don't convert io_getevents to the compat func as its only purpose
+is to handle the timespec struct, and the x32 ABI uses 64bit times.
+
+With this change, the libaio package can now pass its testsuite when
+built for the x32 ABI.
+
+Signed-off-by: Mike Frysinger
+Link: http://lkml.kernel.org/r/1399250595-5005-1-git-send-email-vapier@gentoo.org
+Cc: H.J. Lu
+Signed-off-by: H. Peter Anvin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/syscalls/syscall_64.tbl |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/syscalls/syscall_64.tbl
++++ b/arch/x86/syscalls/syscall_64.tbl
+@@ -212,10 +212,10 @@
+ 203	common	sched_setaffinity	sys_sched_setaffinity
+ 204	common	sched_getaffinity	sys_sched_getaffinity
+ 205	64	set_thread_area
+-206	common	io_setup		sys_io_setup
++206	64	io_setup		sys_io_setup
+ 207	common	io_destroy		sys_io_destroy
+ 208	common	io_getevents		sys_io_getevents
+-209	common	io_submit		sys_io_submit
++209	64	io_submit		sys_io_submit
+ 210	common	io_cancel		sys_io_cancel
+ 211	64	get_thread_area
+ 212	common	lookup_dcookie		sys_lookup_dcookie
+@@ -353,3 +353,5 @@
+ 540	x32	process_vm_writev	compat_sys_process_vm_writev
+ 541	x32	setsockopt		compat_sys_setsockopt
+ 542	x32	getsockopt		compat_sys_getsockopt
++543	x32	io_setup		compat_sys_io_setup
++544	x32	io_submit		compat_sys_io_submit
-- 
2.47.3
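
The deferred detection that the genirq patch introduces can be modeled
outside the kernel. The sketch below is a user-space approximation, not
kernel code: check_deferred() and thread_handled() are hypothetical
stand-ins for the new IRQ_WAKE_THREAD branch of note_interrupt() and
for the irq_thread() hunk, and the serialization that hard-interrupt
context provides in the kernel is simply assumed here.

#include <stdatomic.h>
#include <stdio.h>

#define SPURIOUS_DEFERRED 0x80000000U

/* Kernel irqreturn values; these match include/linux/irqreturn.h. */
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 };

struct irq_desc {
	atomic_uint threads_handled;		/* bumped by irq threads on success */
	unsigned int threads_handled_last;	/* last snapshot + deferred flag */
};

/* Stand-in for the irq_thread() hunk: count successful thread runs. */
static void thread_handled(struct irq_desc *desc)
{
	atomic_fetch_add(&desc->threads_handled, 1);
}

/*
 * Stand-in for the IRQ_WAKE_THREAD handling in note_interrupt().
 * Returns the verdict fed to the spurious detector, which always
 * describes the *previous* threaded run, one hard interrupt late.
 */
static unsigned int check_deferred(struct irq_desc *desc,
				   unsigned int action_ret)
{
	if (action_ret & IRQ_WAKE_THREAD) {
		if (action_ret == IRQ_WAKE_THREAD) {
			unsigned int handled;

			/* First wake-up: arm the deferred check only. */
			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
				desc->threads_handled_last |= SPURIOUS_DEFERRED;
				return IRQ_WAKE_THREAD;
			}
			/* Did any thread succeed since the last hard irq? */
			handled = atomic_load(&desc->threads_handled);
			handled |= SPURIOUS_DEFERRED;
			if (handled != desc->threads_handled_last) {
				desc->threads_handled_last = handled;
				return IRQ_HANDLED;
			}
			return IRQ_NONE;
		}
		/* A shared primary handler handled it: disarm the check. */
		desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
	}
	return action_ret;
}

int main(void)
{
	struct irq_desc desc = { .threads_handled_last = 0 };

	check_deferred(&desc, IRQ_WAKE_THREAD);	/* arms the check */
	thread_handled(&desc);			/* the thread handled it */
	/* Next hard irq accounts the previous run: prints 1 (IRQ_HANDLED). */
	printf("%u\n", check_deferred(&desc, IRQ_WAKE_THREAD));
	/* No thread success in between: prints 0 (IRQ_NONE). */
	printf("%u\n", check_deferred(&desc, IRQ_WAKE_THREAD));
	return 0;
}

The bit-31 trick is visible here: while the check is armed,
threads_handled_last carries SPURIOUS_DEFERRED, so the comparison
against the counter snapshot ORed with the same bit needs no extra
masking.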
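
The race that motivates the espfix32 patch comes from modify_ldt()
acting on the whole process. Below is a minimal Linux/x86
demonstration of that process-wide sharing; the helper names are
hypothetical, error handling is trimmed, and it needs -pthread to
build. The descriptor flip stands in for the "another thread might
change the descriptors behind our back" case from the commit message.

#include <asm/ldt.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Install or rewrite LDT entry 0 as a flat data segment; seg_32bit
 * selects a 32-bit vs. 16-bit segment. 0x11 is the modern write_ldt
 * command of the modify_ldt syscall. */
static long set_ldt_entry(int seg_32bit)
{
	struct user_desc d;

	memset(&d, 0, sizeof(d));
	d.entry_number = 0;
	d.base_addr = 0;
	d.limit = 0xfffff;
	d.seg_32bit = seg_32bit;
	d.limit_in_pages = 1;
	return syscall(SYS_modify_ldt, 0x11, &d, sizeof(d));
}

/* A second thread rewriting the shared descriptor at any time. */
static void *flipper(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++)
		set_ldt_entry(i & 1);
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (set_ldt_entry(1) != 0) {
		perror("modify_ldt");
		return 1;
	}
	pthread_create(&t, NULL, flipper, NULL);
	pthread_join(t, NULL);
	puts("LDT entries are per-process: any thread can rewrite them.");
	return 0;
}

Any LAR-based check in one thread can thus be stale by the time its
result is used, because another thread may have rewritten the
descriptor in between, which is why the kernel now takes the safe,
slightly slower espfix path unconditionally.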
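
For the x32 patch, the core of what the compat shims buy is
pointer-width handling: the context id io_setup returns is 64-bit in
the kernel but pointer-typed (32-bit) to x32 libaio, and io_submit
consumes an array of iocb pointers. Reduced to its essence for
io_submit, the shim must widen 32-bit user pointers into the 64-bit
pointers the native path walks. This is a hypothetical user-space
sketch of just that step, not the kernel's compat_sys_io_submit, which
also copies the array from user space and validates each entry.

#include <stdint.h>
#include <stdio.h>

/* Zero-extend each 32-bit (x32) iocb pointer to 64 bits; x32
 * addresses live in the low 4 GiB, so zero-extension is sufficient. */
static void widen_iocb_ptrs(uint64_t *native, const uint32_t *compat,
			    unsigned long nr)
{
	for (unsigned long i = 0; i < nr; i++)
		native[i] = compat[i];
}

int main(void)
{
	/* Stand-in addresses; on x32 these would point at struct iocb. */
	uint32_t compat[2] = { 0x1000, 0x2000 };
	uint64_t native[2];

	widen_iocb_ptrs(native, compat, 2);
	printf("%#llx %#llx\n",
	       (unsigned long long)native[0],
	       (unsigned long long)native[1]);
	return 0;
}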