From: Greg Kroah-Hartman
Date: Tue, 29 Sep 2015 13:40:39 +0000 (+0200)
Subject: 3.14-stable patches
X-Git-Tag: v4.1.9~3
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=38e25a3e8b100fb4761026be28002af09787a537;p=thirdparty%2Fkernel%2Fstable-queue.git

3.14-stable patches

added patches:
	revert-iio-bmg160-iio_buffer-and-iio_triggered_buffer-are-required.patch
	x86-nmi-enable-nested-do_nmi-handling-for-64-bit-kernels.patch
---

diff --git a/queue-3.14/revert-iio-bmg160-iio_buffer-and-iio_triggered_buffer-are-required.patch b/queue-3.14/revert-iio-bmg160-iio_buffer-and-iio_triggered_buffer-are-required.patch
new file mode 100644
index 00000000000..13ebdf2db48
--- /dev/null
+++ b/queue-3.14/revert-iio-bmg160-iio_buffer-and-iio_triggered_buffer-are-required.patch
@@ -0,0 +1,32 @@
+From 35c45e8bce3c92fb1ff94d376f1d4bfaae079d66 Mon Sep 17 00:00:00 2001
+From: Markus Pargmann
+Date: Wed, 29 Jul 2015 15:46:03 +0200
+Subject: Revert "iio: bmg160: IIO_BUFFER and IIO_TRIGGERED_BUFFER are required"
+
+This reverts commit 279c039ca63acbd69e69d6d7ddfed50346fb2185 which was
+commit 06d2f6ca5a38abe92f1f3a132b331eee773868c3 upstream as it should
+not have been applied.
+
+
+Reported-by: Luis Henriques
+Cc: Markus Pargmann
+Cc: Srinivas Pandruvada
+Cc: Jonathan Cameron
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/iio/gyro/Kconfig |    3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/iio/gyro/Kconfig
++++ b/drivers/iio/gyro/Kconfig
+@@ -93,8 +93,7 @@ config IIO_ST_GYRO_SPI_3AXIS
+ config ITG3200
+ 	tristate "InvenSense ITG3200 Digital 3-Axis Gyroscope I2C driver"
+ 	depends on I2C
+-	select IIO_BUFFER
+-	select IIO_TRIGGERED_BUFFER
++	select IIO_TRIGGERED_BUFFER if IIO_BUFFER
+ 	help
+ 	  Say yes here to add support for the InvenSense ITG3200 digital
+ 	  3-axis gyroscope sensor.
diff --git a/queue-3.14/series b/queue-3.14/series
index 7525168d648..9a7d7f29b88 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -74,3 +74,5 @@ rds-fix-an-integer-overflow-test-in-rds_info_getsockopt.patch
 udp-fix-dst-races-with-multicast-early-demux.patch
 bna-fix-interrupts-storm-caused-by-erroneous-packets.patch
 subject-net-gso-use-feature-flag-argument-in-all-protocol-gso-handlers.patch
+revert-iio-bmg160-iio_buffer-and-iio_triggered_buffer-are-required.patch
+x86-nmi-enable-nested-do_nmi-handling-for-64-bit-kernels.patch
diff --git a/queue-3.14/x86-nmi-enable-nested-do_nmi-handling-for-64-bit-kernels.patch b/queue-3.14/x86-nmi-enable-nested-do_nmi-handling-for-64-bit-kernels.patch
new file mode 100644
index 00000000000..eb5e8fc166c
--- /dev/null
+++ b/queue-3.14/x86-nmi-enable-nested-do_nmi-handling-for-64-bit-kernels.patch
@@ -0,0 +1,197 @@
+From 9d05041679904b12c12421cbcf9cb5f4860a8d7b Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski
+Date: Wed, 15 Jul 2015 10:29:33 -0700
+Subject: x86/nmi: Enable nested do_nmi() handling for 64-bit kernels
+
+From: Andy Lutomirski
+
+commit 9d05041679904b12c12421cbcf9cb5f4860a8d7b upstream.
+
+32-bit kernels handle nested NMIs in C. Enable the exact same
+handling on 64-bit kernels as well. This isn't currently
+necessary, but it will become necessary once the asm code starts
+allowing limited nesting.
+
+Signed-off-by: Andy Lutomirski
+Reviewed-by: Steven Rostedt
+Cc: Borislav Petkov
+Cc: Linus Torvalds
+Cc: Peter Zijlstra
+Cc: Thomas Gleixner
+Cc: stable@vger.kernel.org
+Signed-off-by: Ingo Molnar
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/x86/kernel/nmi.c |  125 +++++++++++++++++++++-----------------------
+ 1 file changed, 53 insertions(+), 72 deletions(-)
+
+--- a/arch/x86/kernel/nmi.c
++++ b/arch/x86/kernel/nmi.c
+@@ -392,15 +392,15 @@ static __kprobes void default_do_nmi(str
+ }
+ 
+ /*
+- * NMIs can hit breakpoints which will cause it to lose its
+- * NMI context with the CPU when the breakpoint does an iret.
+- */
+-#ifdef CONFIG_X86_32
+-/*
+- * For i386, NMIs use the same stack as the kernel, and we can
+- * add a workaround to the iret problem in C (preventing nested
+- * NMIs if an NMI takes a trap). Simply have 3 states the NMI
+- * can be in:
++ * NMIs can hit breakpoints which will cause it to lose its NMI context
++ * with the CPU when the breakpoint or page fault does an IRET.
++ *
++ * As a result, NMIs can nest if NMIs get unmasked due an IRET during
++ * NMI processing. On x86_64, the asm glue protects us from nested NMIs
++ * if the outer NMI came from kernel mode, but we can still nest if the
++ * outer NMI came from user mode.
++ *
++ * To handle these nested NMIs, we have three states:
+  *
+  * 1) not running
+  * 2) executing
+@@ -414,15 +414,14 @@ static __kprobes void default_do_nmi(str
+  * (Note, the latch is binary, thus multiple NMIs triggering,
+  * when one is running, are ignored. Only one NMI is restarted.)
+  *
+- * If an NMI hits a breakpoint that executes an iret, another
+- * NMI can preempt it. We do not want to allow this new NMI
+- * to run, but we want to execute it when the first one finishes.
+- * We set the state to "latched", and the exit of the first NMI will
+- * perform a dec_return, if the result is zero (NOT_RUNNING), then
+- * it will simply exit the NMI handler. If not, the dec_return
+- * would have set the state to NMI_EXECUTING (what we want it to
+- * be when we are running). In this case, we simply jump back
+- * to rerun the NMI handler again, and restart the 'latched' NMI.
++ * If an NMI executes an iret, another NMI can preempt it. We do not
++ * want to allow this new NMI to run, but we want to execute it when the
++ * first one finishes. We set the state to "latched", and the exit of
++ * the first NMI will perform a dec_return, if the result is zero
++ * (NOT_RUNNING), then it will simply exit the NMI handler. If not, the
++ * dec_return would have set the state to NMI_EXECUTING (what we want it
++ * to be when we are running). In this case, we simply jump back to
++ * rerun the NMI handler again, and restart the 'latched' NMI.
+  *
+  * No trap (breakpoint or page fault) should be hit before nmi_restart,
+  * thus there is no race between the first check of state for NOT_RUNNING
+@@ -445,49 +444,36 @@ enum nmi_states {
+ static DEFINE_PER_CPU(enum nmi_states, nmi_state);
+ static DEFINE_PER_CPU(unsigned long, nmi_cr2);
+ 
+-#define nmi_nesting_preprocess(regs) \
+-	do { \
+-		if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \
+-			this_cpu_write(nmi_state, NMI_LATCHED); \
+-			return; \
+-		} \
+-		this_cpu_write(nmi_state, NMI_EXECUTING); \
+-		this_cpu_write(nmi_cr2, read_cr2()); \
+-	} while (0); \
+-	nmi_restart:
+-
+-#define nmi_nesting_postprocess() \
+-	do { \
+-		if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \
+-			write_cr2(this_cpu_read(nmi_cr2)); \
+-		if (this_cpu_dec_return(nmi_state)) \
+-			goto nmi_restart; \
+-	} while (0)
+-#else /* x86_64 */
++#ifdef CONFIG_X86_64
+ /*
+- * In x86_64 things are a bit more difficult. This has the same problem
+- * where an NMI hitting a breakpoint that calls iret will remove the
+- * NMI context, allowing a nested NMI to enter. What makes this more
+- * difficult is that both NMIs and breakpoints have their own stack.
+- * When a new NMI or breakpoint is executed, the stack is set to a fixed
+- * point. If an NMI is nested, it will have its stack set at that same
+- * fixed address that the first NMI had, and will start corrupting the
+- * stack. This is handled in entry_64.S, but the same problem exists with
+- * the breakpoint stack.
+- *
+- * If a breakpoint is being processed, and the debug stack is being used,
+- * if an NMI comes in and also hits a breakpoint, the stack pointer
+- * will be set to the same fixed address as the breakpoint that was
+- * interrupted, causing that stack to be corrupted. To handle this case,
+- * check if the stack that was interrupted is the debug stack, and if
+- * so, change the IDT so that new breakpoints will use the current stack
+- * and not switch to the fixed address. On return of the NMI, switch back
+- * to the original IDT.
++ * In x86_64, we need to handle breakpoint -> NMI -> breakpoint. Without
++ * some care, the inner breakpoint will clobber the outer breakpoint's
++ * stack.
++ *
++ * If a breakpoint is being processed, and the debug stack is being
++ * used, if an NMI comes in and also hits a breakpoint, the stack
++ * pointer will be set to the same fixed address as the breakpoint that
++ * was interrupted, causing that stack to be corrupted. To handle this
++ * case, check if the stack that was interrupted is the debug stack, and
++ * if so, change the IDT so that new breakpoints will use the current
++ * stack and not switch to the fixed address. On return of the NMI,
++ * switch back to the original IDT.
+  */
+ static DEFINE_PER_CPU(int, update_debug_stack);
++#endif
+ 
+-static inline void nmi_nesting_preprocess(struct pt_regs *regs)
++dotraplinkage notrace void
++do_nmi(struct pt_regs *regs, long error_code)
+ {
++	if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) {
++		this_cpu_write(nmi_state, NMI_LATCHED);
++		return;
++	}
++	this_cpu_write(nmi_state, NMI_EXECUTING);
++	this_cpu_write(nmi_cr2, read_cr2());
++nmi_restart:
++
++#ifdef CONFIG_X86_64
+ 	/*
+ 	 * If we interrupted a breakpoint, it is possible that
+ 	 * the nmi handler will have breakpoints too. We need to
+@@ -498,22 +484,8 @@ static inline void nmi_nesting_preproces
+ 		debug_stack_set_zero();
+ 		this_cpu_write(update_debug_stack, 1);
+ 	}
+-}
+-
+-static inline void nmi_nesting_postprocess(void)
+-{
+-	if (unlikely(this_cpu_read(update_debug_stack))) {
+-		debug_stack_reset();
+-		this_cpu_write(update_debug_stack, 0);
+-	}
+-}
+ #endif
+ 
+-dotraplinkage notrace __kprobes void
+-do_nmi(struct pt_regs *regs, long error_code)
+-{
+-	nmi_nesting_preprocess(regs);
+-
+ 	nmi_enter();
+ 
+ 	inc_irq_stat(__nmi_count);
+@@ -523,8 +495,17 @@ do_nmi(struct pt_regs *regs, long error_
+ 
+ 	nmi_exit();
+ 
+-	/* On i386, may loop back to preprocess */
+-	nmi_nesting_postprocess();
++#ifdef CONFIG_X86_64
++	if (unlikely(this_cpu_read(update_debug_stack))) {
++		debug_stack_reset();
++		this_cpu_write(update_debug_stack, 0);
++	}
++#endif
++
++	if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
++		write_cr2(this_cpu_read(nmi_cr2));
++	if (this_cpu_dec_return(nmi_state))
++		goto nmi_restart;
+ }
+ 
+ void stop_nmi(void)