From: Greg Kroah-Hartman Date: Sat, 8 Aug 2015 22:02:37 +0000 (-0700) Subject: 4.1-stable patches X-Git-Tag: v4.1.5~5 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=b4802c1577e68e002c761ba8001ff33c89f73ec3;p=thirdparty%2Fkernel%2Fstable-queue.git 4.1-stable patches added patches: arc-make-arc-bitops-safer-add-anti-optimization.patch arc-reduce-bitops-lines-of-code-using-macros.patch avr32-handle-null-as-a-valid-clock-object.patch blk-mq-set-default-timeout-as-30-seconds.patch drm-nouveau-drm-nv04-nv40-instmem-protect-access-to-priv-heap-by-mutex.patch drm-nouveau-fbcon-nv11-correctly-account-for-ring-space-usage.patch drm-nouveau-hold-mutex-when-calling-nouveau_abi16_fini.patch drm-nouveau-kms-nv50-guard-against-enabling-cursor-on-disabled-heads.patch hwmon-nct7802-fix-integer-overflow-seen-when-writing-voltage-limits.patch hwmon-nct7904-rename-pwm-attributes-to-match-hwmon-abi.patch ib-ipoib-fix-config_infiniband_ipoib_cm.patch intel_pstate-add-get_scaling-cpu_defaults-param-to-knights-landing.patch iscsi-target-fix-iscsit_start_kthreads-failure-oops.patch iscsi-target-fix-iser-explicit-logout-tx-kthread-leak.patch iscsi-target-fix-use-after-free-during-tpg-session-shutdown.patch n_tty-signal-and-flush-atomically.patch nfs-don-t-revalidate-the-mapping-if-both-size-and-change-attr-are-up-to-date.patch nfs-fix-a-memory-leak-in-nfs_do_recoalesce.patch nfsv4-we-must-set-nfs_open_state-flag-in-nfs_resync_open_stateid_locked.patch perf-hists-browser-take-the-comm-dsos-etc-filters-into-account.patch perf-x86-intel-cqm-return-cached-counter-value-from-irq-context.patch qla2xxx-fix-command-initialization-in-target-mode.patch qla2xxx-fix-hardware-lock-unlock-issue-causing-kernel-panic.patch qla2xxx-kill-sessions-log-out-initiator-on-rscn-and-port-down-events.patch qla2xxx-release-request-queue-reservation.patch qla2xxx-remove-msleep-in-qlt_send_term_exchange.patch rds-rds_ib_device.refcount-overflow.patch vhost-actually-track-log-eventfd-file.patch --- diff --git 
a/queue-4.1/arc-make-arc-bitops-safer-add-anti-optimization.patch b/queue-4.1/arc-make-arc-bitops-safer-add-anti-optimization.patch new file mode 100644 index 00000000000..a5d6f11fec9 --- /dev/null +++ b/queue-4.1/arc-make-arc-bitops-safer-add-anti-optimization.patch @@ -0,0 +1,149 @@ +From 80f420842ff42ad61f84584716d74ef635f13892 Mon Sep 17 00:00:00 2001 +From: Vineet Gupta +Date: Fri, 3 Jul 2015 11:26:22 +0530 +Subject: ARC: Make ARC bitops "safer" (add anti-optimization) + +From: Vineet Gupta + +commit 80f420842ff42ad61f84584716d74ef635f13892 upstream. + +ARCompact/ARCv2 ISA provide that any instructions which deals with +bitpos/count operand ASL, LSL, BSET, BCLR, BMSK .... will only consider +lower 5 bits. i.e. auto-clamp the pos to 0-31. + +ARC Linux bitops exploited this fact by NOT explicitly masking out upper +bits for @nr operand in general, saving a bunch of AND/BMSK instructions +in generated code around bitops. + +While this micro-optimization has worked well over years it is NOT safe +as shifting a number with a value, greater than native size is +"undefined" per "C" spec. + +So as it turns outm EZChip ran into this eventually, in their massive +muti-core SMP build with 64 cpus. There was a test_bit() inside a loop +from 63 to 0 and gcc was weirdly optimizing away the first iteration +(so it was really adhering to standard by implementing undefined behaviour +vs. removing all the iterations which were phony i.e. (1 << [63..32]) + +| for i = 63 to 0 +| X = ( 1 << i ) +| if X == 0 +| continue + +So fix the code to do the explicit masking at the expense of generating +additional instructions. Fortunately, this can be mitigated to a large +extent as gcc has SHIFT_COUNT_TRUNCATED which allows combiner to fold +masking into shift operation itself. It is currently not enabled in ARC +gcc backend, but could be done after a bit of testing. 
+ +Fixes STAR 9000866918 ("unsafe "undefined behavior" code in kernel") + +Reported-by: Noam Camus +Signed-off-by: Vineet Gupta +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arc/include/asm/bitops.h | 35 +++++++++-------------------------- + 1 file changed, 9 insertions(+), 26 deletions(-) + +--- a/arch/arc/include/asm/bitops.h ++++ b/arch/arc/include/asm/bitops.h +@@ -50,8 +50,7 @@ static inline void op##_bit(unsigned lon + * done for const @nr, but no code is generated due to gcc \ + * const prop. \ + */ \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ ++ nr &= 0x1f; \ + \ + __asm__ __volatile__( \ + "1: llock %0, [%1] \n" \ +@@ -82,8 +81,7 @@ static inline int test_and_##op##_bit(un + \ + m += nr >> 5; \ + \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ ++ nr &= 0x1f; \ + \ + /* \ + * Explicit full memory barrier needed before/after as \ +@@ -129,16 +127,13 @@ static inline void op##_bit(unsigned lon + unsigned long temp, flags; \ + m += nr >> 5; \ + \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ +- \ + /* \ + * spin lock/unlock provide the needed smp_mb() before/after \ + */ \ + bitops_lock(flags); \ + \ + temp = *m; \ +- *m = temp c_op (1UL << nr); \ ++ *m = temp c_op (1UL << (nr & 0x1f)); \ + \ + bitops_unlock(flags); \ + } +@@ -149,17 +144,14 @@ static inline int test_and_##op##_bit(un + unsigned long old, flags; \ + m += nr >> 5; \ + \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ +- \ + bitops_lock(flags); \ + \ + old = *m; \ +- *m = old c_op (1 << nr); \ ++ *m = old c_op (1UL << (nr & 0x1f)); \ + \ + bitops_unlock(flags); \ + \ +- return (old & (1 << nr)) != 0; \ ++ return (old & (1UL << (nr & 0x1f))) != 0; \ + } + + #endif /* CONFIG_ARC_HAS_LLSC */ +@@ -174,11 +166,8 @@ static inline void __##op##_bit(unsigned + unsigned long temp; \ + m += nr >> 5; \ + \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ +- \ + temp = *m; \ +- *m = temp c_op (1UL << nr); \ ++ *m = temp c_op (1UL << (nr & 0x1f)); \ + } + + #define 
__TEST_N_BIT_OP(op, c_op, asm_op) \ +@@ -187,13 +176,10 @@ static inline int __test_and_##op##_bit( + unsigned long old; \ + m += nr >> 5; \ + \ +- if (__builtin_constant_p(nr)) \ +- nr &= 0x1f; \ +- \ + old = *m; \ +- *m = old c_op (1 << nr); \ ++ *m = old c_op (1UL << (nr & 0x1f)); \ + \ +- return (old & (1 << nr)) != 0; \ ++ return (old & (1UL << (nr & 0x1f))) != 0; \ + } + + #define BIT_OPS(op, c_op, asm_op) \ +@@ -224,10 +210,7 @@ test_bit(unsigned int nr, const volatile + + addr += nr >> 5; + +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- mask = 1 << nr; ++ mask = 1UL << (nr & 0x1f); + + return ((mask & *addr) != 0); + } diff --git a/queue-4.1/arc-reduce-bitops-lines-of-code-using-macros.patch b/queue-4.1/arc-reduce-bitops-lines-of-code-using-macros.patch new file mode 100644 index 00000000000..9abd08cbb07 --- /dev/null +++ b/queue-4.1/arc-reduce-bitops-lines-of-code-using-macros.patch @@ -0,0 +1,546 @@ +From 04e2eee4b02edcafce96c9c37b31b1a3318291a4 Mon Sep 17 00:00:00 2001 +From: Vineet Gupta +Date: Tue, 31 Mar 2015 22:38:21 +0530 +Subject: ARC: Reduce bitops lines of code using macros + +From: Vineet Gupta + +commit 04e2eee4b02edcafce96c9c37b31b1a3318291a4 upstream. + +No semantical changes ! + +Acked-by: Peter Zijlstra (Intel) +Signed-off-by: Vineet Gupta +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arc/include/asm/bitops.h | 489 ++++++++++++------------------------------ + 1 file changed, 150 insertions(+), 339 deletions(-) + +--- a/arch/arc/include/asm/bitops.h ++++ b/arch/arc/include/asm/bitops.h +@@ -18,83 +18,50 @@ + #include + #include + #include ++#ifndef CONFIG_ARC_HAS_LLSC ++#include ++#endif + +-/* +- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns. 
+- * The Kconfig glue ensures that in SMP, this is only set if the container +- * SoC/platform has cross-core coherent LLOCK/SCOND +- */ + #if defined(CONFIG_ARC_HAS_LLSC) + +-static inline void set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned int temp; +- +- m += nr >> 5; +- +- /* +- * ARC ISA micro-optimization: +- * +- * Instructions dealing with bitpos only consider lower 5 bits (0-31) +- * e.g (x << 33) is handled like (x << 1) by ASL instruction +- * (mem pointer still needs adjustment to point to next word) +- * +- * Hence the masking to clamp @nr arg can be elided in general. +- * +- * However if @nr is a constant (above assumed it in a register), +- * and greater than 31, gcc can optimize away (x << 33) to 0, +- * as overflow, given the 32-bit ISA. Thus masking needs to be done +- * for constant @nr, but no code is generated due to const prop. +- */ +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- __asm__ __volatile__( +- "1: llock %0, [%1] \n" +- " bset %0, %0, %2 \n" +- " scond %0, [%1] \n" +- " bnz 1b \n" +- : "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); +-} +- +-static inline void clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned int temp; +- +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- __asm__ __volatile__( +- "1: llock %0, [%1] \n" +- " bclr %0, %0, %2 \n" +- " scond %0, [%1] \n" +- " bnz 1b \n" +- : "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); +-} ++/* ++ * Hardware assisted Atomic-R-M-W ++ */ + +-static inline void change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned int temp; +- +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- __asm__ __volatile__( +- "1: llock %0, [%1] \n" +- " bxor %0, %0, %2 \n" +- " scond %0, [%1] \n" +- " bnz 1b \n" +- : "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); ++#define BIT_OP(op, c_op, asm_op) \ ++static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ ++{ \ ++ unsigned int temp; \ 
++ \ ++ m += nr >> 5; \ ++ \ ++ /* \ ++ * ARC ISA micro-optimization: \ ++ * \ ++ * Instructions dealing with bitpos only consider lower 5 bits \ ++ * e.g (x << 33) is handled like (x << 1) by ASL instruction \ ++ * (mem pointer still needs adjustment to point to next word) \ ++ * \ ++ * Hence the masking to clamp @nr arg can be elided in general. \ ++ * \ ++ * However if @nr is a constant (above assumed in a register), \ ++ * and greater than 31, gcc can optimize away (x << 33) to 0, \ ++ * as overflow, given the 32-bit ISA. Thus masking needs to be \ ++ * done for const @nr, but no code is generated due to gcc \ ++ * const prop. \ ++ */ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ __asm__ __volatile__( \ ++ "1: llock %0, [%1] \n" \ ++ " " #asm_op " %0, %0, %2 \n" \ ++ " scond %0, [%1] \n" \ ++ " bnz 1b \n" \ ++ : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ ++ : "r"(m), /* Not "m": llock only supports reg direct addr mode */ \ ++ "ir"(nr) \ ++ : "cc"); \ + } + + /* +@@ -108,91 +75,38 @@ static inline void change_bit(unsigned l + * Since ARC lacks a equivalent h/w primitive, the bit is set unconditionally + * and the old value of bit is returned + */ +-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old, temp; +- +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- /* +- * Explicit full memory barrier needed before/after as +- * LLOCK/SCOND themselves don't provide any such semantics +- */ +- smp_mb(); +- +- __asm__ __volatile__( +- "1: llock %0, [%2] \n" +- " bset %1, %0, %3 \n" +- " scond %1, [%2] \n" +- " bnz 1b \n" +- : "=&r"(old), "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); +- +- smp_mb(); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-test_and_clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned int old, temp; +- +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- smp_mb(); +- +- __asm__ 
__volatile__( +- "1: llock %0, [%2] \n" +- " bclr %1, %0, %3 \n" +- " scond %1, [%2] \n" +- " bnz 1b \n" +- : "=&r"(old), "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); +- +- smp_mb(); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-test_and_change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned int old, temp; +- +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- smp_mb(); +- +- __asm__ __volatile__( +- "1: llock %0, [%2] \n" +- " bxor %1, %0, %3 \n" +- " scond %1, [%2] \n" +- " bnz 1b \n" +- : "=&r"(old), "=&r"(temp) +- : "r"(m), "ir"(nr) +- : "cc"); +- +- smp_mb(); +- +- return (old & (1 << nr)) != 0; ++#define TEST_N_BIT_OP(op, c_op, asm_op) \ ++static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ ++{ \ ++ unsigned long old, temp; \ ++ \ ++ m += nr >> 5; \ ++ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ /* \ ++ * Explicit full memory barrier needed before/after as \ ++ * LLOCK/SCOND themselves don't provide any such smenatic \ ++ */ \ ++ smp_mb(); \ ++ \ ++ __asm__ __volatile__( \ ++ "1: llock %0, [%2] \n" \ ++ " " #asm_op " %1, %0, %3 \n" \ ++ " scond %1, [%2] \n" \ ++ " bnz 1b \n" \ ++ : "=&r"(old), "=&r"(temp) \ ++ : "r"(m), "ir"(nr) \ ++ : "cc"); \ ++ \ ++ smp_mb(); \ ++ \ ++ return (old & (1 << nr)) != 0; \ + } + + #else /* !CONFIG_ARC_HAS_LLSC */ + +-#include +- + /* + * Non hardware assisted Atomic-R-M-W + * Locking would change to irq-disabling only (UP) and spinlocks (SMP) +@@ -209,111 +123,43 @@ test_and_change_bit(unsigned long nr, vo + * at compile time) + */ + +-static inline void set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp, flags; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- bitops_lock(flags); +- +- temp = *m; +- *m = temp | (1UL << nr); +- +- bitops_unlock(flags); +-} +- +-static inline void clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp, flags; 
+- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- bitops_lock(flags); +- +- temp = *m; +- *m = temp & ~(1UL << nr); +- +- bitops_unlock(flags); +-} +- +-static inline void change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp, flags; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- bitops_lock(flags); +- +- temp = *m; +- *m = temp ^ (1UL << nr); +- +- bitops_unlock(flags); +-} +- +-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old, flags; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- /* +- * spin lock/unlock provide the needed smp_mb() before/after +- */ +- bitops_lock(flags); +- +- old = *m; +- *m = old | (1 << nr); +- +- bitops_unlock(flags); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-test_and_clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old, flags; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- bitops_lock(flags); +- +- old = *m; +- *m = old & ~(1 << nr); +- +- bitops_unlock(flags); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-test_and_change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old, flags; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- bitops_lock(flags); +- +- old = *m; +- *m = old ^ (1 << nr); +- +- bitops_unlock(flags); +- +- return (old & (1 << nr)) != 0; ++#define BIT_OP(op, c_op, asm_op) \ ++static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\ ++{ \ ++ unsigned long temp, flags; \ ++ m += nr >> 5; \ ++ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ /* \ ++ * spin lock/unlock provide the needed smp_mb() before/after \ ++ */ \ ++ bitops_lock(flags); \ ++ \ ++ temp = *m; \ ++ *m = temp c_op (1UL << nr); \ ++ \ ++ bitops_unlock(flags); \ ++} ++ ++#define TEST_N_BIT_OP(op, c_op, asm_op) \ ++static inline int 
test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ ++{ \ ++ unsigned long old, flags; \ ++ m += nr >> 5; \ ++ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ bitops_lock(flags); \ ++ \ ++ old = *m; \ ++ *m = old c_op (1 << nr); \ ++ \ ++ bitops_unlock(flags); \ ++ \ ++ return (old & (1 << nr)) != 0; \ + } + + #endif /* CONFIG_ARC_HAS_LLSC */ +@@ -322,86 +168,51 @@ test_and_change_bit(unsigned long nr, vo + * Non atomic variants + **************************************/ + +-static inline void __set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- temp = *m; +- *m = temp | (1UL << nr); +-} +- +-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- temp = *m; +- *m = temp & ~(1UL << nr); +-} +- +-static inline void __change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long temp; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- temp = *m; +- *m = temp ^ (1UL << nr); +-} +- +-static inline int +-__test_and_set_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- old = *m; +- *m = old | (1 << nr); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- old = *m; +- *m = old & ~(1 << nr); +- +- return (old & (1 << nr)) != 0; +-} +- +-static inline int +-__test_and_change_bit(unsigned long nr, volatile unsigned long *m) +-{ +- unsigned long old; +- m += nr >> 5; +- +- if (__builtin_constant_p(nr)) +- nr &= 0x1f; +- +- old = *m; +- *m = old ^ (1 << nr); +- +- return (old & (1 << nr)) != 0; +-} ++#define 
__BIT_OP(op, c_op, asm_op) \ ++static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m) \ ++{ \ ++ unsigned long temp; \ ++ m += nr >> 5; \ ++ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ temp = *m; \ ++ *m = temp c_op (1UL << nr); \ ++} ++ ++#define __TEST_N_BIT_OP(op, c_op, asm_op) \ ++static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\ ++{ \ ++ unsigned long old; \ ++ m += nr >> 5; \ ++ \ ++ if (__builtin_constant_p(nr)) \ ++ nr &= 0x1f; \ ++ \ ++ old = *m; \ ++ *m = old c_op (1 << nr); \ ++ \ ++ return (old & (1 << nr)) != 0; \ ++} ++ ++#define BIT_OPS(op, c_op, asm_op) \ ++ \ ++ /* set_bit(), clear_bit(), change_bit() */ \ ++ BIT_OP(op, c_op, asm_op) \ ++ \ ++ /* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\ ++ TEST_N_BIT_OP(op, c_op, asm_op) \ ++ \ ++ /* __set_bit(), __clear_bit(), __change_bit() */ \ ++ __BIT_OP(op, c_op, asm_op) \ ++ \ ++ /* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\ ++ __TEST_N_BIT_OP(op, c_op, asm_op) ++ ++BIT_OPS(set, |, bset) ++BIT_OPS(clear, & ~, bclr) ++BIT_OPS(change, ^, bxor) + + /* + * This routine doesn't need to be atomic. diff --git a/queue-4.1/avr32-handle-null-as-a-valid-clock-object.patch b/queue-4.1/avr32-handle-null-as-a-valid-clock-object.patch new file mode 100644 index 00000000000..adca0590024 --- /dev/null +++ b/queue-4.1/avr32-handle-null-as-a-valid-clock-object.patch @@ -0,0 +1,92 @@ +From 5c02a4206538da12c040b51778d310df84c6bf6c Mon Sep 17 00:00:00 2001 +From: Andy Shevchenko +Date: Fri, 24 Jul 2015 13:49:48 +0300 +Subject: avr32: handle NULL as a valid clock object + +From: Andy Shevchenko + +commit 5c02a4206538da12c040b51778d310df84c6bf6c upstream. + +Since NULL is used as valid clock object on optional clocks we have to handle +this case in avr32 implementation as well. 
+ +Fixes: e1824dfe0d8e (net: macb: Adjust tx_clk when link speed changes) +Signed-off-by: Andy Shevchenko +Acked-by: Hans-Christian Egtvedt +Signed-off-by: Greg Kroah-Hartman + +--- + arch/avr32/mach-at32ap/clock.c | 20 +++++++++++++++++++- + 1 file changed, 19 insertions(+), 1 deletion(-) + +--- a/arch/avr32/mach-at32ap/clock.c ++++ b/arch/avr32/mach-at32ap/clock.c +@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk) + { + unsigned long flags; + ++ if (!clk) ++ return 0; ++ + spin_lock_irqsave(&clk_lock, flags); + __clk_enable(clk); + spin_unlock_irqrestore(&clk_lock, flags); +@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk) + { + unsigned long flags; + ++ if (IS_ERR_OR_NULL(clk)) ++ return; ++ + spin_lock_irqsave(&clk_lock, flags); + __clk_disable(clk); + spin_unlock_irqrestore(&clk_lock, flags); +@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *c + unsigned long flags; + unsigned long rate; + ++ if (!clk) ++ return 0; ++ + spin_lock_irqsave(&clk_lock, flags); + rate = clk->get_rate(clk); + spin_unlock_irqrestore(&clk_lock, flags); +@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, uns + { + unsigned long flags, actual_rate; + ++ if (!clk) ++ return 0; ++ + if (!clk->set_rate) + return -ENOSYS; + +@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsign + unsigned long flags; + long ret; + ++ if (!clk) ++ return 0; ++ + if (!clk->set_rate) + return -ENOSYS; + +@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, stru + unsigned long flags; + int ret; + ++ if (!clk) ++ return 0; ++ + if (!clk->set_parent) + return -ENOSYS; + +@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent); + + struct clk *clk_get_parent(struct clk *clk) + { +- return clk->parent; ++ return !clk ? 
NULL : clk->parent; + } + EXPORT_SYMBOL(clk_get_parent); + diff --git a/queue-4.1/blk-mq-set-default-timeout-as-30-seconds.patch b/queue-4.1/blk-mq-set-default-timeout-as-30-seconds.patch new file mode 100644 index 00000000000..c8402ad5630 --- /dev/null +++ b/queue-4.1/blk-mq-set-default-timeout-as-30-seconds.patch @@ -0,0 +1,33 @@ +From e56f698bd0720e17f10f39e8b0b5b446ad0ab22c Mon Sep 17 00:00:00 2001 +From: Ming Lei +Date: Thu, 16 Jul 2015 19:53:22 +0800 +Subject: blk-mq: set default timeout as 30 seconds + +From: Ming Lei + +commit e56f698bd0720e17f10f39e8b0b5b446ad0ab22c upstream. + +It is reasonable to set default timeout of request as 30 seconds instead of +30000 ticks, which may be 300 seconds if HZ is 100, for example, some arm64 +based systems may choose 100 HZ. + +Signed-off-by: Ming Lei +Fixes: c76cbbcf4044 ("blk-mq: put blk_queue_rq_timeout together in blk_mq_init_queue()" +Signed-off-by: Jens Axboe +Signed-off-by: Greg Kroah-Hartman + +--- + block/blk-mq.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -1968,7 +1968,7 @@ struct request_queue *blk_mq_init_alloca + goto err_hctxs; + + setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q); +- blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000); ++ blk_queue_rq_timeout(q, set->timeout ? 
set->timeout : 30 * HZ); + + q->nr_queues = nr_cpu_ids; + q->nr_hw_queues = set->nr_hw_queues; diff --git a/queue-4.1/drm-nouveau-drm-nv04-nv40-instmem-protect-access-to-priv-heap-by-mutex.patch b/queue-4.1/drm-nouveau-drm-nv04-nv40-instmem-protect-access-to-priv-heap-by-mutex.patch new file mode 100644 index 00000000000..27ca526891e --- /dev/null +++ b/queue-4.1/drm-nouveau-drm-nv04-nv40-instmem-protect-access-to-priv-heap-by-mutex.patch @@ -0,0 +1,53 @@ +From 7512223b1ece29a5968ed8b67ccb891d21b7834b Mon Sep 17 00:00:00 2001 +From: Kamil Dudka +Date: Wed, 15 Jul 2015 22:57:43 +0200 +Subject: drm/nouveau/drm/nv04-nv40/instmem: protect access to priv->heap by mutex + +From: Kamil Dudka + +commit 7512223b1ece29a5968ed8b67ccb891d21b7834b upstream. + +This fixes the list_del corruption reported +at . + +Signed-off-by: Kamil Dudka +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c ++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c +@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *ob + { + struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); + struct nv04_instobj_priv *node = (void *)object; ++ struct nvkm_subdev *subdev = (void *)priv; ++ ++ mutex_lock(&subdev->mutex); + nvkm_mm_free(&priv->heap, &node->mem); ++ mutex_unlock(&subdev->mutex); ++ + nvkm_instobj_destroy(&node->base); + } + +@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *pa + struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); + struct nv04_instobj_priv *node; + struct nvkm_instobj_args *args = data; ++ struct nvkm_subdev *subdev = (void *)priv; + int ret; + + if (!args->align) +@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *pa + if (ret) + return ret; + ++ mutex_lock(&subdev->mutex); + ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, + args->align, &node->mem); ++ 
mutex_unlock(&subdev->mutex); + if (ret) + return ret; + diff --git a/queue-4.1/drm-nouveau-fbcon-nv11-correctly-account-for-ring-space-usage.patch b/queue-4.1/drm-nouveau-fbcon-nv11-correctly-account-for-ring-space-usage.patch new file mode 100644 index 00000000000..071cbd3ec21 --- /dev/null +++ b/queue-4.1/drm-nouveau-fbcon-nv11-correctly-account-for-ring-space-usage.patch @@ -0,0 +1,33 @@ +From d108142c0840ce389cd9898aa76943b3fb430b83 Mon Sep 17 00:00:00 2001 +From: Ilia Mirkin +Date: Mon, 29 Jun 2015 04:07:20 -0400 +Subject: drm/nouveau/fbcon/nv11-: correctly account for ring space usage + +From: Ilia Mirkin + +commit d108142c0840ce389cd9898aa76943b3fb430b83 upstream. + +The RING_SPACE macro accounts how much space is used up so it's +important to ask it for the right amount. Incorrect accounting of this +can cause page faults down the line as writes are attempted outside of +the ring. + +Signed-off-by: Ilia Mirkin +Signed-off-by: Ben Skeggs +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/nouveau/nv04_fbcon.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c ++++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c +@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *in + if (ret) + return ret; + +- if (RING_SPACE(chan, 49)) { ++ if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 
4 : 0))) { + nouveau_fbcon_gpu_lockup(info); + return 0; + } diff --git a/queue-4.1/drm-nouveau-hold-mutex-when-calling-nouveau_abi16_fini.patch b/queue-4.1/drm-nouveau-hold-mutex-when-calling-nouveau_abi16_fini.patch new file mode 100644 index 00000000000..e30397bc229 --- /dev/null +++ b/queue-4.1/drm-nouveau-hold-mutex-when-calling-nouveau_abi16_fini.patch @@ -0,0 +1,31 @@ +From ac8c79304280da6ef05c348a9da03ab04898b994 Mon Sep 17 00:00:00 2001 +From: Kamil Dudka +Date: Wed, 15 Jul 2015 17:18:15 +0200 +Subject: drm/nouveau: hold mutex when calling nouveau_abi16_fini() + +From: Kamil Dudka + +commit ac8c79304280da6ef05c348a9da03ab04898b994 upstream. + +This was the only access to cli->abi16 without holding the mutex. + +Signed-off-by: Kamil Dudka +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/nouveau/nouveau_drm.c | 2 ++ + 1 file changed, 2 insertions(+) + +--- a/drivers/gpu/drm/nouveau/nouveau_drm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c +@@ -863,8 +863,10 @@ nouveau_drm_preclose(struct drm_device * + + pm_runtime_get_sync(dev->dev); + ++ mutex_lock(&cli->mutex); + if (cli->abi16) + nouveau_abi16_fini(cli->abi16); ++ mutex_unlock(&cli->mutex); + + mutex_lock(&drm->client.mutex); + list_del(&cli->head); diff --git a/queue-4.1/drm-nouveau-kms-nv50-guard-against-enabling-cursor-on-disabled-heads.patch b/queue-4.1/drm-nouveau-kms-nv50-guard-against-enabling-cursor-on-disabled-heads.patch new file mode 100644 index 00000000000..f49dabc42d4 --- /dev/null +++ b/queue-4.1/drm-nouveau-kms-nv50-guard-against-enabling-cursor-on-disabled-heads.patch @@ -0,0 +1,30 @@ +From 697bb728d9e2367020aa0c5af7363809d7658e43 Mon Sep 17 00:00:00 2001 +From: Ben Skeggs +Date: Tue, 28 Jul 2015 17:20:57 +1000 +Subject: drm/nouveau/kms/nv50-: guard against enabling cursor on disabled heads + +From: Ben Skeggs + +commit 697bb728d9e2367020aa0c5af7363809d7658e43 upstream. 
+ +Userspace has started doing this, which upsets the display class hw +error checking in various unpleasant ways. + +Signed-off-by: Ben Skeggs +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/gpu/drm/nouveau/nv50_display.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/gpu/drm/nouveau/nv50_display.c ++++ b/drivers/gpu/drm/nouveau/nv50_display.c +@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouvea + { + struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); + +- if (show && nv_crtc->cursor.nvbo) ++ if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled) + nv50_crtc_cursor_show(nv_crtc); + else + nv50_crtc_cursor_hide(nv_crtc); diff --git a/queue-4.1/hwmon-nct7802-fix-integer-overflow-seen-when-writing-voltage-limits.patch b/queue-4.1/hwmon-nct7802-fix-integer-overflow-seen-when-writing-voltage-limits.patch new file mode 100644 index 00000000000..05dd29357ea --- /dev/null +++ b/queue-4.1/hwmon-nct7802-fix-integer-overflow-seen-when-writing-voltage-limits.patch @@ -0,0 +1,33 @@ +From 9200bc4c28cd8992eb5379345abd6b4f0c93df16 Mon Sep 17 00:00:00 2001 +From: Guenter Roeck +Date: Sat, 4 Jul 2015 13:23:42 -0700 +Subject: hwmon: (nct7802) Fix integer overflow seen when writing voltage limits + +From: Guenter Roeck + +commit 9200bc4c28cd8992eb5379345abd6b4f0c93df16 upstream. + +Writing a large value into a voltage limit attribute can result +in an overflow due to an auto-conversion from unsigned long to +unsigned int. 
+ +Cc: Constantine Shulyupin +Reviewed-by: Jean Delvare +Signed-off-by: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/hwmon/nct7802.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/hwmon/nct7802.c ++++ b/drivers/hwmon/nct7802.c +@@ -195,7 +195,7 @@ abort: + } + + static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, +- unsigned int voltage) ++ unsigned long voltage) + { + int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; + int err; diff --git a/queue-4.1/hwmon-nct7904-rename-pwm-attributes-to-match-hwmon-abi.patch b/queue-4.1/hwmon-nct7904-rename-pwm-attributes-to-match-hwmon-abi.patch new file mode 100644 index 00000000000..03f7d0c5504 --- /dev/null +++ b/queue-4.1/hwmon-nct7904-rename-pwm-attributes-to-match-hwmon-abi.patch @@ -0,0 +1,132 @@ +From 0d6aaffc3a6db642e0a165ba4d17d6d7bbaf5201 Mon Sep 17 00:00:00 2001 +From: Guenter Roeck +Date: Mon, 27 Jul 2015 10:21:46 -0700 +Subject: hwmon: (nct7904) Rename pwm attributes to match hwmon ABI + +From: Guenter Roeck + +commit 0d6aaffc3a6db642e0a165ba4d17d6d7bbaf5201 upstream. + +pwm attributes have well defined names, which should be used. + +Cc: Vadim V. 
Vlasov +Signed-off-by: Guenter Roeck +Signed-off-by: Greg Kroah-Hartman + +--- + Documentation/hwmon/nct7904 | 4 +-- + drivers/hwmon/nct7904.c | 57 ++++++++++++++++++++++---------------------- + 2 files changed, 31 insertions(+), 30 deletions(-) + +--- a/Documentation/hwmon/nct7904 ++++ b/Documentation/hwmon/nct7904 +@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 d + temp[2-9]_input CPU temperatures (1/1000 degree, + 0.125 degree resolution) + +-fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode ++pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode + Setting SmartFan mode is supported only if it has been + previously configured by BIOS (or configuration EEPROM) + +-fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode ++pwm[1-4] R/O in SmartFan mode, R/W in manual control mode + + The driver checks sensor control registers and does not export the sensors + that are not enabled. Anyway, a sensor that is enabled may actually be not +--- a/drivers/hwmon/nct7904.c ++++ b/drivers/hwmon/nct7904.c +@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *d + return sprintf(buf, "%d\n", val); + } + +-static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, +- const char *buf, size_t count) ++static ssize_t store_enable(struct device *dev, ++ struct device_attribute *devattr, ++ const char *buf, size_t count) + { + int index = to_sensor_dev_attr(devattr)->index; + struct nct7904_data *data = dev_get_drvdata(dev); +@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device + + if (kstrtoul(buf, 10, &val) < 0) + return -EINVAL; +- if (val > 1 || (val && !data->fan_mode[index])) ++ if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index])) + return -EINVAL; + + ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, +- val ? data->fan_mode[index] : 0); ++ val == 2 ? data->fan_mode[index] : 0); + + return ret ? 
ret : count; + } + +-/* Return 0 for manual mode or 1 for SmartFan mode */ +-static ssize_t show_mode(struct device *dev, +- struct device_attribute *devattr, char *buf) ++/* Return 1 for manual mode or 2 for SmartFan mode */ ++static ssize_t show_enable(struct device *dev, ++ struct device_attribute *devattr, char *buf) + { + int index = to_sensor_dev_attr(devattr)->index; + struct nct7904_data *data = dev_get_drvdata(dev); +@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device * + if (val < 0) + return val; + +- return sprintf(buf, "%d\n", val ? 1 : 0); ++ return sprintf(buf, "%d\n", val ? 2 : 1); + } + + /* 2 attributes per channel: pwm and mode */ +-static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, ++static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, + show_pwm, store_pwm, 0); +-static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, +- show_mode, store_mode, 0); +-static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, ++static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, ++ show_enable, store_enable, 0); ++static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR, + show_pwm, store_pwm, 1); +-static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, +- show_mode, store_mode, 1); +-static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, ++static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR, ++ show_enable, store_enable, 1); ++static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR, + show_pwm, store_pwm, 2); +-static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, +- show_mode, store_mode, 2); +-static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, ++static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR, ++ show_enable, store_enable, 2); ++static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR, + show_pwm, store_pwm, 3); +-static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, +- show_mode, store_mode, 3); ++static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR, ++ show_enable, store_enable, 3); + + static struct attribute *nct7904_fanctl_attrs[] = { 
+- &sensor_dev_attr_fan1_pwm.dev_attr.attr, +- &sensor_dev_attr_fan1_mode.dev_attr.attr, +- &sensor_dev_attr_fan2_pwm.dev_attr.attr, +- &sensor_dev_attr_fan2_mode.dev_attr.attr, +- &sensor_dev_attr_fan3_pwm.dev_attr.attr, +- &sensor_dev_attr_fan3_mode.dev_attr.attr, +- &sensor_dev_attr_fan4_pwm.dev_attr.attr, +- &sensor_dev_attr_fan4_mode.dev_attr.attr, ++ &sensor_dev_attr_pwm1.dev_attr.attr, ++ &sensor_dev_attr_pwm1_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm2.dev_attr.attr, ++ &sensor_dev_attr_pwm2_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm3.dev_attr.attr, ++ &sensor_dev_attr_pwm3_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm4.dev_attr.attr, ++ &sensor_dev_attr_pwm4_enable.dev_attr.attr, + NULL + }; + diff --git a/queue-4.1/ib-ipoib-fix-config_infiniband_ipoib_cm.patch b/queue-4.1/ib-ipoib-fix-config_infiniband_ipoib_cm.patch new file mode 100644 index 00000000000..a92781b4997 --- /dev/null +++ b/queue-4.1/ib-ipoib-fix-config_infiniband_ipoib_cm.patch @@ -0,0 +1,37 @@ +From efc1eedbf63a194b3b576fc25776f3f1fa55a4d4 Mon Sep 17 00:00:00 2001 +From: Jason Gunthorpe +Date: Wed, 22 Jul 2015 14:30:03 -0600 +Subject: IB/ipoib: Fix CONFIG_INFINIBAND_IPOIB_CM + +From: Jason Gunthorpe + +commit efc1eedbf63a194b3b576fc25776f3f1fa55a4d4 upstream. 
+ +If the above is turned off then ipoib_cm_dev_init unconditionally +returns ENOSYS, and the newly added error handling in +0b3957 prevents ipoib from coming up at all: + +kernel: mlx4_0: ipoib_transport_dev_init failed +kernel: mlx4_0: failed to initialize port 1 (ret = -12) + +Fixes: 0b39578bcde4 (IB/ipoib: Use dedicated workqueues per interface) +Signed-off-by: Jason Gunthorpe +Signed-off-by: Doug Ledford +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/infiniband/ulp/ipoib/ipoib_verbs.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +@@ -176,7 +176,8 @@ int ipoib_transport_dev_init(struct net_ + else + size += ipoib_recvq_size * ipoib_max_conn_qp; + } else +- goto out_free_wq; ++ if (ret != -ENOSYS) ++ goto out_free_wq; + + priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0); + if (IS_ERR(priv->recv_cq)) { diff --git a/queue-4.1/intel_pstate-add-get_scaling-cpu_defaults-param-to-knights-landing.patch b/queue-4.1/intel_pstate-add-get_scaling-cpu_defaults-param-to-knights-landing.patch new file mode 100644 index 00000000000..b7e7378b66f --- /dev/null +++ b/queue-4.1/intel_pstate-add-get_scaling-cpu_defaults-param-to-knights-landing.patch @@ -0,0 +1,35 @@ +From 69cefc273f942bd7bb347a02e8b5b738d5f6e6f3 Mon Sep 17 00:00:00 2001 +From: Lukasz Anaczkowski +Date: Tue, 21 Jul 2015 10:41:13 +0200 +Subject: intel_pstate: Add get_scaling cpu_defaults param to Knights Landing + +From: Lukasz Anaczkowski + +commit 69cefc273f942bd7bb347a02e8b5b738d5f6e6f3 upstream. + +Scaling for Knights Landing is same as the default scaling (100000). +When Knigts Landing support was added to the pstate driver, this +parameter was omitted resulting in a kernel panic during boot. 
+ +Fixes: b34ef932d79a (intel_pstate: Knights Landing support) +Reported-by: Yasuaki Ishimatsu +Signed-off-by: Dasaratharaman Chandramouli +Signed-off-by: Lukasz Anaczkowski +Acked-by: Kristen Carlson Accardi +Signed-off-by: Rafael J. Wysocki +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/cpufreq/intel_pstate.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -678,6 +678,7 @@ static struct cpu_defaults knl_params = + .get_max = core_get_max_pstate, + .get_min = core_get_min_pstate, + .get_turbo = knl_get_turbo_pstate, ++ .get_scaling = core_get_scaling, + .set = core_set_pstate, + }, + }; diff --git a/queue-4.1/iscsi-target-fix-iscsit_start_kthreads-failure-oops.patch b/queue-4.1/iscsi-target-fix-iscsit_start_kthreads-failure-oops.patch new file mode 100644 index 00000000000..45cc09a47e2 --- /dev/null +++ b/queue-4.1/iscsi-target-fix-iscsit_start_kthreads-failure-oops.patch @@ -0,0 +1,299 @@ +From e54198657b65625085834847ab6271087323ffea Mon Sep 17 00:00:00 2001 +From: Nicholas Bellinger +Date: Wed, 22 Jul 2015 23:14:19 -0700 +Subject: iscsi-target: Fix iscsit_start_kthreads failure OOPs + +From: Nicholas Bellinger + +commit e54198657b65625085834847ab6271087323ffea upstream. + +This patch fixes a regression introduced with the following commit +in v4.0-rc1 code, where a iscsit_start_kthreads() failure triggers +a NULL pointer dereference OOPs: + + commit 88dcd2dab5c23b1c9cfc396246d8f476c872f0ca + Author: Nicholas Bellinger + Date: Thu Feb 26 22:19:15 2015 -0800 + + iscsi-target: Convert iscsi_thread_set usage to kthread.h + +To address this bug, move iscsit_start_kthreads() immediately +preceeding the transmit of last login response, before signaling +a successful transition into full-feature-phase within existing +iscsi_target_do_tx_login_io() logic. + +This ensures that no target-side resource allocation failures can +occur after the final login response has been successfully sent. 
+ +Also, it adds a iscsi_conn->rx_login_comp to allow the RX thread +to sleep to prevent other socket related failures until the final +iscsi_post_login_handler() call is able to complete. + +Cc: Sagi Grimberg +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/target/iscsi/iscsi_target.c | 18 ++++++++++-- + drivers/target/iscsi/iscsi_target_login.c | 45 +++++++++++------------------- + drivers/target/iscsi/iscsi_target_login.h | 3 +- + drivers/target/iscsi/iscsi_target_nego.c | 34 ++++++++++++++++++++++ + include/target/iscsi/iscsi_target_core.h | 1 + 5 files changed, 68 insertions(+), 33 deletions(-) + +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4001,7 +4001,13 @@ get_immediate: + } + + transport_err: +- iscsit_take_action_for_connection_exit(conn); ++ /* ++ * Avoid the normal connection failure code-path if this connection ++ * is still within LOGIN mode, and iscsi_np process context is ++ * responsible for cleaning up the early connection failure. ++ */ ++ if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) ++ iscsit_take_action_for_connection_exit(conn); + out: + return 0; + } +@@ -4093,7 +4099,7 @@ reject: + + int iscsi_target_rx_thread(void *arg) + { +- int ret; ++ int ret, rc; + u8 buffer[ISCSI_HDR_LEN], opcode; + u32 checksum = 0, digest = 0; + struct iscsi_conn *conn = arg; +@@ -4103,10 +4109,16 @@ int iscsi_target_rx_thread(void *arg) + * connection recovery / failure event can be triggered externally. + */ + allow_signal(SIGINT); ++ /* ++ * Wait for iscsi_post_login_handler() to complete before allowing ++ * incoming iscsi/tcp socket I/O, and/or failing the connection. 
++ */ ++ rc = wait_for_completion_interruptible(&conn->rx_login_comp); ++ if (rc < 0) ++ return 0; + + if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { + struct completion comp; +- int rc; + + init_completion(&comp); + rc = wait_for_completion_interruptible(&comp); +--- a/drivers/target/iscsi/iscsi_target_login.c ++++ b/drivers/target/iscsi/iscsi_target_login.c +@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_i + init_completion(&conn->conn_logout_comp); + init_completion(&conn->rx_half_close_comp); + init_completion(&conn->tx_half_close_comp); ++ init_completion(&conn->rx_login_comp); + spin_lock_init(&conn->cmd_lock); + spin_lock_init(&conn->conn_usage_lock); + spin_lock_init(&conn->immed_queue_lock); +@@ -699,7 +700,7 @@ static void iscsi_post_login_start_timer + iscsit_start_nopin_timer(conn); + } + +-static int iscsit_start_kthreads(struct iscsi_conn *conn) ++int iscsit_start_kthreads(struct iscsi_conn *conn) + { + int ret = 0; + +@@ -734,6 +735,7 @@ static int iscsit_start_kthreads(struct + + return 0; + out_tx: ++ send_sig(SIGINT, conn->tx_thread, 1); + kthread_stop(conn->tx_thread); + conn->tx_thread_active = false; + out_bitmap: +@@ -744,7 +746,7 @@ out_bitmap: + return ret; + } + +-int iscsi_post_login_handler( ++void iscsi_post_login_handler( + struct iscsi_np *np, + struct iscsi_conn *conn, + u8 zero_tsih) +@@ -754,7 +756,6 @@ int iscsi_post_login_handler( + struct se_session *se_sess = sess->se_sess; + struct iscsi_portal_group *tpg = sess->tpg; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; +- int rc; + + iscsit_inc_conn_usage_count(conn); + +@@ -795,10 +796,6 @@ int iscsi_post_login_handler( + sess->sess_ops->InitiatorName); + spin_unlock_bh(&sess->conn_lock); + +- rc = iscsit_start_kthreads(conn); +- if (rc) +- return rc; +- + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads +@@ -807,15 +804,20 @@ int iscsi_post_login_handler( + iscsit_thread_get_cpumask(conn); + 
conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; +- ++ /* ++ * Wakeup the sleeping iscsi_target_rx_thread() now that ++ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. ++ */ ++ complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); ++ + if (stop_timer) { + spin_lock_bh(&se_tpg->session_lock); + iscsit_stop_time2retain_timer(sess); + spin_unlock_bh(&se_tpg->session_lock); + } + iscsit_dec_session_usage_count(sess); +- return 0; ++ return; + } + + iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); +@@ -856,10 +858,6 @@ int iscsi_post_login_handler( + " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); + spin_unlock_bh(&se_tpg->session_lock); + +- rc = iscsit_start_kthreads(conn); +- if (rc) +- return rc; +- + iscsi_post_login_start_timers(conn); + /* + * Determine CPU mask to ensure connection's RX and TX kthreads +@@ -868,10 +866,12 @@ int iscsi_post_login_handler( + iscsit_thread_get_cpumask(conn); + conn->conn_rx_reset_cpumask = 1; + conn->conn_tx_reset_cpumask = 1; +- ++ /* ++ * Wakeup the sleeping iscsi_target_rx_thread() now that ++ * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state. 
++ */ ++ complete(&conn->rx_login_comp); + iscsit_dec_conn_usage_count(conn); +- +- return 0; + } + + static void iscsi_handle_login_thread_timeout(unsigned long data) +@@ -1436,23 +1436,12 @@ static int __iscsi_target_login_thread(s + if (ret < 0) + goto new_sess_out; + +- if (!conn->sess) { +- pr_err("struct iscsi_conn session pointer is NULL!\n"); +- goto new_sess_out; +- } +- + iscsi_stop_login_thread_timer(np); + +- if (signal_pending(current)) +- goto new_sess_out; +- + if (ret == 1) { + tpg_np = conn->tpg_np; + +- ret = iscsi_post_login_handler(np, conn, zero_tsih); +- if (ret < 0) +- goto new_sess_out; +- ++ iscsi_post_login_handler(np, conn, zero_tsih); + iscsit_deaccess_np(np, tpg, tpg_np); + } + +--- a/drivers/target/iscsi/iscsi_target_login.h ++++ b/drivers/target/iscsi/iscsi_target_login.h +@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi + extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); + extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); + extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); +-extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); ++extern int iscsit_start_kthreads(struct iscsi_conn *); ++extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); + extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, + bool, bool); + extern int iscsi_target_login_thread(void *); +--- a/drivers/target/iscsi/iscsi_target_nego.c ++++ b/drivers/target/iscsi/iscsi_target_nego.c +@@ -17,6 +17,7 @@ + ******************************************************************************/ + + #include ++#include + #include + #include + #include +@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(s + ntohl(login_rsp->statsn), login->rsp_length); + + padding = ((-login->rsp_length) & 3); ++ /* ++ * Before sending the last login response containing the transition ++ * bit for full-feature-phase, go ahead 
and start up TX/RX threads ++ * now to avoid potential resource allocation failures after the ++ * final login response has been sent. ++ */ ++ if (login->login_complete) { ++ int rc = iscsit_start_kthreads(conn); ++ if (rc) { ++ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ++ ISCSI_LOGIN_STATUS_NO_RESOURCES); ++ return -1; ++ } ++ } + + if (conn->conn_transport->iscsit_put_login_tx(conn, login, + login->rsp_length + padding) < 0) +- return -1; ++ goto err; + + login->rsp_length = 0; + mutex_lock(&sess->cmdsn_mutex); +@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(s + mutex_unlock(&sess->cmdsn_mutex); + + return 0; ++ ++err: ++ if (login->login_complete) { ++ if (conn->rx_thread && conn->rx_thread_active) { ++ send_sig(SIGINT, conn->rx_thread, 1); ++ kthread_stop(conn->rx_thread); ++ } ++ if (conn->tx_thread && conn->tx_thread_active) { ++ send_sig(SIGINT, conn->tx_thread, 1); ++ kthread_stop(conn->tx_thread); ++ } ++ spin_lock(&iscsit_global->ts_bitmap_lock); ++ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id, ++ get_order(1)); ++ spin_unlock(&iscsit_global->ts_bitmap_lock); ++ } ++ return -1; + } + + static void iscsi_target_sk_data_ready(struct sock *sk) +--- a/include/target/iscsi/iscsi_target_core.h ++++ b/include/target/iscsi/iscsi_target_core.h +@@ -606,6 +606,7 @@ struct iscsi_conn { + int bitmap_id; + int rx_thread_active; + struct task_struct *rx_thread; ++ struct completion rx_login_comp; + int tx_thread_active; + struct task_struct *tx_thread; + /* list_head for session connection list */ diff --git a/queue-4.1/iscsi-target-fix-iser-explicit-logout-tx-kthread-leak.patch b/queue-4.1/iscsi-target-fix-iser-explicit-logout-tx-kthread-leak.patch new file mode 100644 index 00000000000..3da94b11391 --- /dev/null +++ b/queue-4.1/iscsi-target-fix-iser-explicit-logout-tx-kthread-leak.patch @@ -0,0 +1,71 @@ +From 007d038bdf95ccfe2491d0078be54040d110fd06 Mon Sep 17 00:00:00 2001 +From: Nicholas Bellinger +Date: Thu, 23 Jul 
2015 22:30:31 +0000 +Subject: iscsi-target: Fix iser explicit logout TX kthread leak + +From: Nicholas Bellinger + +commit 007d038bdf95ccfe2491d0078be54040d110fd06 upstream. + +This patch fixes a regression introduced with the following commit +in v4.0-rc1 code, where an explicit iser-target logout would result +in ->tx_thread_active being incorrectly cleared by the logout post +handler, and subsequent TX kthread leak: + + commit 88dcd2dab5c23b1c9cfc396246d8f476c872f0ca + Author: Nicholas Bellinger + Date: Thu Feb 26 22:19:15 2015 -0800 + + iscsi-target: Convert iscsi_thread_set usage to kthread.h + +To address this bug, change iscsit_logout_post_handler_closesession() +and iscsit_logout_post_handler_samecid() to only cmpxchg() on +->tx_thread_active for traditional iscsi/tcp connections. + +This is required because iscsi/tcp connections are invoking logout +post handler logic directly from TX kthread context, while iser +connections are invoking logout post handler logic from a seperate +workqueue context. + +Cc: Sagi Grimberg +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/target/iscsi/iscsi_target.c | 18 ++++++++++++++++-- + 1 file changed, 16 insertions(+), 2 deletions(-) + +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4555,7 +4555,18 @@ static void iscsit_logout_post_handler_c + struct iscsi_conn *conn) + { + struct iscsi_session *sess = conn->sess; +- int sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ int sleep = 1; ++ /* ++ * Traditional iscsi/tcp will invoke this logic from TX thread ++ * context during session logout, so clear tx_thread_active and ++ * sleep if iscsit_close_connection() has not already occured. ++ * ++ * Since iser-target invokes this logic from it's own workqueue, ++ * always sleep waiting for RX/TX thread shutdown to complete ++ * within iscsit_close_connection(). 
++ */ ++ if (conn->conn_transport->transport_type == ISCSI_TCP) ++ sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); +@@ -4569,7 +4580,10 @@ static void iscsit_logout_post_handler_c + static void iscsit_logout_post_handler_samecid( + struct iscsi_conn *conn) + { +- int sleep = cmpxchg(&conn->tx_thread_active, true, false); ++ int sleep = 1; ++ ++ if (conn->conn_transport->transport_type == ISCSI_TCP) ++ sleep = cmpxchg(&conn->tx_thread_active, true, false); + + atomic_set(&conn->conn_logout_remove, 0); + complete(&conn->conn_logout_comp); diff --git a/queue-4.1/iscsi-target-fix-use-after-free-during-tpg-session-shutdown.patch b/queue-4.1/iscsi-target-fix-use-after-free-during-tpg-session-shutdown.patch new file mode 100644 index 00000000000..4147b92dca1 --- /dev/null +++ b/queue-4.1/iscsi-target-fix-use-after-free-during-tpg-session-shutdown.patch @@ -0,0 +1,65 @@ +From 417c20a9bdd1e876384127cf096d8ae8b559066c Mon Sep 17 00:00:00 2001 +From: Nicholas Bellinger +Date: Wed, 22 Jul 2015 00:24:09 -0700 +Subject: iscsi-target: Fix use-after-free during TPG session shutdown + +From: Nicholas Bellinger + +commit 417c20a9bdd1e876384127cf096d8ae8b559066c upstream. + +This patch fixes a use-after-free bug in iscsit_release_sessions_for_tpg() +where se_portal_group->session_lock was incorrectly released/re-acquired +while walking the active se_portal_group->tpg_sess_list. + +The can result in a NULL pointer dereference when iscsit_close_session() +shutdown happens in the normal path asynchronously to this code, causing +a bogus dereference of an already freed list entry to occur. + +To address this bug, walk the session list checking for the same state +as before, but move entries to a local list to avoid dropping the lock +while walking the active list. 
+ +As before, signal using iscsi_session->session_restatement=1 for those +list entries to be released locally by iscsit_free_session() code. + +Reported-by: Sunilkumar Nadumuttlu +Cc: Sunilkumar Nadumuttlu +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/target/iscsi/iscsi_target.c | 12 ++++++++---- + 1 file changed, 8 insertions(+), 4 deletions(-) + +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -4776,6 +4776,7 @@ int iscsit_release_sessions_for_tpg(stru + struct iscsi_session *sess; + struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; + struct se_session *se_sess, *se_sess_tmp; ++ LIST_HEAD(free_list); + int session_count = 0; + + spin_lock_bh(&se_tpg->session_lock); +@@ -4797,14 +4798,17 @@ int iscsit_release_sessions_for_tpg(stru + } + atomic_set(&sess->session_reinstatement, 1); + spin_unlock(&sess->conn_lock); +- spin_unlock_bh(&se_tpg->session_lock); + +- iscsit_free_session(sess); +- spin_lock_bh(&se_tpg->session_lock); ++ list_move_tail(&se_sess->sess_list, &free_list); ++ } ++ spin_unlock_bh(&se_tpg->session_lock); + ++ list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) { ++ sess = (struct iscsi_session *)se_sess->fabric_sess_ptr; ++ ++ iscsit_free_session(sess); + session_count++; + } +- spin_unlock_bh(&se_tpg->session_lock); + + pr_debug("Released %d iSCSI Session(s) from Target Portal" + " Group: %hu\n", session_count, tpg->tpgt); diff --git a/queue-4.1/n_tty-signal-and-flush-atomically.patch b/queue-4.1/n_tty-signal-and-flush-atomically.patch new file mode 100644 index 00000000000..8b5253cf437 --- /dev/null +++ b/queue-4.1/n_tty-signal-and-flush-atomically.patch @@ -0,0 +1,60 @@ +From 3b19e032295647b7be2aa3be62510db4aaeda759 Mon Sep 17 00:00:00 2001 +From: Peter Hurley +Date: Sat, 27 Jun 2015 09:21:32 -0400 +Subject: n_tty: signal and flush atomically + +From: Peter Hurley + +commit 3b19e032295647b7be2aa3be62510db4aaeda759 upstream. 
+ +When handling signalling char, claim the termios write lock before +signalling waiting readers and writers to prevent further i/o +before flushing the echo and output buffers. This prevents a +userspace signal handler which may output from racing the terminal +flush. + +Reference: Bugzilla #99351 ("Output truncated in ssh session after...") +Fixes: commit d2b6f44779d3 ("n_tty: Fix signal handling flushes") +Reported-by: Filipe Brandenburger +Signed-off-by: Peter Hurley +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/tty/n_tty.c | 16 +++++++++++++--- + 1 file changed, 13 insertions(+), 3 deletions(-) + +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, stru + * Locking: ctrl_lock + */ + +-static void isig(int sig, struct tty_struct *tty) ++static void __isig(int sig, struct tty_struct *tty) + { +- struct n_tty_data *ldata = tty->disc_data; + struct pid *tty_pgrp = tty_get_pgrp(tty); + if (tty_pgrp) { + kill_pgrp(tty_pgrp, sig, 1); + put_pid(tty_pgrp); + } ++} ++ ++static void isig(int sig, struct tty_struct *tty) ++{ ++ struct n_tty_data *ldata = tty->disc_data; + +- if (!L_NOFLSH(tty)) { ++ if (L_NOFLSH(tty)) { ++ /* signal only */ ++ __isig(sig, tty); ++ ++ } else { /* signal and flush */ + up_read(&tty->termios_rwsem); + down_write(&tty->termios_rwsem); + ++ __isig(sig, tty); ++ + /* clear echo buffer */ + mutex_lock(&ldata->output_lock); + ldata->echo_head = ldata->echo_tail = 0; diff --git a/queue-4.1/nfs-don-t-revalidate-the-mapping-if-both-size-and-change-attr-are-up-to-date.patch b/queue-4.1/nfs-don-t-revalidate-the-mapping-if-both-size-and-change-attr-are-up-to-date.patch new file mode 100644 index 00000000000..66b249534fc --- /dev/null +++ b/queue-4.1/nfs-don-t-revalidate-the-mapping-if-both-size-and-change-attr-are-up-to-date.patch @@ -0,0 +1,54 @@ +From 85a23cee3f2c928475f31777ead5a71340a12fc3 Mon Sep 17 00:00:00 2001 +From: Trond Myklebust +Date: Sun, 5 Jul 2015 11:02:53 -0400 
+Subject: NFS: Don't revalidate the mapping if both size and change attr are up to date + +From: Trond Myklebust + +commit 85a23cee3f2c928475f31777ead5a71340a12fc3 upstream. + +If we've ensured that the size and the change attribute are both correct, +then there is no point in marking those attributes as needing revalidation +again. Only do so if we know the size is incorrect and was not updated. + +Fixes: f2467b6f64da ("NFS: Clear NFS_INO_REVAL_PAGECACHE when...") +Signed-off-by: Trond Myklebust +Signed-off-by: Greg Kroah-Hartman + +--- + fs/nfs/inode.c | 8 ++++---- + 1 file changed, 4 insertions(+), 4 deletions(-) + +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -1242,9 +1242,11 @@ static int nfs_check_inode_attributes(st + if (fattr->valid & NFS_ATTR_FATTR_SIZE) { + cur_size = i_size_read(inode); + new_isize = nfs_size_to_loff_t(fattr->size); +- if (cur_size != new_isize && nfsi->nrequests == 0) ++ if (cur_size != new_isize) + invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; + } ++ if (nfsi->nrequests != 0) ++ invalid &= ~NFS_INO_REVAL_PAGECACHE; + + /* Have any file permissions changed? 
*/ + if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) +@@ -1682,8 +1684,7 @@ static int nfs_update_inode(struct inode + invalid |= NFS_INO_INVALID_ATTR + | NFS_INO_INVALID_DATA + | NFS_INO_INVALID_ACCESS +- | NFS_INO_INVALID_ACL +- | NFS_INO_REVAL_PAGECACHE; ++ | NFS_INO_INVALID_ACL; + if (S_ISDIR(inode->i_mode)) + nfs_force_lookup_revalidate(inode); + inode->i_version = fattr->change_attr; +@@ -1715,7 +1716,6 @@ static int nfs_update_inode(struct inode + if ((nfsi->nrequests == 0) || new_isize > cur_isize) { + i_size_write(inode, new_isize); + invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; +- invalid &= ~NFS_INO_REVAL_PAGECACHE; + } + dprintk("NFS: isize change on server for file %s/%ld " + "(%Ld to %Ld)\n", diff --git a/queue-4.1/nfs-fix-a-memory-leak-in-nfs_do_recoalesce.patch b/queue-4.1/nfs-fix-a-memory-leak-in-nfs_do_recoalesce.patch new file mode 100644 index 00000000000..626935c9cfd --- /dev/null +++ b/queue-4.1/nfs-fix-a-memory-leak-in-nfs_do_recoalesce.patch @@ -0,0 +1,36 @@ +From 03d5eb65b53889fe98a5ecddfe205c16e3093190 Mon Sep 17 00:00:00 2001 +From: Trond Myklebust +Date: Mon, 27 Jul 2015 10:23:19 -0400 +Subject: NFS: Fix a memory leak in nfs_do_recoalesce + +From: Trond Myklebust + +commit 03d5eb65b53889fe98a5ecddfe205c16e3093190 upstream. + +If the function exits early, then we must put those requests that were +not processed back onto the &mirror->pg_list so they can be cleaned up +by nfs_pgio_error(). 
+ +Fixes: a7d42ddb30997 ("nfs: add mirroring support to pgio layer") +Signed-off-by: Trond Myklebust +Signed-off-by: Greg Kroah-Hartman + +--- + fs/nfs/pagelist.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -1110,8 +1110,11 @@ static int nfs_do_recoalesce(struct nfs_ + nfs_list_remove_request(req); + if (__nfs_pageio_add_request(desc, req)) + continue; +- if (desc->pg_error < 0) ++ if (desc->pg_error < 0) { ++ list_splice_tail(&head, &mirror->pg_list); ++ mirror->pg_recoalesce = 1; + return 0; ++ } + break; + } + } while (mirror->pg_recoalesce); diff --git a/queue-4.1/nfsv4-we-must-set-nfs_open_state-flag-in-nfs_resync_open_stateid_locked.patch b/queue-4.1/nfsv4-we-must-set-nfs_open_state-flag-in-nfs_resync_open_stateid_locked.patch new file mode 100644 index 00000000000..e91edb6303d --- /dev/null +++ b/queue-4.1/nfsv4-we-must-set-nfs_open_state-flag-in-nfs_resync_open_stateid_locked.patch @@ -0,0 +1,38 @@ +From 3c38cbe2ade88240fabb585b408f779ad3b9a31b Mon Sep 17 00:00:00 2001 +From: Trond Myklebust +Date: Wed, 22 Jul 2015 13:46:13 -0400 +Subject: NFSv4: We must set NFS_OPEN_STATE flag in nfs_resync_open_stateid_locked + +From: Trond Myklebust + +commit 3c38cbe2ade88240fabb585b408f779ad3b9a31b upstream. + +Otherwise, nfs4_select_rw_stateid() will always return the zero stateid +instead of the correct open stateid. 
+ +Fixes: f95549cf24660 ("NFSv4: More CLOSE/OPEN races") +Signed-off-by: Trond Myklebust +Signed-off-by: Greg Kroah-Hartman + +--- + fs/nfs/nfs4proc.c | 3 +++ + 1 file changed, 3 insertions(+) + +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1204,12 +1204,15 @@ static bool nfs_need_update_open_stateid + + static void nfs_resync_open_stateid_locked(struct nfs4_state *state) + { ++ if (!(state->n_wronly || state->n_rdonly || state->n_rdwr)) ++ return; + if (state->n_wronly) + set_bit(NFS_O_WRONLY_STATE, &state->flags); + if (state->n_rdonly) + set_bit(NFS_O_RDONLY_STATE, &state->flags); + if (state->n_rdwr) + set_bit(NFS_O_RDWR_STATE, &state->flags); ++ set_bit(NFS_OPEN_STATE, &state->flags); + } + + static void nfs_clear_open_stateid_locked(struct nfs4_state *state, diff --git a/queue-4.1/perf-hists-browser-take-the-comm-dsos-etc-filters-into-account.patch b/queue-4.1/perf-hists-browser-take-the-comm-dsos-etc-filters-into-account.patch new file mode 100644 index 00000000000..4594944bc99 --- /dev/null +++ b/queue-4.1/perf-hists-browser-take-the-comm-dsos-etc-filters-into-account.patch @@ -0,0 +1,89 @@ +From 9c0fa8dd3d58de8b688fda758eea1719949c7f0a Mon Sep 17 00:00:00 2001 +From: Arnaldo Carvalho de Melo +Date: Mon, 13 Jul 2015 08:26:35 -0300 +Subject: perf hists browser: Take the --comm, --dsos, etc filters into account + +From: Arnaldo Carvalho de Melo + +commit 9c0fa8dd3d58de8b688fda758eea1719949c7f0a upstream. + +At some point: + + commit 2c86c7ca7606 + Author: Namhyung Kim + Date: Mon Mar 17 18:18:54 2014 -0300 + + perf report: Merge al->filtered with hist_entry->filtered + +We stopped dropping samples for things filtered via the --comms, --dsos, +--symbols, etc, i.e. things marked as filtered in the symbol resolution +routines (thread__find_addr_map(), perf_event__preprocess_sample(), +etc). 
+ +But then, in: + + commit 268397cb2a47 + Author: Namhyung Kim + Date: Tue Apr 22 14:49:31 2014 +0900 + + perf top/tui: Update nr_entries properly after a filter is applied + +We don't take into account entries that were filtered in +perf_event__preprocess_sample() and friends, which leads to +inconsistency in the browser seek routines, that expects the number of +hist_entry->filtered entries to match what it thinks is the number of +unfiltered, browsable entries. + +So, for instance, when we do: + + perf top --symbols ___non_existent_symbol___ + +the hist_browser__nr_entries() routine thinks there are no filters in +place, uses the hists->nr_entries but all entries are filtered, leading +to a segfault. + +Tested with: + + perf top --symbols malloc,free --percentage=relative + +Freezing, by pressing 'f', at any time and doing the math on the +percentages ends up with 100%, ditto for: + + perf top --dsos libpthread-2.20.so,libxul.so --percentage=relative + +Both were segfaulting, all fixed now. + +More work needed to do away with checking if filters are in place, we +should just use the nr_non_filtered_samples counter, no need to +conditionally use it or hists.nr_filter, as what the browser does is +just show unfiltered stuff. An audit of how it is being accounted is +needed, this is the minimal fix. 
+ +Reported-by: Michael Petlan +Fixes: 268397cb2a47 ("perf top/tui: Update nr_entries properly after a filter is applied") +Cc: Adrian Hunter +Cc: Borislav Petkov +Cc: David Ahern +Cc: Frederic Weisbecker +Cc: Jiri Olsa +Cc: Namhyung Kim +Cc: Stephane Eranian +Link: http://lkml.kernel.org/n/tip-6w01d5q97qk0d64kuojme5in@git.kernel.org +Signed-off-by: Arnaldo Carvalho de Melo +Signed-off-by: Greg Kroah-Hartman + +--- + tools/perf/ui/browsers/hists.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/tools/perf/ui/browsers/hists.c ++++ b/tools/perf/ui/browsers/hists.c +@@ -45,7 +45,7 @@ static struct rb_node *hists__filter_ent + + static bool hist_browser__has_filter(struct hist_browser *hb) + { +- return hists__has_filter(hb->hists) || hb->min_pcnt; ++ return hists__has_filter(hb->hists) || hb->min_pcnt || symbol_conf.has_filter; + } + + static int hist_browser__get_folding(struct hist_browser *browser) diff --git a/queue-4.1/perf-x86-intel-cqm-return-cached-counter-value-from-irq-context.patch b/queue-4.1/perf-x86-intel-cqm-return-cached-counter-value-from-irq-context.patch new file mode 100644 index 00000000000..2717546a59d --- /dev/null +++ b/queue-4.1/perf-x86-intel-cqm-return-cached-counter-value-from-irq-context.patch @@ -0,0 +1,99 @@ +From 2c534c0da0a68418693e10ce1c4146e085f39518 Mon Sep 17 00:00:00 2001 +From: Matt Fleming +Date: Tue, 21 Jul 2015 15:55:09 +0100 +Subject: perf/x86/intel/cqm: Return cached counter value from IRQ context + +From: Matt Fleming + +commit 2c534c0da0a68418693e10ce1c4146e085f39518 upstream. 
+ +Peter reported the following potential crash which I was able to +reproduce with his test program, + +[ 148.765788] ------------[ cut here ]------------ +[ 148.765796] WARNING: CPU: 34 PID: 2840 at kernel/smp.c:417 smp_call_function_many+0xb6/0x260() +[ 148.765797] Modules linked in: +[ 148.765800] CPU: 34 PID: 2840 Comm: perf Not tainted 4.2.0-rc1+ #4 +[ 148.765803] ffffffff81cdc398 ffff88085f105950 ffffffff818bdfd5 0000000000000007 +[ 148.765805] 0000000000000000 ffff88085f105990 ffffffff810e413a 0000000000000000 +[ 148.765807] ffffffff82301080 0000000000000022 ffffffff8107f640 ffffffff8107f640 +[ 148.765809] Call Trace: +[ 148.765810] [] dump_stack+0x45/0x57 +[ 148.765818] [] warn_slowpath_common+0x8a/0xc0 +[ 148.765822] [] ? intel_cqm_stable+0x60/0x60 +[ 148.765824] [] ? intel_cqm_stable+0x60/0x60 +[ 148.765825] [] warn_slowpath_null+0x1a/0x20 +[ 148.765827] [] smp_call_function_many+0xb6/0x260 +[ 148.765829] [] ? intel_cqm_stable+0x60/0x60 +[ 148.765831] [] on_each_cpu_mask+0x28/0x60 +[ 148.765832] [] intel_cqm_event_count+0x7f/0xe0 +[ 148.765836] [] perf_output_read+0x2a5/0x400 +[ 148.765839] [] perf_output_sample+0x31a/0x590 +[ 148.765840] [] ? perf_prepare_sample+0x26d/0x380 +[ 148.765841] [] perf_event_output+0x47/0x60 +[ 148.765843] [] __perf_event_overflow+0x215/0x240 +[ 148.765844] [] perf_event_overflow+0x14/0x20 +[ 148.765847] [] intel_pmu_handle_irq+0x1d4/0x440 +[ 148.765849] [] ? __perf_event_task_sched_in+0x36/0xa0 +[ 148.765853] [] ? vunmap_page_range+0x19d/0x2f0 +[ 148.765854] [] ? unmap_kernel_range_noflush+0x11/0x20 +[ 148.765859] [] ? ghes_copy_tofrom_phys+0x11e/0x2a0 +[ 148.765863] [] ? native_apic_msr_write+0x2b/0x30 +[ 148.765865] [] ? x2apic_send_IPI_self+0x1d/0x20 +[ 148.765869] [] ? arch_irq_work_raise+0x35/0x40 +[ 148.765872] [] ? 
irq_work_queue+0x66/0x80 +[ 148.765875] [] perf_event_nmi_handler+0x26/0x40 +[ 148.765877] [] nmi_handle+0x79/0x100 +[ 148.765879] [] default_do_nmi+0x42/0x100 +[ 148.765880] [] do_nmi+0x83/0xb0 +[ 148.765884] [] end_repeat_nmi+0x1e/0x2e +[ 148.765886] [] ? __perf_event_task_sched_in+0x36/0xa0 +[ 148.765888] [] ? __perf_event_task_sched_in+0x36/0xa0 +[ 148.765890] [] ? __perf_event_task_sched_in+0x36/0xa0 +[ 148.765891] <> [] finish_task_switch+0x156/0x210 +[ 148.765898] [] __schedule+0x341/0x920 +[ 148.765899] [] schedule+0x37/0x80 +[ 148.765903] [] ? do_page_fault+0x2f/0x80 +[ 148.765905] [] schedule_user+0x1a/0x50 +[ 148.765907] [] retint_careful+0x14/0x32 +[ 148.765908] ---[ end trace e33ff2be78e14901 ]--- + +The CQM task events are not safe to be called from within interrupt +context because they require performing an IPI to read the counter value +on all sockets. And performing IPIs from within IRQ context is a +"no-no". + +Make do with the last read counter value currently event in +event->count when we're invoked in this context. + +Reported-by: Peter Zijlstra +Signed-off-by: Matt Fleming +Cc: Thomas Gleixner +Cc: Vikas Shivappa +Cc: Kanaka Juvva +Cc: Will Auld +Link: http://lkml.kernel.org/r/1437490509-15373-1-git-send-email-matt@codeblueprint.co.uk +Signed-off-by: Thomas Gleixner +Signed-off-by: Greg Kroah-Hartman + +--- + arch/x86/kernel/cpu/perf_event_intel_cqm.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c ++++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c +@@ -934,6 +934,14 @@ static u64 intel_cqm_event_count(struct + return 0; + + /* ++ * Getting up-to-date values requires an SMP IPI which is not ++ * possible if we're being called in interrupt context. Return ++ * the cached values instead. ++ */ ++ if (unlikely(in_interrupt())) ++ goto out; ++ ++ /* + * Notice that we don't perform the reading of an RMID + * atomically, because we can't hold a spin lock across the + * IPIs. 
diff --git a/queue-4.1/qla2xxx-fix-command-initialization-in-target-mode.patch b/queue-4.1/qla2xxx-fix-command-initialization-in-target-mode.patch new file mode 100644 index 00000000000..859dde5879f --- /dev/null +++ b/queue-4.1/qla2xxx-fix-command-initialization-in-target-mode.patch @@ -0,0 +1,45 @@ +From 9fce12540cb9f91e7f1f539a80b70f0b388bdae0 Mon Sep 17 00:00:00 2001 +From: Kanoj Sarcar +Date: Wed, 10 Jun 2015 11:05:23 -0400 +Subject: qla2xxx: fix command initialization in target mode. + +From: Kanoj Sarcar + +commit 9fce12540cb9f91e7f1f539a80b70f0b388bdae0 upstream. + +Signed-off-by: Kanoj Sarcar +Signed-off-by: Himanshu Madhani +Reviewed-by: Nicholas Bellinger +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/scsi/qla2xxx/qla_target.c | 10 +++++----- + 1 file changed, 5 insertions(+), 5 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -3346,6 +3346,11 @@ static struct qla_tgt_cmd *qlt_get_tag(s + cmd->loop_id = sess->loop_id; + cmd->conf_compl_supported = sess->conf_compl_supported; + ++ cmd->cmd_flags = 0; ++ cmd->jiffies_at_alloc = get_jiffies_64(); ++ ++ cmd->reset_count = vha->hw->chip_reset; ++ + return cmd; + } + +@@ -3452,11 +3457,6 @@ static int qlt_handle_cmd_for_atio(struc + return -ENOMEM; + } + +- cmd->cmd_flags = 0; +- cmd->jiffies_at_alloc = get_jiffies_64(); +- +- cmd->reset_count = vha->hw->chip_reset; +- + cmd->cmd_in_wq = 1; + cmd->cmd_flags |= BIT_0; + INIT_WORK(&cmd->work, qlt_do_work); diff --git a/queue-4.1/qla2xxx-fix-hardware-lock-unlock-issue-causing-kernel-panic.patch b/queue-4.1/qla2xxx-fix-hardware-lock-unlock-issue-causing-kernel-panic.patch new file mode 100644 index 00000000000..00347f1492e --- /dev/null +++ b/queue-4.1/qla2xxx-fix-hardware-lock-unlock-issue-causing-kernel-panic.patch @@ -0,0 +1,108 @@ +From ba9f6f64a0ff6b7ecaed72144c179061f8eca378 Mon Sep 17 00:00:00 2001 +From: Saurav Kashyap +Date: Wed, 10 Jun 2015 11:05:17 -0400 
+Subject: qla2xxx: Fix hardware lock/unlock issue causing kernel panic. + +From: Saurav Kashyap + +commit ba9f6f64a0ff6b7ecaed72144c179061f8eca378 upstream. + +[ Upstream commit ef86cb2059a14b4024c7320999ee58e938873032 ] + +This patch fixes a kernel panic for qla2xxx Target core +Module driver introduced by a fix in the qla2xxx initiator code. + +Commit ef86cb2 ("qla2xxx: Mark port lost when we receive an RSCN for it.") +introduced the regression for qla2xxx Target driver. + +Stack trace will have following signature + + --- --- +[ffff88081faa3cc8] _raw_spin_lock_irqsave at ffffffff815b1f03 +[ffff88081faa3cd0] qlt_fc_port_deleted at ffffffffa096ccd0 [qla2xxx] +[ffff88081faa3d20] qla2x00_schedule_rport_del at ffffffffa0913831[qla2xxx] +[ffff88081faa3d50] qla2x00_mark_device_lost at ffffffffa09159c5[qla2xxx] +[ffff88081faa3db0] qla2x00_async_event at ffffffffa0938d59 [qla2xxx] +[ffff88081faa3e30] qla24xx_msix_default at ffffffffa093a326 [qla2xxx] +[ffff88081faa3e90] handle_irq_event_percpu at ffffffff810a7b8d +[ffff88081faa3ee0] handle_irq_event at ffffffff810a7d32 +[ffff88081faa3f10] handle_edge_irq at ffffffff810ab6b9 +[ffff88081faa3f30] handle_irq at ffffffff8100619c +[ffff88081faa3f70] do_IRQ at ffffffff815b4b1c + --- --- + +Signed-off-by: Saurav Kashyap +Signed-off-by: Himanshu Madhani +Reviewed-by: Nicholas Bellinger +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/scsi/qla2xxx/qla_init.c | 4 ++++ + drivers/scsi/qla2xxx/qla_target.c | 6 ------ + 2 files changed, 4 insertions(+), 6 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -2924,6 +2924,7 @@ qla2x00_rport_del(void *data) + struct fc_rport *rport; + scsi_qla_host_t *vha = fcport->vha; + unsigned long flags; ++ unsigned long vha_flags; + + spin_lock_irqsave(fcport->vha->host->host_lock, flags); + rport = fcport->drport ? 
fcport->drport: fcport->rport; +@@ -2935,7 +2936,9 @@ qla2x00_rport_del(void *data) + * Release the target mode FC NEXUS in qla_target.c code + * if target mod is enabled. + */ ++ spin_lock_irqsave(&vha->hw->hardware_lock, vha_flags); + qlt_fc_port_deleted(vha, fcport); ++ spin_unlock_irqrestore(&vha->hw->hardware_lock, vha_flags); + } + } + +@@ -3303,6 +3306,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t + * Create target mode FC NEXUS in qla_target.c if target mode is + * enabled.. + */ ++ + qlt_fc_port_added(vha, fcport); + + spin_lock_irqsave(fcport->vha->host->host_lock, flags); +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -782,10 +782,8 @@ void qlt_fc_port_added(struct scsi_qla_h + + void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) + { +- struct qla_hw_data *ha = vha->hw; + struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; + struct qla_tgt_sess *sess; +- unsigned long flags; + + if (!vha->hw->tgt.tgt_ops) + return; +@@ -793,14 +791,11 @@ void qlt_fc_port_deleted(struct scsi_qla + if (!tgt || (fcport->port_type != FCT_INITIATOR)) + return; + +- spin_lock_irqsave(&ha->hardware_lock, flags); + if (tgt->tgt_stop) { +- spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); + if (!sess) { +- spin_unlock_irqrestore(&ha->hardware_lock, flags); + return; + } + +@@ -808,7 +803,6 @@ void qlt_fc_port_deleted(struct scsi_qla + + sess->local = 1; + qlt_schedule_sess_for_deletion(sess, false); +- spin_unlock_irqrestore(&ha->hardware_lock, flags); + } + + static inline int test_tgt_sess_count(struct qla_tgt *tgt) diff --git a/queue-4.1/qla2xxx-kill-sessions-log-out-initiator-on-rscn-and-port-down-events.patch b/queue-4.1/qla2xxx-kill-sessions-log-out-initiator-on-rscn-and-port-down-events.patch new file mode 100644 index 00000000000..d3f3cd74ebb --- /dev/null +++ b/queue-4.1/qla2xxx-kill-sessions-log-out-initiator-on-rscn-and-port-down-events.patch @@ 
-0,0 +1,275 @@
+From b2032fd567326ad0b2d443bb6d96d2580ec670a5 Mon Sep 17 00:00:00 2001
+From: Roland Dreier
+Date: Tue, 14 Jul 2015 16:00:42 -0400
+Subject: qla2xxx: kill sessions/log out initiator on RSCN and port down events
+
+From: Roland Dreier
+
+commit b2032fd567326ad0b2d443bb6d96d2580ec670a5 upstream.
+
+To fix some issues talking to ESX, this patch modifies the qla2xxx driver
+so that it never logs into remote ports. This has the side effect of
+getting rid of the "rports" entirely, which means we never log out of
+initiators and never tear down sessions when an initiator goes away.
+
+This is mostly OK, except that we can run into trouble if we have
+initiator A assigned FC address X:Y:Z by the fabric talking to us, and
+then initiator A goes away. Some time (could be a long time) later,
+initiator B comes along and also gets FC address X:Y:Z (which is
+available again, because initiator A is gone). If initiator B starts
+talking to us, then we'll still have the session for initiator A, and
+since we look up incoming IO based on the FC address X:Y:Z, initiator B
+will end up using ACLs for initiator A.
+
+Fix this by:
+
+ 1. Handling RSCN events somewhat differently; instead of completely
+ skipping the processing of fcports, we look through the list, and if
+ an fcport disappears, we tell the target code to tear down the
+ session and tell the HBA FW to release the N_Port handle.
+
+ 2. Handling "port down" events by flushing all of our sessions. The
+ firmware was already releasing the N_Port handle but we want the
+ target code to drop all the sessions too.
+ +Signed-off-by: Roland Dreier +Signed-off-by: Alexei Potashnik +Acked-by: Quinn Tran +Signed-off-by: Himanshu Madhani +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/scsi/qla2xxx/qla_dbg.c | 2 + drivers/scsi/qla2xxx/qla_init.c | 137 ++++++++++++++++++++++++++++++-------- + drivers/scsi/qla2xxx/qla_target.c | 9 +- + 3 files changed, 117 insertions(+), 31 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_dbg.c ++++ b/drivers/scsi/qla2xxx/qla_dbg.c +@@ -68,7 +68,7 @@ + * | | | 0xd101-0xd1fe | + * | | | 0xd214-0xd2fe | + * | Target Mode | 0xe079 | | +- * | Target Mode Management | 0xf072 | 0xf002 | ++ * | Target Mode Management | 0xf080 | 0xf002 | + * | | | 0xf046-0xf049 | + * | Target Mode Task Management | 0x1000b | | + * ---------------------------------------------------------------------- +--- a/drivers/scsi/qla2xxx/qla_init.c ++++ b/drivers/scsi/qla2xxx/qla_init.c +@@ -3464,20 +3464,43 @@ qla2x00_configure_fabric(scsi_qla_host_t + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) + continue; + +- if (fcport->scan_state == QLA_FCPORT_SCAN && +- atomic_read(&fcport->state) == FCS_ONLINE) { +- qla2x00_mark_device_lost(vha, fcport, +- ql2xplogiabsentdevice, 0); +- if (fcport->loop_id != FC_NO_LOOP_ID && +- (fcport->flags & FCF_FCP2_DEVICE) == 0 && +- fcport->port_type != FCT_INITIATOR && +- fcport->port_type != FCT_BROADCAST) { +- ha->isp_ops->fabric_logout(vha, +- fcport->loop_id, +- fcport->d_id.b.domain, +- fcport->d_id.b.area, +- fcport->d_id.b.al_pa); +- qla2x00_clear_loop_id(fcport); ++ if (fcport->scan_state == QLA_FCPORT_SCAN) { ++ if (qla_ini_mode_enabled(base_vha) && ++ atomic_read(&fcport->state) == FCS_ONLINE) { ++ qla2x00_mark_device_lost(vha, fcport, ++ ql2xplogiabsentdevice, 0); ++ if (fcport->loop_id != FC_NO_LOOP_ID && ++ (fcport->flags & FCF_FCP2_DEVICE) == 0 && ++ fcport->port_type != FCT_INITIATOR && ++ fcport->port_type != FCT_BROADCAST) { ++ ha->isp_ops->fabric_logout(vha, ++ fcport->loop_id, ++ 
fcport->d_id.b.domain, ++ fcport->d_id.b.area, ++ fcport->d_id.b.al_pa); ++ qla2x00_clear_loop_id(fcport); ++ } ++ } else if (!qla_ini_mode_enabled(base_vha)) { ++ /* ++ * In target mode, explicitly kill ++ * sessions and log out of devices ++ * that are gone, so that we don't ++ * end up with an initiator using the ++ * wrong ACL (if the fabric recycles ++ * an FC address and we have a stale ++ * session around) and so that we don't ++ * report initiators that are no longer ++ * on the fabric. ++ */ ++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077, ++ "port gone, logging out/killing session: " ++ "%8phC state 0x%x flags 0x%x fc4_type 0x%x " ++ "scan_state %d\n", ++ fcport->port_name, ++ atomic_read(&fcport->state), ++ fcport->flags, fcport->fc4_type, ++ fcport->scan_state); ++ qlt_fc_port_deleted(vha, fcport); + } + } + } +@@ -3498,6 +3521,28 @@ qla2x00_configure_fabric(scsi_qla_host_t + (fcport->flags & FCF_LOGIN_NEEDED) == 0) + continue; + ++ /* ++ * If we're not an initiator, skip looking for devices ++ * and logging in. There's no reason for us to do it, ++ * and it seems to actively cause problems in target ++ * mode if we race with the initiator logging into us ++ * (we might get the "port ID used" status back from ++ * our login command and log out the initiator, which ++ * seems to cause havoc). ++ */ ++ if (!qla_ini_mode_enabled(base_vha)) { ++ if (fcport->scan_state == QLA_FCPORT_FOUND) { ++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078, ++ "port %8phC state 0x%x flags 0x%x fc4_type 0x%x " ++ "scan_state %d (initiator mode disabled; skipping " ++ "login)\n", fcport->port_name, ++ atomic_read(&fcport->state), ++ fcport->flags, fcport->fc4_type, ++ fcport->scan_state); ++ } ++ continue; ++ } ++ + if (fcport->loop_id == FC_NO_LOOP_ID) { + fcport->loop_id = next_loopid; + rval = qla2x00_find_new_loop_id( +@@ -3524,16 +3569,38 @@ qla2x00_configure_fabric(scsi_qla_host_t + test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) + break; + +- /* Find a new loop ID to use. 
*/ +- fcport->loop_id = next_loopid; +- rval = qla2x00_find_new_loop_id(base_vha, fcport); +- if (rval != QLA_SUCCESS) { +- /* Ran out of IDs to use */ +- break; +- } ++ /* ++ * If we're not an initiator, skip looking for devices ++ * and logging in. There's no reason for us to do it, ++ * and it seems to actively cause problems in target ++ * mode if we race with the initiator logging into us ++ * (we might get the "port ID used" status back from ++ * our login command and log out the initiator, which ++ * seems to cause havoc). ++ */ ++ if (qla_ini_mode_enabled(base_vha)) { ++ /* Find a new loop ID to use. */ ++ fcport->loop_id = next_loopid; ++ rval = qla2x00_find_new_loop_id(base_vha, ++ fcport); ++ if (rval != QLA_SUCCESS) { ++ /* Ran out of IDs to use */ ++ break; ++ } + +- /* Login and update database */ +- qla2x00_fabric_dev_login(vha, fcport, &next_loopid); ++ /* Login and update database */ ++ qla2x00_fabric_dev_login(vha, fcport, ++ &next_loopid); ++ } else { ++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079, ++ "new port %8phC state 0x%x flags 0x%x fc4_type " ++ "0x%x scan_state %d (initiator mode disabled; " ++ "skipping login)\n", ++ fcport->port_name, ++ atomic_read(&fcport->state), ++ fcport->flags, fcport->fc4_type, ++ fcport->scan_state); ++ } + + list_move_tail(&fcport->list, &vha->vp_fcports); + } +@@ -3729,11 +3796,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_ho + fcport->fp_speed = new_fcport->fp_speed; + + /* +- * If address the same and state FCS_ONLINE, nothing +- * changed. ++ * If address the same and state FCS_ONLINE ++ * (or in target mode), nothing changed. + */ + if (fcport->d_id.b24 == new_fcport->d_id.b24 && +- atomic_read(&fcport->state) == FCS_ONLINE) { ++ (atomic_read(&fcport->state) == FCS_ONLINE || ++ !qla_ini_mode_enabled(base_vha))) { + break; + } + +@@ -3753,6 +3821,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_ho + * Log it out if still logged in and mark it for + * relogin later. 
+ */ ++ if (!qla_ini_mode_enabled(base_vha)) { ++ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080, ++ "port changed FC ID, %8phC" ++ " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n", ++ fcport->port_name, ++ fcport->d_id.b.domain, ++ fcport->d_id.b.area, ++ fcport->d_id.b.al_pa, ++ fcport->loop_id, ++ new_fcport->d_id.b.domain, ++ new_fcport->d_id.b.area, ++ new_fcport->d_id.b.al_pa); ++ fcport->d_id.b24 = new_fcport->d_id.b24; ++ break; ++ } ++ + fcport->d_id.b24 = new_fcport->d_id.b24; + fcport->flags |= FCF_LOGIN_NEEDED; + if (fcport->loop_id != FC_NO_LOOP_ID && +@@ -3772,6 +3856,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_ho + if (found) + continue; + /* If device was not in our fcports list, then add it. */ ++ new_fcport->scan_state = QLA_FCPORT_FOUND; + list_add_tail(&new_fcport->list, new_fcports); + + /* Allocate a new replacement fcport. */ +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -113,6 +113,7 @@ static void qlt_abort_cmd_on_host_reset( + static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, + struct atio_from_isp *atio, uint16_t status, int qfull); + static void qlt_disable_vha(struct scsi_qla_host *vha); ++static void qlt_clear_tgt_db(struct qla_tgt *tgt); + /* + * Global Variables + */ +@@ -431,10 +432,10 @@ static int qlt_reset(struct scsi_qla_hos + + loop_id = le16_to_cpu(n->u.isp24.nport_handle); + if (loop_id == 0xFFFF) { +-#if 0 /* FIXME: Re-enable Global event handling.. */ + /* Global event */ +- atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); +- qlt_clear_tgt_db(ha->tgt.qla_tgt); ++ atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count); ++ qlt_clear_tgt_db(vha->vha_tgt.qla_tgt); ++#if 0 /* FIXME: do we need to choose a session here? 
*/ + if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { + sess = list_entry(ha->tgt.qla_tgt->sess_list.next, + typeof(*sess), sess_list_entry); +@@ -788,7 +789,7 @@ void qlt_fc_port_deleted(struct scsi_qla + if (!vha->hw->tgt.tgt_ops) + return; + +- if (!tgt || (fcport->port_type != FCT_INITIATOR)) ++ if (!tgt) + return; + + if (tgt->tgt_stop) { diff --git a/queue-4.1/qla2xxx-release-request-queue-reservation.patch b/queue-4.1/qla2xxx-release-request-queue-reservation.patch new file mode 100644 index 00000000000..dde196e6352 --- /dev/null +++ b/queue-4.1/qla2xxx-release-request-queue-reservation.patch @@ -0,0 +1,52 @@ +From 810e30bc4658e9c069577bde52394a5af872803c Mon Sep 17 00:00:00 2001 +From: Quinn Tran +Date: Wed, 10 Jun 2015 11:05:20 -0400 +Subject: qla2xxx: release request queue reservation. + +From: Quinn Tran + +commit 810e30bc4658e9c069577bde52394a5af872803c upstream. + +Request IOCB queue element(s) is reserved during +good path IO. Under error condition such as unable +to allocate IOCB handle condition, the IOCB count +that was reserved is not released. 
+ +Signed-off-by: Quinn Tran +Signed-off-by: Himanshu Madhani +Reviewed-by: Nicholas Bellinger +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/scsi/qla2xxx/qla_target.c | 10 +++++++--- + 1 file changed, 7 insertions(+), 3 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -2341,9 +2341,10 @@ int qlt_xmit_response(struct qla_tgt_cmd + res = qlt_build_ctio_crc2_pkt(&prm, vha); + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); +- if (unlikely(res != 0)) ++ if (unlikely(res != 0)) { ++ vha->req->cnt += full_req_cnt; + goto out_unmap_unlock; +- ++ } + + pkt = (struct ctio7_to_24xx *)prm.pkt; + +@@ -2481,8 +2482,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd * + else + res = qlt_24xx_build_ctio_pkt(&prm, vha); + +- if (unlikely(res != 0)) ++ if (unlikely(res != 0)) { ++ vha->req->cnt += prm.req_cnt; + goto out_unlock_free_unmap; ++ } ++ + pkt = (struct ctio7_to_24xx *)prm.pkt; + pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | + CTIO7_FLAGS_STATUS_MODE_0); diff --git a/queue-4.1/qla2xxx-remove-msleep-in-qlt_send_term_exchange.patch b/queue-4.1/qla2xxx-remove-msleep-in-qlt_send_term_exchange.patch new file mode 100644 index 00000000000..97627611b12 --- /dev/null +++ b/queue-4.1/qla2xxx-remove-msleep-in-qlt_send_term_exchange.patch @@ -0,0 +1,58 @@ +From 6bc85dd595a5438b50ec085668e53ef26058bb90 Mon Sep 17 00:00:00 2001 +From: Himanshu Madhani +Date: Wed, 10 Jun 2015 11:05:22 -0400 +Subject: qla2xxx: Remove msleep in qlt_send_term_exchange + +From: Himanshu Madhani + +commit 6bc85dd595a5438b50ec085668e53ef26058bb90 upstream. + +Remove unnecessary msleep from qlt_send_term_exchange as it +adds latency of 250 msec while sending terminate exchange to +an aborted task. 
+ +Signed-off-by: Himanshu Madhani +Signed-off-by: Giridhar Malavali +Reviewed-by: Nicholas Bellinger +Signed-off-by: Nicholas Bellinger +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/scsi/qla2xxx/qla_target.c | 13 +++++++------ + 1 file changed, 7 insertions(+), 6 deletions(-) + +--- a/drivers/scsi/qla2xxx/qla_target.c ++++ b/drivers/scsi/qla2xxx/qla_target.c +@@ -2715,7 +2715,7 @@ static int __qlt_send_term_exchange(stru + static void qlt_send_term_exchange(struct scsi_qla_host *vha, + struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) + { +- unsigned long flags; ++ unsigned long flags = 0; + int rc; + + if (qlt_issue_marker(vha, ha_locked) < 0) +@@ -2731,17 +2731,18 @@ static void qlt_send_term_exchange(struc + rc = __qlt_send_term_exchange(vha, cmd, atio); + if (rc == -ENOMEM) + qlt_alloc_qfull_cmd(vha, atio, 0, 0); +- spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); + + done: + if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || + !cmd->cmd_sent_to_fw)) { +- if (!ha_locked && !in_interrupt()) +- msleep(250); /* just in case */ +- +- qlt_unmap_sg(vha, cmd); ++ if (cmd->sg_mapped) ++ qlt_unmap_sg(vha, cmd); + vha->hw->tgt.tgt_ops->free_cmd(cmd); + } ++ ++ if (!ha_locked) ++ spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); ++ + return; + } + diff --git a/queue-4.1/rds-rds_ib_device.refcount-overflow.patch b/queue-4.1/rds-rds_ib_device.refcount-overflow.patch new file mode 100644 index 00000000000..fc57e0bf20b --- /dev/null +++ b/queue-4.1/rds-rds_ib_device.refcount-overflow.patch @@ -0,0 +1,51 @@ +From 4fabb59449aa44a585b3603ffdadd4c5f4d0c033 Mon Sep 17 00:00:00 2001 +From: Wengang Wang +Date: Mon, 6 Jul 2015 14:35:11 +0800 +Subject: rds: rds_ib_device.refcount overflow + +From: Wengang Wang + +commit 4fabb59449aa44a585b3603ffdadd4c5f4d0c033 upstream. 
+ +Fixes: 3e0249f9c05c ("RDS/IB: add refcount tracking to struct rds_ib_device") + +There lacks a dropping on rds_ib_device.refcount in case rds_ib_alloc_fmr +failed(mr pool running out). this lead to the refcount overflow. + +A complain in line 117(see following) is seen. From vmcore: +s_ib_rdma_mr_pool_depleted is 2147485544 and rds_ibdev->refcount is -2147475448. +That is the evidence the mr pool is used up. so rds_ib_alloc_fmr is very likely +to return ERR_PTR(-EAGAIN). + +115 void rds_ib_dev_put(struct rds_ib_device *rds_ibdev) +116 { +117 BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0); +118 if (atomic_dec_and_test(&rds_ibdev->refcount)) +119 queue_work(rds_wq, &rds_ibdev->free_work); +120 } + +fix is to drop refcount when rds_ib_alloc_fmr failed. + +Signed-off-by: Wengang Wang +Reviewed-by: Haggai Eran +Signed-off-by: Doug Ledford +Signed-off-by: Greg Kroah-Hartman + +--- + net/rds/ib_rdma.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +--- a/net/rds/ib_rdma.c ++++ b/net/rds/ib_rdma.c +@@ -759,8 +759,10 @@ void *rds_ib_get_mr(struct scatterlist * + } + + ibmr = rds_ib_alloc_fmr(rds_ibdev); +- if (IS_ERR(ibmr)) ++ if (IS_ERR(ibmr)) { ++ rds_ib_dev_put(rds_ibdev); + return ibmr; ++ } + + ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents); + if (ret == 0) diff --git a/queue-4.1/series b/queue-4.1/series index 2ee8f7350f4..627d6d91f5d 100644 --- a/queue-4.1/series +++ b/queue-4.1/series @@ -91,3 +91,31 @@ efi-handle-memory-error-structures-produced-based-on-old-versions-of-standard.pa arm64-efi-map-the-entire-uefi-vendor-string-before-reading-it.patch efi-check-for-null-efi-kernel-parameters.patch x86-efi-use-all-64-bit-of-efi_memmap-in-setup_e820.patch +arc-reduce-bitops-lines-of-code-using-macros.patch +arc-make-arc-bitops-safer-add-anti-optimization.patch +rds-rds_ib_device.refcount-overflow.patch +n_tty-signal-and-flush-atomically.patch +blk-mq-set-default-timeout-as-30-seconds.patch 
+perf-hists-browser-take-the-comm-dsos-etc-filters-into-account.patch +perf-x86-intel-cqm-return-cached-counter-value-from-irq-context.patch +vhost-actually-track-log-eventfd-file.patch +hwmon-nct7802-fix-integer-overflow-seen-when-writing-voltage-limits.patch +hwmon-nct7904-rename-pwm-attributes-to-match-hwmon-abi.patch +nfs-don-t-revalidate-the-mapping-if-both-size-and-change-attr-are-up-to-date.patch +avr32-handle-null-as-a-valid-clock-object.patch +nfsv4-we-must-set-nfs_open_state-flag-in-nfs_resync_open_stateid_locked.patch +nfs-fix-a-memory-leak-in-nfs_do_recoalesce.patch +ib-ipoib-fix-config_infiniband_ipoib_cm.patch +iscsi-target-fix-use-after-free-during-tpg-session-shutdown.patch +iscsi-target-fix-iscsit_start_kthreads-failure-oops.patch +iscsi-target-fix-iser-explicit-logout-tx-kthread-leak.patch +intel_pstate-add-get_scaling-cpu_defaults-param-to-knights-landing.patch +qla2xxx-fix-hardware-lock-unlock-issue-causing-kernel-panic.patch +qla2xxx-release-request-queue-reservation.patch +qla2xxx-remove-msleep-in-qlt_send_term_exchange.patch +qla2xxx-fix-command-initialization-in-target-mode.patch +qla2xxx-kill-sessions-log-out-initiator-on-rscn-and-port-down-events.patch +drm-nouveau-fbcon-nv11-correctly-account-for-ring-space-usage.patch +drm-nouveau-kms-nv50-guard-against-enabling-cursor-on-disabled-heads.patch +drm-nouveau-hold-mutex-when-calling-nouveau_abi16_fini.patch +drm-nouveau-drm-nv04-nv40-instmem-protect-access-to-priv-heap-by-mutex.patch diff --git a/queue-4.1/vhost-actually-track-log-eventfd-file.patch b/queue-4.1/vhost-actually-track-log-eventfd-file.patch new file mode 100644 index 00000000000..ef30f3cb5bf --- /dev/null +++ b/queue-4.1/vhost-actually-track-log-eventfd-file.patch @@ -0,0 +1,33 @@ +From 7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= +Date: Fri, 17 Jul 2015 15:32:03 +0200 +Subject: vhost: actually track log eventfd file +MIME-Version: 1.0 +Content-Type: text/plain; 
charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: =?UTF-8?q?Marc-Andr=C3=A9=20Lureau?= + +commit 7932c0bd7740f4cd2aa168d3ce0199e7af7d72d5 upstream. + +While reviewing vhost log code, I found out that log_file is never +set. Note: I haven't tested the change (QEMU doesn't use LOG_FD yet). + +Signed-off-by: Marc-André Lureau +Signed-off-by: Michael S. Tsirkin +Signed-off-by: Greg Kroah-Hartman + +--- + drivers/vhost/vhost.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/vhost/vhost.c ++++ b/drivers/vhost/vhost.c +@@ -886,6 +886,7 @@ long vhost_dev_ioctl(struct vhost_dev *d + } + if (eventfp != d->log_file) { + filep = d->log_file; ++ d->log_file = eventfp; + ctx = d->log_ctx; + d->log_ctx = eventfp ? + eventfd_ctx_fileget(eventfp) : NULL;