4.2-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Oct 2015 01:00:46 +0000 (18:00 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Oct 2015 01:00:46 +0000 (18:00 -0700)
added patches:
3w-9xxx-don-t-unmap-bounce-buffered-commands.patch
sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch

queue-4.2/3w-9xxx-don-t-unmap-bounce-buffered-commands.patch [new file with mode: 0644]
queue-4.2/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch [new file with mode: 0644]
queue-4.2/series

diff --git a/queue-4.2/3w-9xxx-don-t-unmap-bounce-buffered-commands.patch b/queue-4.2/3w-9xxx-don-t-unmap-bounce-buffered-commands.patch
new file mode 100644 (file)
index 0000000..1e6b8c8
--- /dev/null
@@ -0,0 +1,106 @@
+From 15e3d5a285ab9283136dba34bbf72886d9146706 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Sat, 3 Oct 2015 19:16:07 +0200
+Subject: 3w-9xxx: don't unmap bounce buffered commands
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 15e3d5a285ab9283136dba34bbf72886d9146706 upstream.
+
+3w controllers don't DMA map small single SGL entry commands, but
+instead bounce buffer them.  Add a helper to identify these commands
+and don't call scsi_dma_unmap for them.
+
+Based on an earlier patch from James Bottomley.
+
+Fixes: 118c85 ("3w-9xxx: fix command completion race")
+Reported-by: Tóth Attila <atoth@atoth.sote.hu>
+Tested-by: Tóth Attila <atoth@atoth.sote.hu>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Acked-by: Adam Radford <aradford@gmail.com>
+Signed-off-by: James Bottomley <JBottomley@Odin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/3w-9xxx.c |   28 +++++++++++++++++++++-------
+ 1 file changed, 21 insertions(+), 7 deletions(-)
+
+--- a/drivers/scsi/3w-9xxx.c
++++ b/drivers/scsi/3w-9xxx.c
+@@ -212,6 +212,17 @@ static const struct file_operations twa_
+       .llseek         = noop_llseek,
+ };
+
++/*
++ * The controllers use an inline buffer instead of a mapped SGL for small,
++ * single entry buffers.  Note that we treat a zero-length transfer like
++ * a mapped SGL.
++ */
++static bool twa_command_mapped(struct scsi_cmnd *cmd)
++{
++      return scsi_sg_count(cmd) != 1 ||
++              scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
++}
++
+ /* This function will complete an aen request from the isr */
+ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+ {
+@@ -1339,7 +1350,8 @@ static irqreturn_t twa_interrupt(int irq
+                               }
+
+                               /* Now complete the io */
+-                              scsi_dma_unmap(cmd);
++                              if (twa_command_mapped(cmd))
++                                      scsi_dma_unmap(cmd);
+                               cmd->scsi_done(cmd);
+                               tw_dev->state[request_id] = TW_S_COMPLETED;
+                               twa_free_request_id(tw_dev, request_id);
+@@ -1582,7 +1594,8 @@ static int twa_reset_device_extension(TW
+                               struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+                               cmd->result = (DID_RESET << 16);
+-                              scsi_dma_unmap(cmd);
++                              if (twa_command_mapped(cmd))
++                                      scsi_dma_unmap(cmd);
+                               cmd->scsi_done(cmd);
+                       }
+               }
+@@ -1765,12 +1778,14 @@ static int twa_scsi_queue_lck(struct scs
+       retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
+       switch (retval) {
+       case SCSI_MLQUEUE_HOST_BUSY:
+-              scsi_dma_unmap(SCpnt);
++              if (twa_command_mapped(SCpnt))
++                      scsi_dma_unmap(SCpnt);
+               twa_free_request_id(tw_dev, request_id);
+               break;
+       case 1:
+               SCpnt->result = (DID_ERROR << 16);
+-              scsi_dma_unmap(SCpnt);
++              if (twa_command_mapped(SCpnt))
++                      scsi_dma_unmap(SCpnt);
+               done(SCpnt);
+               tw_dev->state[request_id] = TW_S_COMPLETED;
+               twa_free_request_id(tw_dev, request_id);
+@@ -1831,8 +1846,7 @@ static int twa_scsiop_execute_scsi(TW_De
+
+               /* Map sglist from scsi layer to cmd packet */
+               if (scsi_sg_count(srb)) {
+-                      if ((scsi_sg_count(srb) == 1) &&
+-                          (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
++                      if (!twa_command_mapped(srb)) {
+                               if (srb->sc_data_direction == DMA_TO_DEVICE ||
+                                   srb->sc_data_direction == DMA_BIDIRECTIONAL)
+                                       scsi_sg_copy_to_buffer(srb,
+@@ -1905,7 +1919,7 @@ static void twa_scsiop_execute_scsi_comp
+ {
+       struct scsi_cmnd *cmd = tw_dev->srb[request_id];
+
+-      if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
++      if (!twa_command_mapped(cmd) &&
+           (cmd->sc_data_direction == DMA_FROM_DEVICE ||
+            cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
+               if (scsi_sg_count(cmd) == 1) {
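
[Editor's illustration: a minimal userspace sketch of the predicate the
patch above centralizes.  struct mock_cmd and the TW_MIN_SGL_LENGTH value
are assumptions made up for the sketch, not driver code; the point is that
the submission path and every completion path now consult one helper, so
they can no longer disagree about whether scsi_dma_unmap() is owed.]

#include <stdbool.h>
#include <stdio.h>

#define TW_MIN_SGL_LENGTH 512	/* placeholder; the real value lives in 3w-9xxx.h */

struct mock_cmd {
	int sg_count;		/* stands in for scsi_sg_count(cmd) */
	unsigned int bufflen;	/* stands in for scsi_bufflen(cmd) */
};

/* Mirrors twa_command_mapped(): only small, single-entry SGLs are bounce
 * buffered; everything else (including zero-length transfers, which have
 * sg_count == 0) is treated as a mapped SGL. */
static bool command_mapped(const struct mock_cmd *cmd)
{
	return cmd->sg_count != 1 || cmd->bufflen >= TW_MIN_SGL_LENGTH;
}

int main(void)
{
	struct mock_cmd bounced = { .sg_count = 1, .bufflen = 64 };
	struct mock_cmd mapped  = { .sg_count = 4, .bufflen = 4096 };
	struct mock_cmd zero    = { .sg_count = 0, .bufflen = 0 };

	/* prints "0 1 1": only the first command skips scsi_dma_unmap() */
	printf("%d %d %d\n", command_mapped(&bounced),
	       command_mapped(&mapped), command_mapped(&zero));
	return 0;
}
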
diff --git a/queue-4.2/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch b/queue-4.2/sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
new file mode 100644 (file)
index 0000000..43b021c
--- /dev/null
@@ -0,0 +1,172 @@
+From fe32d3cd5e8eb0f82e459763374aa80797023403 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Date: Wed, 15 Jul 2015 12:52:04 +0300
+Subject: sched/preempt: Fix cond_resched_lock() and cond_resched_softirq()
+
+From: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+
+commit fe32d3cd5e8eb0f82e459763374aa80797023403 upstream.
+
+These functions check should_resched() before unlocking a spinlock or
+re-enabling bottom halves.  At that point preempt_count is always
+non-zero, so should_resched() always returns false, and
+cond_resched_lock() only worked when spin_needbreak was set.
+
+This patch adds a "preempt_offset" argument to should_resched().
+
+It also adds preempt_count offset constants for the different contexts:
+
+  PREEMPT_DISABLE_OFFSET  - offset after preempt_disable()
+  PREEMPT_LOCK_OFFSET     - offset after spin_lock()
+  SOFTIRQ_DISABLE_OFFSET  - offset after local_bh_disable()
+  SOFTIRQ_LOCK_OFFSET     - offset after spin_lock_bh()
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Graf <agraf@suse.de>
+Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Cc: David Vrabel <david.vrabel@citrix.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: bdb438065890 ("sched: Extract the basic add/sub preempt_count modifiers")
+Link: http://lkml.kernel.org/r/20150715095204.12246.98268.stgit@buzz
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/include/asm/preempt.h |    4 ++--
+ include/asm-generic/preempt.h  |    5 +++--
+ include/linux/preempt.h        |   19 ++++++++++++++-----
+ include/linux/sched.h          |    6 ------
+ kernel/sched/core.c            |    6 +++---
+ 5 files changed, 22 insertions(+), 18 deletions(-)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -90,9 +90,9 @@ static __always_inline bool __preempt_co
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-      return unlikely(!raw_cpu_read_4(__preempt_count));
++      return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset);
+ }
+
+ #ifdef CONFIG_PREEMPT
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -71,9 +71,10 @@ static __always_inline bool __preempt_co
+ /*
+  * Returns true when we need to resched and can (barring IRQ state).
+  */
+-static __always_inline bool should_resched(void)
++static __always_inline bool should_resched(int preempt_offset)
+ {
+-      return unlikely(!preempt_count() && tif_need_resched());
++      return unlikely(preempt_count() == preempt_offset &&
++                      tif_need_resched());
+ }
+
+ #ifdef CONFIG_PREEMPT
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -84,13 +84,21 @@
+  */
+ #define in_nmi()      (preempt_count() & NMI_MASK)
+
++/*
++ * The preempt_count offset after preempt_disable();
++ */
+ #if defined(CONFIG_PREEMPT_COUNT)
+-# define PREEMPT_DISABLE_OFFSET 1
++# define PREEMPT_DISABLE_OFFSET       PREEMPT_OFFSET
+ #else
+-# define PREEMPT_DISABLE_OFFSET 0
++# define PREEMPT_DISABLE_OFFSET       0
+ #endif
+
+ /*
++ * The preempt_count offset after spin_lock()
++ */
++#define PREEMPT_LOCK_OFFSET   PREEMPT_DISABLE_OFFSET
++
++/*
+  * The preempt_count offset needed for things like:
+  *
+  *  spin_lock_bh()
+@@ -103,7 +111,7 @@
+  *
+  * Work as expected.
+  */
+-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
++#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
+
+ /*
+  * Are we running in atomic context?  WARNING: this macro cannot
+@@ -124,7 +132,8 @@
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+ extern void preempt_count_add(int val);
+ extern void preempt_count_sub(int val);
+-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
++#define preempt_count_dec_and_test() \
++      ({ preempt_count_sub(1); should_resched(0); })
+ #else
+ #define preempt_count_add(val)        __preempt_count_add(val)
+ #define preempt_count_sub(val)        __preempt_count_sub(val)
+@@ -184,7 +193,7 @@ do { \
+
+ #define preempt_check_resched() \
+ do { \
+-      if (should_resched()) \
++      if (should_resched(0)) \
+               __preempt_schedule(); \
+ } while (0)
+
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -2909,12 +2909,6 @@ extern int _cond_resched(void);
+
+ extern int __cond_resched_lock(spinlock_t *lock);
+
+-#ifdef CONFIG_PREEMPT_COUNT
+-#define PREEMPT_LOCK_OFFSET   PREEMPT_OFFSET
+-#else
+-#define PREEMPT_LOCK_OFFSET   0
+-#endif
+-
+ #define cond_resched_lock(lock) ({                            \
+       ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
+       __cond_resched_lock(lock);                              \
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4499,7 +4499,7 @@ SYSCALL_DEFINE0(sched_yield)
+
+ int __sched _cond_resched(void)
+ {
+-      if (should_resched()) {
++      if (should_resched(0)) {
+               preempt_schedule_common();
+               return 1;
+       }
+@@ -4517,7 +4517,7 @@ EXPORT_SYMBOL(_cond_resched);
+  */
+ int __cond_resched_lock(spinlock_t *lock)
+ {
+-      int resched = should_resched();
++      int resched = should_resched(PREEMPT_LOCK_OFFSET);
+       int ret = 0;
+
+       lockdep_assert_held(lock);
+@@ -4539,7 +4539,7 @@ int __sched __cond_resched_softirq(void)
+ {
+       BUG_ON(!in_softirq());
+
+-      if (should_resched()) {
++      if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
+               local_bh_enable();
+               preempt_schedule_common();
+               local_bh_disable();
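
[Editor's illustration: a userspace sketch of the generic should_resched()
logic before and after this patch.  The counter and flag are mocks, and
PREEMPT_LOCK_OFFSET is hard-coded to 1 for the sketch; in the kernel these
come from preempt.h.]

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_LOCK_OFFSET 1	/* one held spin_lock() contributes 1 */

static int preempt_count_mock;		/* stands in for preempt_count() */
static bool need_resched_mock = true;	/* pretend a reschedule is pending */

/* Mirrors the fixed asm-generic should_resched(): reschedulable iff the
 * only preempt_count contribution is the one the caller expects. */
static bool should_resched(int preempt_offset)
{
	return preempt_count_mock == preempt_offset && need_resched_mock;
}

int main(void)
{
	preempt_count_mock = PREEMPT_LOCK_OFFSET;	/* as inside spin_lock() */

	/* Old code asked "is preempt_count zero?" while a lock was held,
	 * so this always prints 0 and cond_resched_lock() never rescheduled. */
	printf("old: %d\n", should_resched(0));

	/* The fixed __cond_resched_lock() passes PREEMPT_LOCK_OFFSET, so
	 * the pending reschedule is seen: prints 1. */
	printf("new: %d\n", should_resched(PREEMPT_LOCK_OFFSET));
	return 0;
}
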
diff --git a/queue-4.2/series b/queue-4.2/series
index e928a83ab8f2ba67f0e8d888f7a2d451e7fd3d60..df6cb3295e7ad09583038dd5c3aa1f88d203f3e6 100644 (file)
@@ -254,3 +254,5 @@ serial-atmel-fix-error-path-of-probe-function.patch
 e1000e-fix-tight-loop-implementation-of-systime-read-algorithm.patch
 mm-slab-fix-unexpected-index-mapping-result-of-kmalloc_size-index_node-1.patch
 blk-mq-avoid-setting-hctx-tags-cpumask-before-allocation.patch
+sched-preempt-fix-cond_resched_lock-and-cond_resched_softirq.patch
+3w-9xxx-don-t-unmap-bounce-buffered-commands.patch
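
[Editor's note on the arch/x86 hunk above: x86 folds the need-resched
state into __preempt_count as an inverted high bit (PREEMPT_NEED_RESCHED,
0x80000000 -- cleared when a reschedule is pending), which is why a single
equality test against preempt_offset checks both conditions at once.
A mocked userspace sketch of that trick:]

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED 0x80000000u	/* stored inverted on x86 */

static unsigned int preempt_count_mock = PREEMPT_NEED_RESCHED; /* no resched pending */

/* Clearing the inverted bit marks a pending reschedule, as the kernel's
 * set_preempt_need_resched() does on x86. */
static void set_need_resched(void)
{
	preempt_count_mock &= ~PREEMPT_NEED_RESCHED;
}

/* Mirrors the fixed x86 should_resched(): equality holds only when the
 * low bits match the expected offset AND the inverted bit is clear. */
static bool should_resched(unsigned int preempt_offset)
{
	return preempt_count_mock == preempt_offset;
}

int main(void)
{
	preempt_count_mock += 1;		/* as after spin_lock() */
	printf("%d\n", should_resched(1));	/* 0: no reschedule pending */

	set_need_resched();
	printf("%d\n", should_resched(1));	/* 1: offset matches, resched pending */
	return 0;
}
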