git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 06:20:50 +0000 (08:20 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 31 Aug 2017 06:20:50 +0000 (08:20 +0200)
added patches:
arm64-fpsimd-prevent-registers-leaking-across-exec.patch
locking-spinlock-debug-remove-spinlock-lockup-detection-code.patch
scsi-sg-protect-accesses-to-reserved-page-array.patch
scsi-sg-reset-res_in_use-after-unlinking-reserved-array.patch

queue-4.9/arm64-fpsimd-prevent-registers-leaking-across-exec.patch [new file with mode: 0644]
queue-4.9/locking-spinlock-debug-remove-spinlock-lockup-detection-code.patch [new file with mode: 0644]
queue-4.9/scsi-sg-protect-accesses-to-reserved-page-array.patch [new file with mode: 0644]
queue-4.9/scsi-sg-reset-res_in_use-after-unlinking-reserved-array.patch [new file with mode: 0644]
queue-4.9/series

diff --git a/queue-4.9/arm64-fpsimd-prevent-registers-leaking-across-exec.patch b/queue-4.9/arm64-fpsimd-prevent-registers-leaking-across-exec.patch
new file mode 100644 (file)
index 0000000..e115d8a
--- /dev/null
@@ -0,0 +1,54 @@
+From 096622104e14d8a1db4860bd557717067a0515d2 Mon Sep 17 00:00:00 2001
+From: Dave Martin <Dave.Martin@arm.com>
+Date: Fri, 18 Aug 2017 16:57:01 +0100
+Subject: arm64: fpsimd: Prevent registers leaking across exec
+
+From: Dave Martin <Dave.Martin@arm.com>
+
+commit 096622104e14d8a1db4860bd557717067a0515d2 upstream.
+
+There are some tricky dependencies between the different stages of
+flushing the FPSIMD register state during exec, and these can race
+with context switch in ways that can cause the old task's regs to
+leak across.  In particular, a context switch during the memset() can
+cause some of the task's old FPSIMD registers to reappear.
+
+Disabling preemption for this small window would be no big deal for
+performance: preemption is already disabled for similar scenarios
+like updating the FPSIMD registers in sigreturn.
+
+So, instead of rearranging things in ways that might swap existing
+subtle bugs for new ones, this patch just disables preemption
+around the FPSIMD state flushing so that races of this type can't
+occur here.  This brings fpsimd_flush_thread() into line with other
+code paths.
+
+Fixes: 674c242c9323 ("arm64: flush FP/SIMD state correctly after execve()")
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+
+For stable only.
+
+3.17.x-4.0.x don't appear active, and this patch isn't sufficient to fix
+them (they would need 674c242c9323 also).
+
+ arch/arm64/kernel/fpsimd.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -157,9 +157,11 @@ void fpsimd_thread_switch(struct task_st
+ void fpsimd_flush_thread(void)
+ {
++      preempt_disable();
+       memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+       fpsimd_flush_task_state(current);
+       set_thread_flag(TIF_FOREIGN_FPSTATE);
++      preempt_enable();
+ }
+ /*
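With the hunk above applied, the whole function is only a few lines. A sketch of fpsimd_flush_thread() as it reads after this patch, reconstructed from the hunk (the rest of arch/arm64/kernel/fpsimd.c is unchanged and elided):

void fpsimd_flush_thread(void)
{
	preempt_disable();	/* a context switch here could let stale regs resurface */
	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
	fpsimd_flush_task_state(current);	/* invalidate any per-CPU copy of this task's state */
	set_thread_flag(TIF_FOREIGN_FPSTATE);	/* force a reload from thread_struct on return to user */
	preempt_enable();
}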
diff --git a/queue-4.9/locking-spinlock-debug-remove-spinlock-lockup-detection-code.patch b/queue-4.9/locking-spinlock-debug-remove-spinlock-lockup-detection-code.patch
new file mode 100644 (file)
index 0000000..7f89768
--- /dev/null
@@ -0,0 +1,148 @@
+From bc88c10d7e6900916f5e1ba3829d66a9de92b633 Mon Sep 17 00:00:00 2001
+From: Waiman Long <longman@redhat.com>
+Date: Wed, 8 Feb 2017 14:46:48 -0500
+Subject: locking/spinlock/debug: Remove spinlock lockup detection code
+
+From: Waiman Long <longman@redhat.com>
+
+commit bc88c10d7e6900916f5e1ba3829d66a9de92b633 upstream.
+
+The current spinlock lockup detection code can sometimes produce false
+positives because of the unfairness of the locking algorithm itself.
+
+So the lockup detection code is now removed. Instead, we rely on the
+NMI watchdog to detect potential lockups. We won't have lockup
+detection if the watchdog isn't running.
+
+The commented-out read-write lock lockup detection code is also
+removed.
+
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sasha Levin <sasha.levin@oracle.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Link: http://lkml.kernel.org/r/1486583208-11038-1-git-send-email-longman@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Cc: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/locking/spinlock_debug.c |   86 ++--------------------------------------
+ 1 file changed, 5 insertions(+), 81 deletions(-)
+
+--- a/kernel/locking/spinlock_debug.c
++++ b/kernel/locking/spinlock_debug.c
+@@ -103,38 +103,14 @@ static inline void debug_spin_unlock(raw
+       lock->owner_cpu = -1;
+ }
+-static void __spin_lock_debug(raw_spinlock_t *lock)
+-{
+-      u64 i;
+-      u64 loops = loops_per_jiffy * HZ;
+-
+-      for (i = 0; i < loops; i++) {
+-              if (arch_spin_trylock(&lock->raw_lock))
+-                      return;
+-              __delay(1);
+-      }
+-      /* lockup suspected: */
+-      spin_dump(lock, "lockup suspected");
+-#ifdef CONFIG_SMP
+-      trigger_all_cpu_backtrace();
+-#endif
+-
+-      /*
+-       * The trylock above was causing a livelock.  Give the lower level arch
+-       * specific lock code a chance to acquire the lock. We have already
+-       * printed a warning/backtrace at this point. The non-debug arch
+-       * specific code might actually succeed in acquiring the lock.  If it is
+-       * not successful, the end-result is the same - there is no forward
+-       * progress.
+-       */
+-      arch_spin_lock(&lock->raw_lock);
+-}
+-
++/*
++ * We are now relying on the NMI watchdog to detect lockup instead of doing
++ * the detection here with an unfair lock which can cause problem of its own.
++ */
+ void do_raw_spin_lock(raw_spinlock_t *lock)
+ {
+       debug_spin_lock_before(lock);
+-      if (unlikely(!arch_spin_trylock(&lock->raw_lock)))
+-              __spin_lock_debug(lock);
++      arch_spin_lock(&lock->raw_lock);
+       debug_spin_lock_after(lock);
+ }
+@@ -172,32 +148,6 @@ static void rwlock_bug(rwlock_t *lock, c
+ #define RWLOCK_BUG_ON(cond, lock, msg) if (unlikely(cond)) rwlock_bug(lock, msg)
+-#if 0         /* __write_lock_debug() can lock up - maybe this can too? */
+-static void __read_lock_debug(rwlock_t *lock)
+-{
+-      u64 i;
+-      u64 loops = loops_per_jiffy * HZ;
+-      int print_once = 1;
+-
+-      for (;;) {
+-              for (i = 0; i < loops; i++) {
+-                      if (arch_read_trylock(&lock->raw_lock))
+-                              return;
+-                      __delay(1);
+-              }
+-              /* lockup suspected: */
+-              if (print_once) {
+-                      print_once = 0;
+-                      printk(KERN_EMERG "BUG: read-lock lockup on CPU#%d, "
+-                                      "%s/%d, %p\n",
+-                              raw_smp_processor_id(), current->comm,
+-                              current->pid, lock);
+-                      dump_stack();
+-              }
+-      }
+-}
+-#endif
+-
+ void do_raw_read_lock(rwlock_t *lock)
+ {
+       RWLOCK_BUG_ON(lock->magic != RWLOCK_MAGIC, lock, "bad magic");
+@@ -247,32 +197,6 @@ static inline void debug_write_unlock(rw
+       lock->owner_cpu = -1;
+ }
+-#if 0         /* This can cause lockups */
+-static void __write_lock_debug(rwlock_t *lock)
+-{
+-      u64 i;
+-      u64 loops = loops_per_jiffy * HZ;
+-      int print_once = 1;
+-
+-      for (;;) {
+-              for (i = 0; i < loops; i++) {
+-                      if (arch_write_trylock(&lock->raw_lock))
+-                              return;
+-                      __delay(1);
+-              }
+-              /* lockup suspected: */
+-              if (print_once) {
+-                      print_once = 0;
+-                      printk(KERN_EMERG "BUG: write-lock lockup on CPU#%d, "
+-                                      "%s/%d, %p\n",
+-                              raw_smp_processor_id(), current->comm,
+-                              current->pid, lock);
+-                      dump_stack();
+-              }
+-      }
+-}
+-#endif
+-
+ void do_raw_write_lock(rwlock_t *lock)
+ {
+       debug_write_lock_before(lock);
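The effect on the slow path is easiest to see in the resulting function: with the trylock-and-spin loop gone, the debug variant simply takes the architecture lock. A sketch of do_raw_spin_lock() as it reads after this patch, reconstructed from the hunk above (the sanity-check helpers are untouched):

void do_raw_spin_lock(raw_spinlock_t *lock)
{
	debug_spin_lock_before(lock);	/* magic, recursion and owner-CPU checks */
	arch_spin_lock(&lock->raw_lock);	/* may spin indefinitely; lockups are now the NMI watchdog's problem */
	debug_spin_lock_after(lock);	/* record current task/CPU as owner */
}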
diff --git a/queue-4.9/scsi-sg-protect-accesses-to-reserved-page-array.patch b/queue-4.9/scsi-sg-protect-accesses-to-reserved-page-array.patch
new file mode 100644 (file)
index 0000000..65e0443
--- /dev/null
@@ -0,0 +1,172 @@
+From 1bc0eb0446158cc76562176b80623aa119afee5b Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Fri, 7 Apr 2017 09:34:14 +0200
+Subject: scsi: sg: protect accesses to 'reserved' page array
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit 1bc0eb0446158cc76562176b80623aa119afee5b upstream.
+
+The 'reserved' page array is used as a short-cut for mapping data,
+saving us from allocating pages per request. However, the 'reserved'
+array is only capable of holding one request, so this patch introduces
+a mutex to protect 'sg_fd' against concurrent accesses.
+
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+[toddpoynor@google.com: backport to 3.18-4.9, fixup for the bad-ioctl
+SG_SET_FORCE_LOW_DMA code, which was removed in later versions and not
+modified by the original patch.]
+
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Tested-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Todd Poynor <toddpoynor@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sg.c |   47 ++++++++++++++++++++++++++---------------------
+ 1 file changed, 26 insertions(+), 21 deletions(-)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -142,6 +142,7 @@ typedef struct sg_fd {             /* holds the sta
+       struct sg_device *parentdp;     /* owning device */
+       wait_queue_head_t read_wait;    /* queue read until command done */
+       rwlock_t rq_list_lock;  /* protect access to list in req_arr */
++      struct mutex f_mutex;   /* protect against changes in this fd */
+       int timeout;            /* defaults to SG_DEFAULT_TIMEOUT      */
+       int timeout_user;       /* defaults to SG_DEFAULT_TIMEOUT_USER */
+       Sg_scatter_hold reserve;        /* buffer held for this file descriptor */
+@@ -155,6 +156,7 @@ typedef struct sg_fd {             /* holds the sta
+       unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
+       char keep_orphan;       /* 0 -> drop orphan (def), 1 -> keep for read() */
+       char mmap_called;       /* 0 -> mmap() never called on this fd */
++      char res_in_use;        /* 1 -> 'reserve' array in use */
+       struct kref f_ref;
+       struct execute_work ew;
+ } Sg_fd;
+@@ -198,7 +200,6 @@ static void sg_remove_sfp(struct kref *)
+ static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
+ static Sg_request *sg_add_request(Sg_fd * sfp);
+ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
+-static int sg_res_in_use(Sg_fd * sfp);
+ static Sg_device *sg_get_dev(int dev);
+ static void sg_device_destroy(struct kref *kref);
+@@ -614,6 +615,7 @@ sg_write(struct file *filp, const char _
+       }
+       buf += SZ_SG_HEADER;
+       __get_user(opcode, buf);
++      mutex_lock(&sfp->f_mutex);
+       if (sfp->next_cmd_len > 0) {
+               cmd_size = sfp->next_cmd_len;
+               sfp->next_cmd_len = 0;  /* reset so only this write() effected */
+@@ -622,6 +624,7 @@ sg_write(struct file *filp, const char _
+               if ((opcode >= 0xc0) && old_hdr.twelve_byte)
+                       cmd_size = 12;
+       }
++      mutex_unlock(&sfp->f_mutex);
+       SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
+               "sg_write:   scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
+ /* Determine buffer size.  */
+@@ -721,7 +724,7 @@ sg_new_write(Sg_fd *sfp, struct file *fi
+                       sg_remove_request(sfp, srp);
+                       return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
+               }
+-              if (sg_res_in_use(sfp)) {
++              if (sfp->res_in_use) {
+                       sg_remove_request(sfp, srp);
+                       return -EBUSY;  /* reserve buffer already being used */
+               }
+@@ -892,7 +895,7 @@ sg_ioctl(struct file *filp, unsigned int
+                       return result;
+               if (val) {
+                       sfp->low_dma = 1;
+-                      if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
++                      if ((0 == sfp->low_dma) && !sfp->res_in_use) {
+                               val = (int) sfp->reserve.bufflen;
+                               sg_remove_scat(sfp, &sfp->reserve);
+                               sg_build_reserve(sfp, val);
+@@ -967,12 +970,18 @@ sg_ioctl(struct file *filp, unsigned int
+                         return -EINVAL;
+               val = min_t(int, val,
+                           max_sectors_bytes(sdp->device->request_queue));
++              mutex_lock(&sfp->f_mutex);
+               if (val != sfp->reserve.bufflen) {
+-                      if (sg_res_in_use(sfp) || sfp->mmap_called)
++                      if (sfp->mmap_called ||
++                          sfp->res_in_use) {
++                              mutex_unlock(&sfp->f_mutex);
+                               return -EBUSY;
++                      }
++
+                       sg_remove_scat(sfp, &sfp->reserve);
+                       sg_build_reserve(sfp, val);
+               }
++              mutex_unlock(&sfp->f_mutex);
+               return 0;
+       case SG_GET_RESERVED_SIZE:
+               val = min_t(int, sfp->reserve.bufflen,
+@@ -1727,13 +1736,22 @@ sg_start_req(Sg_request *srp, unsigned c
+               md = &map_data;
+       if (md) {
+-              if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
++              mutex_lock(&sfp->f_mutex);
++              if (dxfer_len <= rsv_schp->bufflen &&
++                  !sfp->res_in_use) {
++                      sfp->res_in_use = 1;
+                       sg_link_reserve(sfp, srp, dxfer_len);
+-              else {
++              } else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
++                      mutex_unlock(&sfp->f_mutex);
++                      return -EBUSY;
++              } else {
+                       res = sg_build_indirect(req_schp, sfp, dxfer_len);
+-                      if (res)
++                      if (res) {
++                              mutex_unlock(&sfp->f_mutex);
+                               return res;
++                      }
+               }
++              mutex_unlock(&sfp->f_mutex);
+               md->pages = req_schp->pages;
+               md->page_order = req_schp->page_order;
+@@ -2135,6 +2153,7 @@ sg_add_sfp(Sg_device * sdp)
+       rwlock_init(&sfp->rq_list_lock);
+       kref_init(&sfp->f_ref);
++      mutex_init(&sfp->f_mutex);
+       sfp->timeout = SG_DEFAULT_TIMEOUT;
+       sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
+       sfp->force_packid = SG_DEF_FORCE_PACK_ID;
+@@ -2210,20 +2229,6 @@ sg_remove_sfp(struct kref *kref)
+       schedule_work(&sfp->ew.work);
+ }
+-static int
+-sg_res_in_use(Sg_fd * sfp)
+-{
+-      const Sg_request *srp;
+-      unsigned long iflags;
+-
+-      read_lock_irqsave(&sfp->rq_list_lock, iflags);
+-      for (srp = sfp->headrp; srp; srp = srp->nextrp)
+-              if (srp->res_used)
+-                      break;
+-      read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+-      return srp ? 1 : 0;
+-}
+-
+ #ifdef CONFIG_SCSI_PROC_FS
+ static int
+ sg_idr_max_id(int id, void *p, void *data)
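The heart of the change is the claim of the reserve buffer in sg_start_req(): checking 'res_in_use' and deciding whether to link the reserve pages or build an indirect buffer now happen in one critical section under 'f_mutex'. A condensed sketch of that section as it reads after this patch, reconstructed from the hunk above (surrounding code elided):

	mutex_lock(&sfp->f_mutex);
	if (dxfer_len <= rsv_schp->bufflen && !sfp->res_in_use) {
		sfp->res_in_use = 1;			/* claim the per-fd reserve buffer */
		sg_link_reserve(sfp, srp, dxfer_len);
	} else if ((hp->flags & SG_FLAG_MMAP_IO) && sfp->res_in_use) {
		mutex_unlock(&sfp->f_mutex);
		return -EBUSY;				/* mmap I/O must use the reserve buffer */
	} else {
		res = sg_build_indirect(req_schp, sfp, dxfer_len);	/* fall back to per-request pages */
		if (res) {
			mutex_unlock(&sfp->f_mutex);
			return res;
		}
	}
	mutex_unlock(&sfp->f_mutex);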
diff --git a/queue-4.9/scsi-sg-reset-res_in_use-after-unlinking-reserved-array.patch b/queue-4.9/scsi-sg-reset-res_in_use-after-unlinking-reserved-array.patch
new file mode 100644 (file)
index 0000000..d449c3d
--- /dev/null
@@ -0,0 +1,38 @@
+From e791ce27c3f6a1d3c746fd6a8f8e36c9540ec6f9 Mon Sep 17 00:00:00 2001
+From: Hannes Reinecke <hare@suse.de>
+Date: Mon, 24 Apr 2017 10:26:36 +0200
+Subject: scsi: sg: reset 'res_in_use' after unlinking reserved array
+
+From: Hannes Reinecke <hare@suse.de>
+
+commit e791ce27c3f6a1d3c746fd6a8f8e36c9540ec6f9 upstream.
+
+Once the reserved page array is unused we can reset the 'res_in_use'
+state; here we can do a lazy update without holding the mutex as we only
+need to check against concurrent access, not concurrent release.
+
+[mkp: checkpatch]
+
+Fixes: 1bc0eb044615 ("scsi: sg: protect accesses to 'reserved' page array")
+Signed-off-by: Hannes Reinecke <hare@suse.com>
+Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Cc: Todd Poynor <toddpoynor@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/scsi/sg.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -2042,6 +2042,8 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_reques
+       req_schp->sglist_len = 0;
+       sfp->save_scat_len = 0;
+       srp->res_used = 0;
++      /* Called without mutex lock to avoid deadlock */
++      sfp->res_in_use = 0;
+ }
+ static Sg_request *
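For context, the reserve buffer that the two sg patches above serialize is the per-file-descriptor buffer that user space sizes with the SG_SET_RESERVED_SIZE ioctl and maps with mmap(). A minimal, hypothetical user-space sketch of that sequence (/dev/sg0 is only an example device node, no SCSI command is actually issued, and error handling is kept to a minimum):

/* Hypothetical sketch: size and map the per-fd reserve buffer that the
 * two sg patches above serialize.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <scsi/sg.h>

int main(void)
{
	int fd = open("/dev/sg0", O_RDWR);
	int sz = 128 * 1024;			/* ask for a 128 KiB reserve buffer */

	if (fd < 0)
		return 1;

	ioctl(fd, SG_SET_RESERVED_SIZE, &sz);	/* driver rebuilds the reserve page array */
	ioctl(fd, SG_GET_RESERVED_SIZE, &sz);	/* read back what was actually granted */
	printf("reserve buffer: %d bytes\n", sz);

	/* mmap() on an sg fd is backed by this same reserve array; resizing it
	 * while mapped or in use is rejected with EBUSY (now checked under
	 * f_mutex). */
	void *buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf != MAP_FAILED)
		munmap(buf, sz);

	close(fd);
	return 0;
}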
diff --git a/queue-4.9/series b/queue-4.9/series
index af2b70af6e301bf604f8b0f6075b3fe8286d96b6..7f1c743271a5e863f1dd334642b3698fecc5291a 100644 (file)
@@ -5,3 +5,7 @@ gcov-support-gcc-7.1.patch
 kvm-arm-arm64-fix-race-in-resetting-stage2-pgd.patch
 arm64-mm-abort-uaccess-retries-upon-fatal-signal.patch
 x86-io-add-memory-clobber-to-insb-insw-insl-outsb-outsw-outsl.patch
+arm64-fpsimd-prevent-registers-leaking-across-exec.patch
+locking-spinlock-debug-remove-spinlock-lockup-detection-code.patch
+scsi-sg-protect-accesses-to-reserved-page-array.patch
+scsi-sg-reset-res_in_use-after-unlinking-reserved-array.patch