git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 24 Jul 2014 18:04:45 +0000 (11:04 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 24 Jul 2014 18:04:45 +0000 (11:04 -0700)
added patches:
e1000e-fix-shra-register-access-for-82579.patch
shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
shmem-fix-faulting-into-a-hole-while-it-s-punched.patch

queue-3.14/e1000e-fix-shra-register-access-for-82579.patch [new file with mode: 0644]
queue-3.14/series
queue-3.14/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch [new file with mode: 0644]
queue-3.14/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch [new file with mode: 0644]

diff --git a/queue-3.14/e1000e-fix-shra-register-access-for-82579.patch b/queue-3.14/e1000e-fix-shra-register-access-for-82579.patch
new file mode 100644 (file)
index 0000000..114d5c8
--- /dev/null
@@ -0,0 +1,56 @@
+From 96dee024ca4799d6d21588951240035c21ba1c67 Mon Sep 17 00:00:00 2001
+From: David Ertman <davidx.m.ertman@intel.com>
+Date: Wed, 5 Mar 2014 07:50:46 +0000
+Subject: e1000e: Fix SHRA register access for 82579
+
+From: David Ertman <davidx.m.ertman@intel.com>
+
+commit 96dee024ca4799d6d21588951240035c21ba1c67 upstream.
+
+Previous commit c3a0dce35af0 fixed an overrun for the RAR on i218 devices.
+This commit also attempted to homogenize the RAR/SHRA access for all parts
+accessed by the e1000e driver.  This change introduced an error for
+assigning MAC addresses to guest OSes for 82579 devices.
+
+Only RAR[0] is accessible to the driver for 82579 parts, and additional
+addresses must be placed into the SHRA[L|H] registers.  The rar_entry_count
+was changed in the previous commit to an inaccurate value that accounted
+for all RAR and SHRA registers, not just the ones usable by the driver.
+
+This patch fixes the count to the correct value and adjusts the
+e1000_rar_set_pch2lan() function to use the correct index.
+
+Cc: John Greene <jogreene@redhat.com>
+Signed-off-by: Dave Ertman <davidx.m.ertman@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Cc: "Alexander Y. Fomichev" <aleksandr.fomichev@x5.ru>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/intel/e1000e/ich8lan.c |    2 +-
+ drivers/net/ethernet/intel/e1000e/ich8lan.h |    2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -1374,7 +1374,7 @@ static void e1000_rar_set_pch2lan(struct
+       /* RAR[1-6] are owned by manageability.  Skip those and program the
+        * next address into the SHRA register array.
+        */
+-      if (index < (u32)(hw->mac.rar_entry_count - 6)) {
++      if (index < (u32)(hw->mac.rar_entry_count)) {
+               s32 ret_val;
+
+               ret_val = e1000_acquire_swflag_ich8lan(hw);
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
+@@ -98,7 +98,7 @@
+ #define PCIE_ICH8_SNOOP_ALL   PCIE_NO_SNOOP_ALL
+
+ #define E1000_ICH_RAR_ENTRIES 7
+-#define E1000_PCH2_RAR_ENTRIES        11      /* RAR[0-6], SHRA[0-3] */
++#define E1000_PCH2_RAR_ENTRIES        5       /* RAR[0], SHRA[0-3] */
+ #define E1000_PCH_LPT_RAR_ENTRIES     12      /* RAR[0], SHRA[0-10] */
+
+ #define PHY_PAGE_SHIFT                5
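
The arithmetic behind the fix, restated as a standalone userspace sketch (illustrative only, not driver code; describe_slot is a hypothetical helper): with the corrected count of 5, the range check admits exactly RAR[0] plus SHRA[0-3], while the old count of 11 also admitted indices 5-10, which on 82579 belong to registers the driver must not program.

#include <stdio.h>

#define E1000_PCH2_RAR_ENTRIES 5  /* RAR[0], SHRA[0-3]; RAR[1-6] are firmware-owned */

/* Hypothetical helper: where a driver-visible address slot lands on 82579 */
static void describe_slot(unsigned int index)
{
        if (index >= E1000_PCH2_RAR_ENTRIES) {
                printf("index %u: rejected by the range check\n", index);
                return;
        }
        if (index == 0)
                printf("index 0: RAR[0]\n");
        else
                printf("index %u: SHRA[%u]\n", index, index - 1);
}

int main(void)
{
        /* With the old count of 11, indices 5-10 would pass the check and
         * the driver would touch registers owned by manageability firmware.
         */
        for (unsigned int i = 0; i < 7; i++)
                describe_slot(i);
        return 0;
}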
diff --git a/queue-3.14/series b/queue-3.14/series
index 705b3a39089162a6b972eb4bda81359ad44d1fe1..644fc15db807bd60bf9db0de8eb20938a637bd5d 100644 (file)
@@ -22,3 +22,6 @@ igb-do-a-reset-on-sr-iov-re-init-if-device-is-down.patch
 quota-missing-lock-in-dqcache_shrink_scan.patch
 iwlwifi-update-the-7265-series-hw-ids.patch
 iwlwifi-dvm-don-t-enable-cts-to-self.patch
+shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
+shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
+e1000e-fix-shra-register-access-for-82579.patch
diff --git a/queue-3.14/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch b/queue-3.14/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
new file mode 100644 (file)
index 0000000..7b8195c
--- /dev/null
@@ -0,0 +1,194 @@
+From 8e205f779d1443a94b5ae81aa359cb535dd3021e Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Wed, 23 Jul 2014 14:00:10 -0700
+Subject: shmem: fix faulting into a hole, not taking i_mutex
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.
+
+Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
+punched") was buggy: Sasha sent a lockdep report to remind us that
+grabbing i_mutex in the fault path is a no-no (write syscall may already
+hold i_mutex while faulting user buffer).
+
+We tried a completely different approach (see following patch) but that
+proved inadequate: good enough for a rational workload, but not good
+enough against trinity - which forks off so many mappings of the object
+that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
+into serious starvation when concurrent faults force the puncher to fall
+back to single-page unmap_mapping_range() searches of the i_mmap tree.
+
+So return to the original umbrella approach, but keep away from i_mutex
+this time.  We really don't want to bloat every shmem inode with a new
+mutex or completion, just to protect this unlikely case from trinity.
+So extend the original with wait_queue_head on stack at the hole-punch
+end, and wait_queue item on the stack at the fault end.
+
+This involves further use of i_lock to guard against the races: lockdep
+has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
+i_lock around wake_up_bit(), which is comparable to what we do here.
+i_lock is more convenient, but we could switch to shmem's info->lock.
+
+This issue has been tagged with CVE-2014-4171, which will require commit
+f00cdc6df7d7 and this and the following patch to be backported: we
+suggest to 3.1+, though in fact the trinity forkbomb effect might go
+back as far as 2.6.16, when madvise(,,MADV_REMOVE) came in - or might
+not, since much has changed, with i_mmap_mutex a spinlock before 3.0.
+Anyone running trinity on 3.0 and earlier? I don't think we need care.
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Lukas Czerner <lczerner@redhat.com>
+Cc: Dave Jones <davej@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c |   78 ++++++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 52 insertions(+), 26 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
+  * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
+-      int     mode;           /* FALLOC_FL mode currently operating */
++      wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+       pgoff_t nr_falloced;    /* how many new pages have been fallocated */
+@@ -825,7 +825,7 @@ static int shmem_writepage(struct page *
+                       spin_lock(&inode->i_lock);
+                       shmem_falloc = inode->i_private;
+                       if (shmem_falloc &&
+-                          !shmem_falloc->mode &&
++                          !shmem_falloc->waitq &&
+                           index >= shmem_falloc->start &&
+                           index < shmem_falloc->next)
+                               shmem_falloc->nr_unswapped++;
+@@ -1304,38 +1304,58 @@ static int shmem_fault(struct vm_area_st
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+-       * faulting pages into the hole while it's being punched, and
+-       * wait on i_mutex to be released if vmf->flags permits.
++       * faulting pages into the hole while it's being punched.  Although
++       * shmem_undo_range() does remove the additions, it may be unable to
++       * keep up, as each new page needs its own unmap_mapping_range() call,
++       * and the i_mmap tree grows ever slower to scan if new vmas are added.
++       *
++       * It does not matter if we sometimes reach this check just before the
++       * hole-punch begins, so that one fault then races with the punch:
++       * we just need to make racing faults a rare case.
++       *
++       * The implementation below would be much simpler if we just used a
++       * standard mutex or completion: but we cannot take i_mutex in fault,
++       * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+-              if (!shmem_falloc ||
+-                  shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+-                  vmf->pgoff < shmem_falloc->start ||
+-                  vmf->pgoff >= shmem_falloc->next)
+-                      shmem_falloc = NULL;
+-              spin_unlock(&inode->i_lock);
+-              /*
+-               * i_lock has protected us from taking shmem_falloc seriously
+-               * once return from shmem_fallocate() went back up that stack.
+-               * i_lock does not serialize with i_mutex at all, but it does
+-               * not matter if sometimes we wait unnecessarily, or sometimes
+-               * miss out on waiting: we just need to make those cases rare.
+-               */
+-              if (shmem_falloc) {
++              if (shmem_falloc &&
++                  shmem_falloc->waitq &&
++                  vmf->pgoff >= shmem_falloc->start &&
++                  vmf->pgoff < shmem_falloc->next) {
++                      wait_queue_head_t *shmem_falloc_waitq;
++                      DEFINE_WAIT(shmem_fault_wait);
++
++                      ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++                              /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+-                              mutex_lock(&inode->i_mutex);
+-                              mutex_unlock(&inode->i_mutex);
+-                              return VM_FAULT_RETRY;
++                              ret = VM_FAULT_RETRY;
+                       }
+-                      /* cond_resched? Leave that to GUP or return to user */
+-                      return VM_FAULT_NOPAGE;
++
++                      shmem_falloc_waitq = shmem_falloc->waitq;
++                      prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++                                      TASK_UNINTERRUPTIBLE);
++                      spin_unlock(&inode->i_lock);
++                      schedule();
++
++                      /*
++                       * shmem_falloc_waitq points into the shmem_fallocate()
++                       * stack of the hole-punching task: shmem_falloc_waitq
++                       * is usually invalid by the time we reach here, but
++                       * finish_wait() does not dereference it in that case;
++                       * though i_lock needed lest racing with wake_up_all().
++                       */
++                      spin_lock(&inode->i_lock);
++                      finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++                      spin_unlock(&inode->i_lock);
++                      return ret;
+               }
++              spin_unlock(&inode->i_lock);
+       }
+
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+@@ -1853,13 +1873,13 @@ static long shmem_fallocate(struct file
+
+       mutex_lock(&inode->i_mutex);
+
+-      shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+-
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               struct address_space *mapping = file->f_mapping;
+               loff_t unmap_start = round_up(offset, PAGE_SIZE);
+               loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++              DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
++              shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+@@ -1871,8 +1891,13 @@ static long shmem_fallocate(struct file
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, offset, offset + len - 1);
+               /* No need to unmap again: hole-punching leaves COWed pages */
++
++              spin_lock(&inode->i_lock);
++              inode->i_private = NULL;
++              wake_up_all(&shmem_falloc_waitq);
++              spin_unlock(&inode->i_lock);
+               error = 0;
+-              goto undone;
++              goto out;
+       }
+
+       /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+@@ -1888,6 +1913,7 @@ static long shmem_fallocate(struct file
+               goto out;
+       }
+
++      shmem_falloc.waitq = NULL;
+       shmem_falloc.start = start;
+       shmem_falloc.next  = start;
+       shmem_falloc.nr_falloced = 0;
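
The scheme above, restated as a userspace pthread analogue (a sketch under stated assumptions, not kernel code: the explicit waiter count stands in for what the kernel gets from prepare_to_wait()/finish_wait() serialized by i_lock). The hole-puncher keeps the condition variable on its own stack, publishes it through a shared pointer, wakes everyone under the lock, and only lets its stack frame die once the last waiter is out:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct hole_punch {
        pthread_cond_t done;    /* lives on the puncher's stack, like the waitq */
        long start, next;       /* page range being punched */
        int waiters;            /* faults currently sleeping on 'done' */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays i_lock */
static struct hole_punch *active;       /* plays inode->i_private */
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;

static void *fault_path(void *arg)
{
        long pgoff = (long)arg;
        struct hole_punch *hp;

        pthread_mutex_lock(&lock);
        hp = active;
        if (hp && pgoff >= hp->start && pgoff < hp->next) {
                hp->waiters++;
                /* A spurious early wakeup is harmless: racing faults only
                 * need to be rare, not impossible. */
                pthread_cond_wait(&hp->done, &lock);
                if (--hp->waiters == 0)
                        pthread_cond_signal(&drained);  /* last one out */
        }
        pthread_mutex_unlock(&lock);
        printf("fault at %ld proceeds\n", pgoff);
        return NULL;
}

static void *punch_hole(void *arg)
{
        struct hole_punch hp = { .start = 10, .next = 20, .waiters = 0 };

        (void)arg;
        pthread_cond_init(&hp.done, NULL);
        pthread_mutex_lock(&lock);
        active = &hp;           /* faults in range now wait instead of racing */
        pthread_mutex_unlock(&lock);

        usleep(100 * 1000);     /* stands in for the unmap + truncate work */

        pthread_mutex_lock(&lock);
        active = NULL;
        pthread_cond_broadcast(&hp.done);  /* wake_up_all() under the lock */
        while (hp.waiters)      /* don't let 'hp' die while a fault still uses it */
                pthread_cond_wait(&drained, &lock);
        pthread_mutex_unlock(&lock);
        pthread_cond_destroy(&hp.done);
        return NULL;
}

int main(void)
{
        pthread_t p, f1, f2;

        pthread_create(&p, NULL, punch_hole, NULL);
        usleep(10 * 1000);      /* let the punch publish itself */
        pthread_create(&f1, NULL, fault_path, (void *)15L);  /* in the hole: waits */
        pthread_create(&f2, NULL, fault_path, (void *)42L);  /* outside: proceeds */
        pthread_join(f1, NULL);
        pthread_join(f2, NULL);
        pthread_join(p, NULL);
        return 0;
}

As in the patch, idle inodes pay nothing: the per-punch state exists only for the duration of one fallocate() call, and a fault that slips past the check just has its page removed again by shmem_undo_range().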
diff --git a/queue-3.14/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch b/queue-3.14/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
new file mode 100644 (file)
index 0000000..b2cea12
--- /dev/null
@@ -0,0 +1,135 @@
+From f00cdc6df7d7cfcabb5b740911e6788cb0802bdb Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 23 Jun 2014 13:22:06 -0700
+Subject: shmem: fix faulting into a hole while it's punched
+
+From: Hugh Dickins <hughd@google.com>
+
+commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.
+
+Trinity finds that mmap access to a hole while it's punched from shmem
+can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
+from completing, until the reader chooses to stop; with the puncher's
+hold on i_mutex locking out all other writers until it can complete.
+
+It appears that the tmpfs fault path is too light in comparison with its
+hole-punching path, lacking an i_data_sem to obstruct it; but we don't
+want to slow down the common case.
+
+Extend shmem_fallocate()'s existing range notification mechanism, so
+shmem_fault() can refrain from faulting pages into the hole while it's
+punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
+faulting when not).
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Dave Jones <davej@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c |   56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 52 insertions(+), 4 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
+ #define SHORT_SYMLINK_LEN 128
+
+ /*
+- * shmem_fallocate and shmem_writepage communicate via inode->i_private
+- * (with i_mutex making sure that it has only one user at a time):
+- * we would prefer not to enlarge the shmem inode just for that.
++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
++      int     mode;           /* FALLOC_FL mode currently operating */
+       pgoff_t start;          /* start of range currently being fallocated */
+       pgoff_t next;           /* the next page offset to be fallocated */
+       pgoff_t nr_falloced;    /* how many new pages have been fallocated */
+@@ -824,6 +825,7 @@ static int shmem_writepage(struct page *
+                       spin_lock(&inode->i_lock);
+                       shmem_falloc = inode->i_private;
+                       if (shmem_falloc &&
++                          !shmem_falloc->mode &&
+                           index >= shmem_falloc->start &&
+                           index < shmem_falloc->next)
+                               shmem_falloc->nr_unswapped++;
+@@ -1298,6 +1300,44 @@ static int shmem_fault(struct vm_area_st
+       int error;
+       int ret = VM_FAULT_LOCKED;
+
++      /*
++       * Trinity finds that probing a hole which tmpfs is punching can
++       * prevent the hole-punch from ever completing: which in turn
++       * locks writers out with its hold on i_mutex.  So refrain from
++       * faulting pages into the hole while it's being punched, and
++       * wait on i_mutex to be released if vmf->flags permits.
++       */
++      if (unlikely(inode->i_private)) {
++              struct shmem_falloc *shmem_falloc;
++
++              spin_lock(&inode->i_lock);
++              shmem_falloc = inode->i_private;
++              if (!shmem_falloc ||
++                  shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
++                  vmf->pgoff < shmem_falloc->start ||
++                  vmf->pgoff >= shmem_falloc->next)
++                      shmem_falloc = NULL;
++              spin_unlock(&inode->i_lock);
++              /*
++               * i_lock has protected us from taking shmem_falloc seriously
++               * once return from shmem_fallocate() went back up that stack.
++               * i_lock does not serialize with i_mutex at all, but it does
++               * not matter if sometimes we wait unnecessarily, or sometimes
++               * miss out on waiting: we just need to make those cases rare.
++               */
++              if (shmem_falloc) {
++                      if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++                         !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++                              up_read(&vma->vm_mm->mmap_sem);
++                              mutex_lock(&inode->i_mutex);
++                              mutex_unlock(&inode->i_mutex);
++                              return VM_FAULT_RETRY;
++                      }
++                      /* cond_resched? Leave that to GUP or return to user */
++                      return VM_FAULT_NOPAGE;
++              }
++      }
++
+       error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+       if (error)
+               return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1813,18 +1853,26 @@ static long shmem_fallocate(struct file
+
+       mutex_lock(&inode->i_mutex);
+
++      shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
++
+       if (mode & FALLOC_FL_PUNCH_HOLE) {
+               struct address_space *mapping = file->f_mapping;
+               loff_t unmap_start = round_up(offset, PAGE_SIZE);
+               loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
++              shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++              shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++              spin_lock(&inode->i_lock);
++              inode->i_private = &shmem_falloc;
++              spin_unlock(&inode->i_lock);
++
+               if ((u64)unmap_end > (u64)unmap_start)
+                       unmap_mapping_range(mapping, unmap_start,
+                                           1 + unmap_end - unmap_start, 0);
+               shmem_truncate_range(inode, offset, offset + len - 1);
+               /* No need to unmap again: hole-punching leaves COWed pages */
+               error = 0;
+-              goto out;
++              goto undone;
+       }
+
+       /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
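
The livelock being fixed is easy to picture with a reproducer along these lines (a hedged sketch, not the original trinity case; the tmpfs path, size, and timing are arbitrary assumptions): one thread loops touching every page of a shared tmpfs mapping, so each touch can fault a just-punched page back in, while the main thread punches the whole range and, on an unfixed kernel, may be held off for as long as the reader keeps going, with i_mutex held the whole time, locking out all other writers.

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define SIZE (64UL << 20)       /* 64 MiB file on tmpfs */

static volatile char *map;
static volatile int punching = 1;

static void *reader(void *arg)
{
        (void)arg;
        while (punching) {      /* each read may fault a punched page back in */
                for (size_t i = 0; i < SIZE; i += 4096)
                        (void)map[i];
        }
        return NULL;
}

int main(void)
{
        pthread_t t;
        int fd = open("/dev/shm/punch-test", O_RDWR | O_CREAT | O_TRUNC, 0600);

        if (fd < 0 || ftruncate(fd, SIZE))
                return 1;
        map = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED)
                return 1;
        memset((void *)map, 1, SIZE);   /* instantiate every page first */

        pthread_create(&t, NULL, reader, NULL);
        /* On an unfixed kernel this call can stall while the reader runs */
        fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, SIZE);
        printf("punch completed\n");
        punching = 0;
        pthread_join(t, NULL);
        unlink("/dev/shm/punch-test");
        return 0;
}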