--- /dev/null
+From 8e205f779d1443a94b5ae81aa359cb535dd3021e Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Wed, 23 Jul 2014 14:00:10 -0700
+Subject: shmem: fix faulting into a hole, not taking i_mutex
+
+From: Hugh Dickins <hughd@google.com>
+
+commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.
+
+Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
+punched") was buggy: Sasha sent a lockdep report to remind us that
+grabbing i_mutex in the fault path is a no-no (write syscall may already
+hold i_mutex while faulting user buffer).
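+
+The trap is easy to picture from userspace: a write() whose source
+buffer is an mmap of a shmem file must fault that buffer while write()
+already holds i_mutex.  A minimal sketch of the scenario (illustrative
+only: the /dev/shm path is an assumption and error handling is pared
+down; nothing here is from the original report):
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          int fd = open("/dev/shm/i_mutex-demo",
+                        O_RDWR | O_CREAT | O_TRUNC, 0600);
+          char *buf;
+
+          if (fd < 0 || ftruncate(fd, 2 * 4096) < 0) {
+                  perror("setup");
+                  return 1;
+          }
+          buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                     MAP_SHARED, fd, 0);
+          if (buf == MAP_FAILED) {
+                  perror("mmap");
+                  return 1;
+          }
+          /*
+           * write() holds i_mutex while copying from buf; touching
+           * buf can fault and run shmem_fault().  A fault path that
+           * itself took an i_mutex would nest i_mutex inside
+           * i_mutex/mmap_sem - the inversion lockdep complained of.
+           */
+          if (pwrite(fd, buf, 4096, 4096) < 0)
+                  perror("pwrite");
+          return 0;
+  }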
+
+We tried a completely different approach (see following patch) but that
+proved inadequate: good enough for a rational workload, but not good
+enough against trinity - which forks off so many mappings of the object
+that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
+into serious starvation when concurrent faults force the puncher to fall
+back to single-page unmap_mapping_range() searches of the i_mmap tree.
+
+So return to the original umbrella approach, but keep away from i_mutex
+this time. We really don't want to bloat every shmem inode with a new
+mutex or completion, just to protect this unlikely case from trinity.
+So extend the original with wait_queue_head on stack at the hole-punch
+end, and wait_queue item on the stack at the fault end.
+
+This involves further use of i_lock to guard against the races: lockdep
+has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
+i_lock around wake_up_bit(), which is comparable to what we do here.
+i_lock is more convenient, but we could switch to shmem's info->lock.
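+
+Distilled from the diff below, the two ends of the handshake pair up
+as follows (a condensed sketch, not standalone code: kernel context is
+assumed and the surrounding checks are elided):
+
+  /* hole-punch end, shmem_fallocate(): waitq lives on this stack */
+  DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+  shmem_falloc.waitq = &shmem_falloc_waitq;
+  /* ... publish &shmem_falloc via inode->i_private, punch the hole ... */
+  spin_lock(&inode->i_lock);
+  inode->i_private = NULL;              /* no new waiters after this */
+  wake_up_all(&shmem_falloc_waitq);     /* flush out existing waiters */
+  spin_unlock(&inode->i_lock);          /* stack frame may now vanish */
+
+  /* fault end, shmem_fault(): the wait entry lives on this stack */
+  wait_queue_head_t *shmem_falloc_waitq = shmem_falloc->waitq;
+  DEFINE_WAIT(shmem_fault_wait);
+  /* ... under i_lock, vmf->pgoff was found inside the punched range ... */
+  prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                  TASK_UNINTERRUPTIBLE);
+  spin_unlock(&inode->i_lock);
+  schedule();                           /* until wake_up_all() above */
+  spin_lock(&inode->i_lock);            /* serializes with the waker */
+  finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+  spin_unlock(&inode->i_lock);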
+
+This issue has been tagged with CVE-2014-4171, which will require
+commit f00cdc6df7d7, this patch, and the following patch to be
+backported: we suggest backporting to 3.1+, though in fact the trinity
+forkbomb effect might go back as far as 2.6.16, when
+madvise(,,MADV_REMOVE) came in - or might not, since much has changed,
+with i_mmap_mutex a spinlock before 3.0.  Anyone running trinity on
+3.0 and earlier?  I don't think we need to care.
+
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Lukas Czerner <lczerner@redhat.com>
+Cc: Dave Jones <davej@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c | 78 ++++++++++++++++++++++++++++++++++++++++---------------------
+ 1 file changed, 52 insertions(+), 26 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -85,7 +85,7 @@ static struct vfsmount *shm_mnt;
+ * a time): we would prefer not to enlarge the shmem inode just for that.
+ */
+ struct shmem_falloc {
+- int mode; /* FALLOC_FL mode currently operating */
++ wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+ pgoff_t nr_falloced; /* how many new pages have been fallocated */
+@@ -827,7 +827,7 @@ static int shmem_writepage(struct page *
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
+- !shmem_falloc->mode &&
++ !shmem_falloc->waitq &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+ shmem_falloc->nr_unswapped++;
+@@ -1306,38 +1306,58 @@ static int shmem_fault(struct vm_area_st
+ * Trinity finds that probing a hole which tmpfs is punching can
+ * prevent the hole-punch from ever completing: which in turn
+ * locks writers out with its hold on i_mutex. So refrain from
+- * faulting pages into the hole while it's being punched, and
+- * wait on i_mutex to be released if vmf->flags permits.
++ * faulting pages into the hole while it's being punched. Although
++ * shmem_undo_range() does remove the additions, it may be unable to
++ * keep up, as each new page needs its own unmap_mapping_range() call,
++ * and the i_mmap tree grows ever slower to scan if new vmas are added.
++ *
++ * It does not matter if we sometimes reach this check just before the
++ * hole-punch begins, so that one fault then races with the punch:
++ * we just need to make racing faults a rare case.
++ *
++ * The implementation below would be much simpler if we just used a
++ * standard mutex or completion: but we cannot take i_mutex in fault,
++ * and bloating every shmem inode for this unlikely case would be sad.
+ */
+ if (unlikely(inode->i_private)) {
+ struct shmem_falloc *shmem_falloc;
+
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+- if (!shmem_falloc ||
+- shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+- vmf->pgoff < shmem_falloc->start ||
+- vmf->pgoff >= shmem_falloc->next)
+- shmem_falloc = NULL;
+- spin_unlock(&inode->i_lock);
+- /*
+- * i_lock has protected us from taking shmem_falloc seriously
+- * once return from shmem_fallocate() went back up that stack.
+- * i_lock does not serialize with i_mutex at all, but it does
+- * not matter if sometimes we wait unnecessarily, or sometimes
+- * miss out on waiting: we just need to make those cases rare.
+- */
+- if (shmem_falloc) {
++ if (shmem_falloc &&
++ shmem_falloc->waitq &&
++ vmf->pgoff >= shmem_falloc->start &&
++ vmf->pgoff < shmem_falloc->next) {
++ wait_queue_head_t *shmem_falloc_waitq;
++ DEFINE_WAIT(shmem_fault_wait);
++
++ ret = VM_FAULT_NOPAGE;
+ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++ /* It's polite to up mmap_sem if we can */
+ up_read(&vma->vm_mm->mmap_sem);
+- mutex_lock(&inode->i_mutex);
+- mutex_unlock(&inode->i_mutex);
+- return VM_FAULT_RETRY;
++ ret = VM_FAULT_RETRY;
+ }
+- /* cond_resched? Leave that to GUP or return to user */
+- return VM_FAULT_NOPAGE;
++
++ shmem_falloc_waitq = shmem_falloc->waitq;
++ prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++ TASK_UNINTERRUPTIBLE);
++ spin_unlock(&inode->i_lock);
++ schedule();
++
++ /*
++ * shmem_falloc_waitq points into the shmem_fallocate()
++ * stack of the hole-punching task: shmem_falloc_waitq
++ * is usually invalid by the time we reach here, but
++ * finish_wait() does not dereference it in that case;
++ * though i_lock needed lest racing with wake_up_all().
++ */
++ spin_lock(&inode->i_lock);
++ finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++ spin_unlock(&inode->i_lock);
++ return ret;
+ }
++ spin_unlock(&inode->i_lock);
+ }
+
+ error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+@@ -1857,13 +1877,13 @@ static long shmem_fallocate(struct file
+
+ mutex_lock(&inode->i_mutex);
+
+- shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+-
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
++ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
++ shmem_falloc.waitq = &shmem_falloc_waitq;
+ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ spin_lock(&inode->i_lock);
+@@ -1875,8 +1895,13 @@ static long shmem_fallocate(struct file
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, offset, offset + len - 1);
+ /* No need to unmap again: hole-punching leaves COWed pages */
++
++ spin_lock(&inode->i_lock);
++ inode->i_private = NULL;
++ wake_up_all(&shmem_falloc_waitq);
++ spin_unlock(&inode->i_lock);
+ error = 0;
+- goto undone;
++ goto out;
+ }
+
+ /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
+@@ -1892,6 +1917,7 @@ static long shmem_fallocate(struct file
+ goto out;
+ }
+
++ shmem_falloc.waitq = NULL;
+ shmem_falloc.start = start;
+ shmem_falloc.next = start;
+ shmem_falloc.nr_falloced = 0;
--- /dev/null
+From f00cdc6df7d7cfcabb5b740911e6788cb0802bdb Mon Sep 17 00:00:00 2001
+From: Hugh Dickins <hughd@google.com>
+Date: Mon, 23 Jun 2014 13:22:06 -0700
+Subject: shmem: fix faulting into a hole while it's punched
+
+From: Hugh Dickins <hughd@google.com>
+
+commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.
+
+Trinity finds that mmap access to a hole while it's punched from shmem
+can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
+from completing, until the reader chooses to stop; with the puncher's
+hold on i_mutex locking out all other writers until it can complete.
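+
+The shape of the problem can be sketched from userspace (a hedged
+reconstruction, not Trinity itself: the /dev/shm path, sizes and
+thread layout are illustrative, and error handling is omitted; build
+with cc -pthread):
+
+  #define _GNU_SOURCE
+  #include <fcntl.h>
+  #include <linux/falloc.h>
+  #include <pthread.h>
+  #include <stddef.h>
+  #include <sys/mman.h>
+  #include <unistd.h>
+
+  #define LEN (1024 * 4096UL)
+
+  static volatile char *map;
+  static volatile int stop;
+
+  static void *prober(void *unused)
+  {
+          /* keep faulting pages back into the range being punched */
+          while (!stop)
+                  for (size_t off = 0; off < LEN; off += 4096)
+                          map[off] = 1;
+          return NULL;
+  }
+
+  int main(void)
+  {
+          int fd = open("/dev/shm/punch-demo",
+                        O_RDWR | O_CREAT | O_TRUNC, 0600);
+          pthread_t t;
+
+          ftruncate(fd, LEN);
+          map = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
+                     MAP_SHARED, fd, 0);
+          pthread_create(&t, NULL, prober, NULL);
+          /*
+           * On an unfixed kernel, the prober can hold this call up
+           * for as long as it keeps running, with i_mutex held.
+           */
+          fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+                    0, LEN);
+          stop = 1;
+          pthread_join(t, NULL);
+          return 0;
+  }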
+
+It appears that the tmpfs fault path is too light in comparison with its
+hole-punching path, lacking an i_data_sem to obstruct it; but we don't
+want to slow down the common case.
+
+Extend shmem_fallocate()'s existing range notification mechanism, so
+shmem_fault() can refrain from faulting pages into the hole while it's
+punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
+faulting when not).
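+
+One idiom in the diff below deserves a gloss: the back-to-back
+mutex_lock/mutex_unlock is a pure "wait for the puncher" barrier, not
+a critical section.  Annotated (the comments are explanatory
+additions, not part of the patch):
+
+  if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+      !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+          /* allowed to sleep: drop mmap_sem, as VM_FAULT_RETRY expects */
+          up_read(&vma->vm_mm->mmap_sem);
+          mutex_lock(&inode->i_mutex);    /* blocks until punch is done */
+          mutex_unlock(&inode->i_mutex);  /* the lock itself is unwanted */
+          return VM_FAULT_RETRY;          /* caller restarts the fault */
+  }
+  /* must not sleep: NOPAGE makes the access simply fault again */
+  return VM_FAULT_NOPAGE;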
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Tested-by: Sasha Levin <sasha.levin@oracle.com>
+Cc: Dave Jones <davej@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/shmem.c | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 52 insertions(+), 4 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
+ #define SHORT_SYMLINK_LEN 128
+
+ /*
+- * shmem_fallocate and shmem_writepage communicate via inode->i_private
+- * (with i_mutex making sure that it has only one user at a time):
+- * we would prefer not to enlarge the shmem inode just for that.
++ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
+ */
+ struct shmem_falloc {
++ int mode; /* FALLOC_FL mode currently operating */
+ pgoff_t start; /* start of range currently being fallocated */
+ pgoff_t next; /* the next page offset to be fallocated */
+ pgoff_t nr_falloced; /* how many new pages have been fallocated */
+@@ -826,6 +827,7 @@ static int shmem_writepage(struct page *
+ spin_lock(&inode->i_lock);
+ shmem_falloc = inode->i_private;
+ if (shmem_falloc &&
++ !shmem_falloc->mode &&
+ index >= shmem_falloc->start &&
+ index < shmem_falloc->next)
+ shmem_falloc->nr_unswapped++;
+@@ -1300,6 +1302,44 @@ static int shmem_fault(struct vm_area_st
+ int error;
+ int ret = VM_FAULT_LOCKED;
+
++ /*
++ * Trinity finds that probing a hole which tmpfs is punching can
++ * prevent the hole-punch from ever completing: which in turn
++ * locks writers out with its hold on i_mutex. So refrain from
++ * faulting pages into the hole while it's being punched, and
++ * wait on i_mutex to be released if vmf->flags permits.
++ */
++ if (unlikely(inode->i_private)) {
++ struct shmem_falloc *shmem_falloc;
++
++ spin_lock(&inode->i_lock);
++ shmem_falloc = inode->i_private;
++ if (!shmem_falloc ||
++ shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
++ vmf->pgoff < shmem_falloc->start ||
++ vmf->pgoff >= shmem_falloc->next)
++ shmem_falloc = NULL;
++ spin_unlock(&inode->i_lock);
++ /*
++ * i_lock has protected us from taking shmem_falloc seriously
++ * once return from shmem_fallocate() went back up that stack.
++ * i_lock does not serialize with i_mutex at all, but it does
++ * not matter if sometimes we wait unnecessarily, or sometimes
++ * miss out on waiting: we just need to make those cases rare.
++ */
++ if (shmem_falloc) {
++ if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++ up_read(&vma->vm_mm->mmap_sem);
++ mutex_lock(&inode->i_mutex);
++ mutex_unlock(&inode->i_mutex);
++ return VM_FAULT_RETRY;
++ }
++ /* cond_resched? Leave that to GUP or return to user */
++ return VM_FAULT_NOPAGE;
++ }
++ }
++
+ error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+ if (error)
+ return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1817,18 +1857,26 @@ static long shmem_fallocate(struct file
+
+ mutex_lock(&inode->i_mutex);
+
++ shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
++
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ struct address_space *mapping = file->f_mapping;
+ loff_t unmap_start = round_up(offset, PAGE_SIZE);
+ loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+
++ shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++ shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++ spin_lock(&inode->i_lock);
++ inode->i_private = &shmem_falloc;
++ spin_unlock(&inode->i_lock);
++
+ if ((u64)unmap_end > (u64)unmap_start)
+ unmap_mapping_range(mapping, unmap_start,
+ 1 + unmap_end - unmap_start, 0);
+ shmem_truncate_range(inode, offset, offset + len - 1);
+ /* No need to unmap again: hole-punching leaves COWed pages */
+ error = 0;
+- goto out;
++ goto undone;
+ }
+
+ /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */