From: Greg Kroah-Hartman
Date: Thu, 24 Jul 2014 21:48:49 +0000 (-0700)
Subject: 3.4-stable patches
X-Git-Tag: v3.4.100~17
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=86d0e7c4e05648569299bec1f18a22ca9adc7da8;p=thirdparty%2Fkernel%2Fstable-queue.git

3.4-stable patches

added patches:
	shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
	shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
	shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
---
diff --git a/queue-3.4/series b/queue-3.4/series
index 3d192089982..28c02c9e044 100644
--- a/queue-3.4/series
+++ b/queue-3.4/series
@@ -1 +1,4 @@
 crypto-testmgr-update-lzo-compression-test-vectors.patch
+shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
+shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
+shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
diff --git a/queue-3.4/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch b/queue-3.4/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
new file mode 100644
index 00000000000..1d064cbac0e
--- /dev/null
+++ b/queue-3.4/shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
@@ -0,0 +1,162 @@
+From 8e205f779d1443a94b5ae81aa359cb535dd3021e Mon Sep 17 00:00:00 2001
+From: Hugh Dickins
+Date: Wed, 23 Jul 2014 14:00:10 -0700
+Subject: shmem: fix faulting into a hole, not taking i_mutex
+
+From: Hugh Dickins
+
+commit 8e205f779d1443a94b5ae81aa359cb535dd3021e upstream.
+
+Commit f00cdc6df7d7 ("shmem: fix faulting into a hole while it's
+punched") was buggy: Sasha sent a lockdep report to remind us that
+grabbing i_mutex in the fault path is a no-no (write syscall may already
+hold i_mutex while faulting user buffer).
+
+We tried a completely different approach (see following patch) but that
+proved inadequate: good enough for a rational workload, but not good
+enough against trinity - which forks off so many mappings of the object
+that contention on i_mmap_mutex while hole-puncher holds i_mutex builds
+into serious starvation when concurrent faults force the puncher to fall
+back to single-page unmap_mapping_range() searches of the i_mmap tree.
+
+So return to the original umbrella approach, but keep away from i_mutex
+this time. We really don't want to bloat every shmem inode with a new
+mutex or completion, just to protect this unlikely case from trinity.
+So extend the original with wait_queue_head on stack at the hole-punch
+end, and wait_queue item on the stack at the fault end.
+
+This involves further use of i_lock to guard against the races: lockdep
+has been happy so far, and I see fs/inode.c:unlock_new_inode() holds
+i_lock around wake_up_bit(), which is comparable to what we do here.
+i_lock is more convenient, but we could switch to shmem's info->lock.
+
+This issue has been tagged with CVE-2014-4171, which will require commit
+f00cdc6df7d7 and this and the following patch to be backported: we
+suggest to 3.1+, though in fact the trinity forkbomb effect might go
+back as far as 2.6.16, when madvise(,,MADV_REMOVE) came in - or might
+not, since much has changed, with i_mmap_mutex a spinlock before 3.0.
+Anyone running trinity on 3.0 and earlier? I don't think we need care.
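
The on-stack waitqueue trick described above is easier to see in a
self-contained setting. Below is a minimal userspace analogue in C with
pthreads (a sketch with invented names, not kernel code): one lock stands
in for i_lock and protects both the published pointer and the condition
variable, so no waiter can touch the puncher's stack frame after it has
been withdrawn, mirroring the guarantee that finish_wait() gives the
kernel version.

#include <pthread.h>
#include <stddef.h>

struct punch_range {			/* stands in for struct shmem_falloc */
	long start, next;		/* page range being punched */
	pthread_cond_t done;		/* stands in for the on-stack waitq */
};

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct punch_range *i_private;	/* NULL when no punch in flight */

void punch_hole(long start, long next)
{
	struct punch_range r = { .start = start, .next = next };

	pthread_cond_init(&r.done, NULL);	/* lives on this stack frame */
	pthread_mutex_lock(&i_lock);
	i_private = &r;
	pthread_mutex_unlock(&i_lock);

	/* ... do the actual hole-punch work here ... */

	pthread_mutex_lock(&i_lock);
	i_private = NULL;			/* withdraw the stack object */
	pthread_cond_broadcast(&r.done);	/* like wake_up_all() */
	pthread_mutex_unlock(&i_lock);
	/* POSIX permits destroying a condvar once no thread is blocked on it */
	pthread_cond_destroy(&r.done);
}

void fault_page(long pgoff)
{
	pthread_mutex_lock(&i_lock);
	while (i_private &&
	       pgoff >= i_private->start && pgoff < i_private->next)
		pthread_cond_wait(&i_private->done, &i_lock);
	pthread_mutex_unlock(&i_lock);
	/* ... safe to instantiate the page at pgoff now ... */
}

As in the patch, a faulter either sees i_private already cleared or is
queued on the condition variable before the broadcast arrives; the lock
serializes publication, waiting, and withdrawal, so the stack frame
cannot be reused out from under a waiter.
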
+
+Signed-off-by: Hugh Dickins
+Reported-by: Sasha Levin
+Tested-by: Sasha Levin
+Cc: Vlastimil Babka
+Cc: Konstantin Khlebnikov
+Cc: Johannes Weiner
+Cc: Lukas Czerner
+Cc: Dave Jones
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ mm/shmem.c |   64 +++++++++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 44 insertions(+), 20 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -82,6 +82,7 @@ static struct vfsmount *shm_mnt;
+  * a time): we would prefer not to enlarge the shmem inode just for that.
+  */
+ struct shmem_falloc {
++	wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
+ 	pgoff_t start;		/* start of range currently being fallocated */
+ 	pgoff_t next;		/* the next page offset to be fallocated */
+ };
+@@ -1074,37 +1075,57 @@ static int shmem_fault(struct vm_area_st
+ 	 * Trinity finds that probing a hole which tmpfs is punching can
+ 	 * prevent the hole-punch from ever completing: which in turn
+ 	 * locks writers out with its hold on i_mutex. So refrain from
+-	 * faulting pages into the hole while it's being punched, and
+-	 * wait on i_mutex to be released if vmf->flags permits.
++	 * faulting pages into the hole while it's being punched. Although
++	 * shmem_truncate_range() does remove the additions, it may be unable to
++	 * keep up, as each new page needs its own unmap_mapping_range() call,
++	 * and the i_mmap tree grows ever slower to scan if new vmas are added.
++	 *
++	 * It does not matter if we sometimes reach this check just before the
++	 * hole-punch begins, so that one fault then races with the punch:
++	 * we just need to make racing faults a rare case.
++	 *
++	 * The implementation below would be much simpler if we just used a
++	 * standard mutex or completion: but we cannot take i_mutex in fault,
++	 * and bloating every shmem inode for this unlikely case would be sad.
+ 	 */
+ 	if (unlikely(inode->i_private)) {
+ 		struct shmem_falloc *shmem_falloc;
+
+ 		spin_lock(&inode->i_lock);
+ 		shmem_falloc = inode->i_private;
+-		if (!shmem_falloc ||
+-		    vmf->pgoff < shmem_falloc->start ||
+-		    vmf->pgoff >= shmem_falloc->next)
+-			shmem_falloc = NULL;
+-		spin_unlock(&inode->i_lock);
+-		/*
+-		 * i_lock has protected us from taking shmem_falloc seriously
+-		 * once return from vmtruncate_range() went back up that stack.
+-		 * i_lock does not serialize with i_mutex at all, but it does
+-		 * not matter if sometimes we wait unnecessarily, or sometimes
+-		 * miss out on waiting: we just need to make those cases rare.
+-		 */
+-		if (shmem_falloc) {
++		if (shmem_falloc &&
++		    vmf->pgoff >= shmem_falloc->start &&
++		    vmf->pgoff < shmem_falloc->next) {
++			wait_queue_head_t *shmem_falloc_waitq;
++			DEFINE_WAIT(shmem_fault_wait);
++
++			ret = VM_FAULT_NOPAGE;
+ 			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+ 			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++				/* It's polite to up mmap_sem if we can */
+ 				up_read(&vma->vm_mm->mmap_sem);
+-				mutex_lock(&inode->i_mutex);
+-				mutex_unlock(&inode->i_mutex);
+-				return VM_FAULT_RETRY;
++				ret = VM_FAULT_RETRY;
+ 			}
+-			/* cond_resched? Leave that to GUP or return to user */
+-			return VM_FAULT_NOPAGE;
++
++			shmem_falloc_waitq = shmem_falloc->waitq;
++			prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
++					TASK_UNINTERRUPTIBLE);
++			spin_unlock(&inode->i_lock);
++			schedule();
++
++			/*
++			 * shmem_falloc_waitq points into the vmtruncate_range()
++			 * stack of the hole-punching task: shmem_falloc_waitq
++			 * is usually invalid by the time we reach here, but
++			 * finish_wait() does not dereference it in that case;
++			 * though i_lock needed lest racing with wake_up_all().
++			 */
++			spin_lock(&inode->i_lock);
++			finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
++			spin_unlock(&inode->i_lock);
++			return ret;
+ 		}
++		spin_unlock(&inode->i_lock);
+ 	}
+
+ 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+@@ -1135,7 +1156,9 @@ int vmtruncate_range(struct inode *inode
+ 		struct address_space *mapping = inode->i_mapping;
+ 		loff_t unmap_start = round_up(lstart, PAGE_SIZE);
+ 		loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
++		DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
++		shmem_falloc.waitq = &shmem_falloc_waitq;
+ 		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+ 		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+ 		spin_lock(&inode->i_lock);
+@@ -1150,6 +1173,7 @@ int vmtruncate_range(struct inode *inode
+
+ 		spin_lock(&inode->i_lock);
+ 		inode->i_private = NULL;
++		wake_up_all(&shmem_falloc_waitq);
+ 		spin_unlock(&inode->i_lock);
+ 	}
+ 	mutex_unlock(&inode->i_mutex);
diff --git a/queue-3.4/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch b/queue-3.4/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
new file mode 100644
index 00000000000..08fb0286e06
--- /dev/null
+++ b/queue-3.4/shmem-fix-faulting-into-a-hole-while-it-s-punched.patch
@@ -0,0 +1,193 @@
+From f00cdc6df7d7cfcabb5b740911e6788cb0802bdb Mon Sep 17 00:00:00 2001
+From: Hugh Dickins
+Date: Mon, 23 Jun 2014 13:22:06 -0700
+Subject: shmem: fix faulting into a hole while it's punched
+
+From: Hugh Dickins
+
+commit f00cdc6df7d7cfcabb5b740911e6788cb0802bdb upstream.
+
+Trinity finds that mmap access to a hole while it's punched from shmem
+can prevent the madvise(MADV_REMOVE) or fallocate(FALLOC_FL_PUNCH_HOLE)
+from completing, until the reader chooses to stop; with the puncher's
+hold on i_mutex locking out all other writers until it can complete.
+
+It appears that the tmpfs fault path is too light in comparison with its
+hole-punching path, lacking an i_data_sem to obstruct it; but we don't
+want to slow down the common case.
+
+Extend shmem_fallocate()'s existing range notification mechanism, so
+shmem_fault() can refrain from faulting pages into the hole while it's
+punched, waiting instead on i_mutex (when safe to sleep; or repeatedly
+faulting when not).
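
Before the backported diff, it may help to see the page-range arithmetic
that this notification mechanism publishes. The following standalone
sketch (plain C with invented names, assuming 4096-byte pages, not
kernel code) shows how a byte range is narrowed to the whole pages that
faults must avoid:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096ULL
#define PAGE_SHIFT 12

/* power-of-two rounding helpers, as the kernel macros behave */
static uint64_t round_up(uint64_t x, uint64_t a)   { return (x + a - 1) & ~(a - 1); }
static uint64_t round_down(uint64_t x, uint64_t a) { return x & ~(a - 1); }

int main(void)
{
	uint64_t lstart = 1000, lend = 20000;	/* byte range of the punch */
	uint64_t unmap_start = round_up(lstart, PAGE_SIZE);
	uint64_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	uint64_t start = unmap_start >> PAGE_SHIFT;	/* shmem_falloc.start */
	uint64_t next = (unmap_end + 1) >> PAGE_SHIFT;	/* shmem_falloc.next */

	/* prints: pages [1, 4) i.e. bytes 4096..16383 */
	printf("pages [%llu, %llu) i.e. bytes %llu..%llu\n",
	       (unsigned long long)start, (unsigned long long)next,
	       (unsigned long long)unmap_start, (unsigned long long)unmap_end);
	return 0;
}

Only pages lying wholly inside [lstart, lend] are unmapped and published
as [start, next); the partial first and last pages are left alone, which
is why vmtruncate_range() only calls unmap_mapping_range() when
unmap_end is greater than unmap_start.
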
+
+[akpm@linux-foundation.org: coding-style fixes]
+Signed-off-by: Hugh Dickins
+Reported-by: Sasha Levin
+Tested-by: Sasha Levin
+Cc: Dave Jones
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ mm/shmem.c    |   91 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ mm/truncate.c |   25 ---------------
+ 2 files changed, 91 insertions(+), 25 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -76,6 +76,16 @@ static struct vfsmount *shm_mnt;
+ /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
+ #define SHORT_SYMLINK_LEN 128
+
++/*
++ * vmtruncate_range() communicates with shmem_fault via
++ * inode->i_private (with i_mutex making sure that it has only one user at
++ * a time): we would prefer not to enlarge the shmem inode just for that.
++ */
++struct shmem_falloc {
++	pgoff_t start;		/* start of range currently being fallocated */
++	pgoff_t next;		/* the next page offset to be fallocated */
++};
++
+ struct shmem_xattr {
+ 	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
+ 	char *name;		/* xattr name */
+@@ -1060,6 +1070,43 @@ static int shmem_fault(struct vm_area_st
+ 	int error;
+ 	int ret = VM_FAULT_LOCKED;
+
++	/*
++	 * Trinity finds that probing a hole which tmpfs is punching can
++	 * prevent the hole-punch from ever completing: which in turn
++	 * locks writers out with its hold on i_mutex. So refrain from
++	 * faulting pages into the hole while it's being punched, and
++	 * wait on i_mutex to be released if vmf->flags permits.
++	 */
++	if (unlikely(inode->i_private)) {
++		struct shmem_falloc *shmem_falloc;
++
++		spin_lock(&inode->i_lock);
++		shmem_falloc = inode->i_private;
++		if (!shmem_falloc ||
++		    vmf->pgoff < shmem_falloc->start ||
++		    vmf->pgoff >= shmem_falloc->next)
++			shmem_falloc = NULL;
++		spin_unlock(&inode->i_lock);
++		/*
++		 * i_lock has protected us from taking shmem_falloc seriously
++		 * once return from vmtruncate_range() went back up that stack.
++		 * i_lock does not serialize with i_mutex at all, but it does
++		 * not matter if sometimes we wait unnecessarily, or sometimes
++		 * miss out on waiting: we just need to make those cases rare.
++		 */
++		if (shmem_falloc) {
++			if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
++			   !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
++				up_read(&vma->vm_mm->mmap_sem);
++				mutex_lock(&inode->i_mutex);
++				mutex_unlock(&inode->i_mutex);
++				return VM_FAULT_RETRY;
++			}
++			/* cond_resched? Leave that to GUP or return to user */
++			return VM_FAULT_NOPAGE;
++		}
++	}
++
+ 	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
+ 	if (error)
+ 		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
+@@ -1071,6 +1118,44 @@ static int shmem_fault(struct vm_area_st
+ 	return ret;
+ }
+
++int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
++{
++	/*
++	 * If the underlying filesystem is not going to provide
++	 * a way to truncate a range of blocks (punch a hole) -
++	 * we should return failure right now.
++	 * Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range().
++	 */
++	if (inode->i_op->truncate_range != shmem_truncate_range)
++		return -ENOSYS;
++
++	mutex_lock(&inode->i_mutex);
++	{
++		struct shmem_falloc shmem_falloc;
++		struct address_space *mapping = inode->i_mapping;
++		loff_t unmap_start = round_up(lstart, PAGE_SIZE);
++		loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
++
++		shmem_falloc.start = unmap_start >> PAGE_SHIFT;
++		shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
++		spin_lock(&inode->i_lock);
++		inode->i_private = &shmem_falloc;
++		spin_unlock(&inode->i_lock);
++
++		if ((u64)unmap_end > (u64)unmap_start)
++			unmap_mapping_range(mapping, unmap_start,
++					    1 + unmap_end - unmap_start, 0);
++		shmem_truncate_range(inode, lstart, lend);
++		/* No need to unmap again: hole-punching leaves COWed pages */
++
++		spin_lock(&inode->i_lock);
++		inode->i_private = NULL;
++		spin_unlock(&inode->i_lock);
++	}
++	mutex_unlock(&inode->i_mutex);
++	return 0;
++}
++
+ #ifdef CONFIG_NUMA
+ static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
+ {
+@@ -2547,6 +2632,12 @@ void shmem_truncate_range(struct inode *
+ }
+ EXPORT_SYMBOL_GPL(shmem_truncate_range);
+
++int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
++{
++	/* Only CONFIG_SHMEM shmem.c ever supported i_op->truncate_range(). */
++	return -ENOSYS;
++}
++
+ #define shmem_vm_ops		generic_file_vm_ops
+ #define shmem_file_operations	ramfs_file_operations
+ #define shmem_get_inode(sb, dir, mode, dev, flags)	ramfs_get_inode(sb, dir, mode, dev)
+--- a/mm/truncate.c
++++ b/mm/truncate.c
+@@ -603,31 +603,6 @@ int vmtruncate(struct inode *inode, loff
+ }
+ EXPORT_SYMBOL(vmtruncate);
+
+-int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
+-{
+-	struct address_space *mapping = inode->i_mapping;
+-	loff_t holebegin = round_up(lstart, PAGE_SIZE);
+-	loff_t holelen = 1 + lend - holebegin;
+-
+-	/*
+-	 * If the underlying filesystem is not going to provide
+-	 * a way to truncate a range of blocks (punch a hole) -
+-	 * we should return failure right now.
+-	 */
+-	if (!inode->i_op->truncate_range)
+-		return -ENOSYS;
+-
+-	mutex_lock(&inode->i_mutex);
+-	inode_dio_wait(inode);
+-	unmap_mapping_range(mapping, holebegin, holelen, 1);
+-	inode->i_op->truncate_range(inode, lstart, lend);
+-	/* unmap again to remove racily COWed private pages */
+-	unmap_mapping_range(mapping, holebegin, holelen, 1);
+-	mutex_unlock(&inode->i_mutex);
+-
+-	return 0;
+-}
+-
+ /**
+  * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
+  * @inode: inode
diff --git a/queue-3.4/shmem-fix-splicing-from-a-hole-while-it-s-punched.patch b/queue-3.4/shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
new file mode 100644
index 00000000000..4b73f636af5
--- /dev/null
+++ b/queue-3.4/shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
@@ -0,0 +1,129 @@
+From b1a366500bd537b50c3aad26dc7df083ec03a448 Mon Sep 17 00:00:00 2001
+From: Hugh Dickins
+Date: Wed, 23 Jul 2014 14:00:13 -0700
+Subject: shmem: fix splicing from a hole while it's punched
+
+From: Hugh Dickins
+
+commit b1a366500bd537b50c3aad26dc7df083ec03a448 upstream.
+
+shmem_fault() is the actual culprit in trinity's hole-punch starvation,
+and the most significant cause of such problems: since a page faulted is
+one that then appears page_mapped(), needing unmap_mapping_range() and
+i_mmap_mutex to be unmapped again.
+
+But it is not the only way in which a page can be brought into a hole in
+the radix_tree while that hole is being punched; and Vlastimil's testing
+implies that if enough other processors are busy filling in the hole,
+then shmem_undo_range() can be kept from completing indefinitely.
+
+shmem_file_splice_read() is the main other user of SGP_CACHE, which can
+instantiate shmem pagecache pages in the read-only case (without holding
+i_mutex, so perhaps concurrently with a hole-punch). Probably it's
+silly not to use SGP_READ already (using the ZERO_PAGE for holes): which
+ought to be safe, but might bring surprises - not a change to be rushed.
+
+shmem_read_mapping_page_gfp() is an internal interface used by
+drivers/gpu/drm GEM (and next by uprobes): it should be okay. And
+shmem_file_read_iter() uses the SGP_DIRTY variant of SGP_CACHE, when
+called internally by the kernel (perhaps for a stacking filesystem,
+which might rely on holes to be reserved): it's unclear whether it could
+be provoked to keep hole-punch busy or not.
+
+We could apply the same umbrella as now used in shmem_fault() to
+shmem_file_splice_read() and the others; but it looks ugly, and use over
+a range raises questions - should it actually be per page? can these get
+starved themselves?
+
+The origin of this part of the problem is my v3.1 commit d0823576bf4b
+("mm: pincer in truncate_inode_pages_range"), once it was duplicated
+into shmem.c. It seemed like a nice idea at the time, to ensure
+(barring RCU lookup fuzziness) that there's an instant when the entire
+hole is empty; but the indefinitely repeated scans to ensure that make
+it vulnerable.
+
+Revert that "enhancement" to hole-punch from shmem_undo_range(), but
+retain the unproblematic rescanning when it's truncating; add a couple
+of comments there.
+
+Remove the "indices[0] >= end" test: that is now handled satisfactorily
+by the inner loop, and mem_cgroup_uncharge_start()/end() are too light
+to be worth avoiding here.
+
+But if we do not always loop indefinitely, we do need to handle the case
+of swap swizzled back to page before shmem_free_swap() gets it: add a
+retry for that case, as suggested by Konstantin Khlebnikov; and for the
+case of page swizzled back to swap, as suggested by Johannes Weiner.
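
The retry logic reads tersely in the diff below, so here is a toy,
single-threaded model of the same "step back and rescan one slot" idiom
(all names invented for illustration, not kernel code). One slot
swizzles from swap to page mid-scan, the way a racing fault would, and
the scan retries just that slot instead of restarting from the top:

#include <stdio.h>
#include <stdbool.h>

enum slot { EMPTY, PAGE, SWAP };

/* toy mapping: slot 2 pretends to be swizzled from SWAP to PAGE the
 * first time we try to free it, as a racing fault would do */
static enum slot slots[8] = { PAGE, SWAP, SWAP, PAGE, EMPTY, PAGE, SWAP, PAGE };
static bool swizzled = false;

static bool free_swap(int i)		/* fails if entry is no longer swap */
{
	if (i == 2 && !swizzled) {
		swizzled = true;
		slots[i] = PAGE;	/* raced: swap was replaced by a page */
		return false;
	}
	if (slots[i] != SWAP)
		return false;
	slots[i] = EMPTY;
	return true;
}

int main(void)
{
	int index = 0, end = 7;

	while (index <= end) {		/* bounded, unlike the old for (;;) pincer */
		enum slot s = slots[index];
		if (s == SWAP && !free_swap(index)) {
			/* swizzled under us: rescan this same slot, the
			 * kernel's index--; break; has the same effect */
			continue;
		}
		if (s == PAGE)
			slots[index] = EMPTY;	/* truncate_inode_page() stand-in */
		index++;
	}
	for (index = 0; index <= end; index++)
		printf("%d", slots[index] == EMPTY);	/* prints 11111111 */
	putchar('\n');
	return 0;
}

The point to notice is that the hole-punch scan is now bounded by end,
and a swizzled slot costs one extra pass over that slot alone, rather
than an indefinite restart of the whole range.
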
+
+Signed-off-by: Hugh Dickins
+Reported-by: Sasha Levin
+Suggested-by: Vlastimil Babka
+Cc: Konstantin Khlebnikov
+Cc: Johannes Weiner
+Cc: Lukas Czerner
+Cc: Dave Jones
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ mm/shmem.c |   24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -499,22 +499,19 @@ void shmem_truncate_range(struct inode *
+ 	}
+
+ 	index = start;
+-	for ( ; ; ) {
++	while (index <= end) {
+ 		cond_resched();
+ 		pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+ 			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
+ 							pvec.pages, indices);
+ 		if (!pvec.nr) {
+-			if (index == start)
++			/* If all gone or hole-punch, we're done */
++			if (index == start || end != -1)
+ 				break;
++			/* But if truncating, restart to make sure all gone */
+ 			index = start;
+ 			continue;
+ 		}
+-		if (index == start && indices[0] > end) {
+-			shmem_deswap_pagevec(&pvec);
+-			pagevec_release(&pvec);
+-			break;
+-		}
+ 		mem_cgroup_uncharge_start();
+ 		for (i = 0; i < pagevec_count(&pvec); i++) {
+ 			struct page *page = pvec.pages[i];
+@@ -524,8 +521,12 @@ void shmem_truncate_range(struct inode *
+ 				break;
+
+ 			if (radix_tree_exceptional_entry(page)) {
+-				nr_swaps_freed += !shmem_free_swap(mapping,
+-								index, page);
++				if (shmem_free_swap(mapping, index, page)) {
++					/* Swap was replaced by page: retry */
++					index--;
++					break;
++				}
++				nr_swaps_freed++;
+ 				continue;
+ 			}
+
+@@ -533,6 +534,11 @@ void shmem_truncate_range(struct inode *
+ 			if (page->mapping == mapping) {
+ 				VM_BUG_ON(PageWriteback(page));
+ 				truncate_inode_page(mapping, page);
++			} else {
++				/* Page was replaced by swap: retry */
++				unlock_page(page);
++				index--;
++				break;
+ 			}
+ 			unlock_page(page);
+ 		}