git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 28 Jan 2015 01:19:51 +0000 (17:19 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Wed, 28 Jan 2015 01:19:51 +0000 (17:19 -0800)
added patches:
mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch

queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch b/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
new file mode 100644 (file)
index 0000000..33d30d4
--- /dev/null
+++ b/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
@@ -0,0 +1,156 @@
+From 45f87de57f8fad59302fd263dd81ffa4843b5b24 Mon Sep 17 00:00:00 2001
+From: Michal Hocko <mhocko@suse.cz>
+Date: Mon, 29 Dec 2014 20:30:35 +0100
+Subject: mm: get rid of radix tree gfp mask for pagecache_get_page
+
+From: Michal Hocko <mhocko@suse.cz>
+
+commit 45f87de57f8fad59302fd263dd81ffa4843b5b24 upstream.
+
+Commit 2457aec63745 ("mm: non-atomically mark page accessed during page
+cache allocation where possible") added a separate parameter for
+specifying the gfp mask for radix tree allocations.
+
+Not only is this less than optimal from an API point of view (it is
+error prone), it is also currently buggy: grab_cache_page_write_begin
+uses GFP_KERNEL for the radix tree, and if fgp_flags doesn't contain
+FGP_NOFS (mostly controlled by the fs via the AOP_FLAG_NOFS flag) but
+the mapping_gfp_mask has __GFP_FS cleared, the radix tree allocation
+won't obey the restriction and may recurse into the filesystem and
+cause deadlocks.  Unfortunately this is the case for most filesystems,
+because only ext4 and gfs2 use AOP_FLAG_NOFS.
+
+Let's simply remove the radix_gfp_mask parameter, because the
+allocation context is the same for both the page cache and the radix
+tree.  Just make sure that the radix tree gets only the sane subset of
+the mask (e.g. do not pass __GFP_WRITE).
+
+Long term it is preferable to convert the remaining users of
+AOP_FLAG_NOFS to use mapping_gfp_mask instead and simplify this
+interface even further.
+
+Reported-by: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ include/linux/pagemap.h |   13 ++++++-------
+ mm/filemap.c            |   20 +++++++++-----------
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -256,7 +256,7 @@ pgoff_t page_cache_prev_hole(struct addr
+ #define FGP_NOWAIT            0x00000020
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-              int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
++              int fgp_flags, gfp_t cache_gfp_mask);
+ /**
+  * find_get_page - find and get a page reference
+@@ -271,13 +271,13 @@ struct page *pagecache_get_page(struct a
+ static inline struct page *find_get_page(struct address_space *mapping,
+                                       pgoff_t offset)
+ {
+-      return pagecache_get_page(mapping, offset, 0, 0, 0);
++      return pagecache_get_page(mapping, offset, 0, 0);
+ }
+ static inline struct page *find_get_page_flags(struct address_space *mapping,
+                                       pgoff_t offset, int fgp_flags)
+ {
+-      return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
++      return pagecache_get_page(mapping, offset, fgp_flags, 0);
+ }
+ /**
+@@ -297,7 +297,7 @@ static inline struct page *find_get_page
+ static inline struct page *find_lock_page(struct address_space *mapping,
+                                       pgoff_t offset)
+ {
+-      return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
++      return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ }
+ /**
+@@ -324,7 +324,7 @@ static inline struct page *find_or_creat
+ {
+       return pagecache_get_page(mapping, offset,
+                                       FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+-                                      gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
++                                      gfp_mask);
+ }
+ /**
+@@ -345,8 +345,7 @@ static inline struct page *grab_cache_pa
+ {
+       return pagecache_get_page(mapping, index,
+                       FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+-                      mapping_gfp_mask(mapping),
+-                      GFP_NOFS);
++                      mapping_gfp_mask(mapping));
+ }
+ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -897,7 +897,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * @mapping: the address_space to search
+  * @offset: the page index
+  * @fgp_flags: PCG flags
+- * @gfp_mask: gfp mask to use if a page is to be allocated
++ * @gfp_mask: gfp mask to use for the page cache data page allocation
+  *
+  * Looks up the page cache slot at @mapping & @offset.
+  *
+@@ -916,7 +916,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * If there is a page cache page, it is returned with an increased refcount.
+  */
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-      int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
++      int fgp_flags, gfp_t gfp_mask)
+ {
+       struct page *page;
+@@ -953,13 +953,11 @@ no_page:
+       if (!page && (fgp_flags & FGP_CREAT)) {
+               int err;
+               if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
+-                      cache_gfp_mask |= __GFP_WRITE;
+-              if (fgp_flags & FGP_NOFS) {
+-                      cache_gfp_mask &= ~__GFP_FS;
+-                      radix_gfp_mask &= ~__GFP_FS;
+-              }
++                      gfp_mask |= __GFP_WRITE;
++              if (fgp_flags & FGP_NOFS)
++                      gfp_mask &= ~__GFP_FS;
+-              page = __page_cache_alloc(cache_gfp_mask);
++              page = __page_cache_alloc(gfp_mask);
+               if (!page)
+                       return NULL;
+@@ -970,7 +968,8 @@ no_page:
+               if (fgp_flags & FGP_ACCESSED)
+                       init_page_accessed(page);
+-              err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
++              err = add_to_page_cache_lru(page, mapping, offset,
++                              gfp_mask & GFP_RECLAIM_MASK);
+               if (unlikely(err)) {
+                       page_cache_release(page);
+                       page = NULL;
+@@ -2460,8 +2459,7 @@ struct page *grab_cache_page_write_begin
+               fgp_flags |= FGP_NOFS;
+       page = pagecache_get_page(mapping, index, fgp_flags,
+-                      mapping_gfp_mask(mapping),
+-                      GFP_KERNEL);
++                      mapping_gfp_mask(mapping));
+       if (page)
+               wait_for_stable_page(page);
diff --git a/queue-3.14/series b/queue-3.14/series
index 54104775ee3e31abcc31e21bcf556dcc7d24d6ed..98ac2a044f29d4d369a376ef664683f981eb5e7f 100644 (file)
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -73,3 +73,4 @@ mm-move-zone-pages_scanned-into-a-vmstat-counter.patch
 mm-vmscan-only-update-per-cpu-thresholds-for-online-cpu.patch
 mm-page_alloc-abort-fair-zone-allocation-policy-when-remotes-nodes-are-encountered.patch
 mm-page_alloc-reduce-cost-of-the-fair-zone-allocation-policy.patch
+mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
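
For illustration only (this note is not part of the queued patch): the mask
derivation that the patched pagecache_get_page() performs can be modeled as a
standalone C program.  The gfp_t bit values below are simplified placeholders
rather than the kernel's real encoding, derive_masks() is a hypothetical
stand-in for the logic inside pagecache_get_page(), and the
mapping_cap_account_dirty() check on the FGP_WRITE path is omitted for
brevity.

/*
 * Standalone model of the gfp-mask handling in the patched
 * pagecache_get_page().  Flag values are illustrative placeholders,
 * not necessarily the kernel's actual gfp encoding.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_WAIT	0x00000010u
#define __GFP_IO	0x00000040u
#define __GFP_FS	0x00000080u
#define __GFP_WRITE	0x00001000u	/* page will be written to soon */
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)

/* Sane subset the radix tree node allocation is allowed to see;
 * notably it does not include __GFP_WRITE. */
#define GFP_RECLAIM_MASK	(__GFP_WAIT | __GFP_IO | __GFP_FS)

#define FGP_WRITE	0x00000008
#define FGP_NOFS	0x00000010

/* Hypothetical stand-in for the patched logic: one caller-supplied
 * mask, adjusted by fgp_flags, from which both allocations derive. */
static void derive_masks(int fgp_flags, gfp_t gfp_mask,
			 gfp_t *page_mask, gfp_t *tree_mask)
{
	if (fgp_flags & FGP_WRITE)	/* dirty-accounting check omitted */
		gfp_mask |= __GFP_WRITE;
	if (fgp_flags & FGP_NOFS)
		gfp_mask &= ~__GFP_FS;	/* both allocations now obey NOFS */

	*page_mask = gfp_mask;			  /* __page_cache_alloc() */
	*tree_mask = gfp_mask & GFP_RECLAIM_MASK; /* add_to_page_cache_lru() */
}

int main(void)
{
	gfp_t page_mask, tree_mask;

	/* grab_cache_page_write_begin() with AOP_FLAG_NOFS set: before
	 * the patch the tree allocation got a hardcoded GFP_KERNEL and
	 * kept __GFP_FS; now it loses __GFP_FS along with the page. */
	derive_masks(FGP_NOFS, GFP_KERNEL, &page_mask, &tree_mask);
	printf("page mask: %#x, tree mask: %#x\n", page_mask, tree_mask);
	return 0;
}

With FGP_NOFS set, both printed masks come out with __GFP_FS cleared; before
the patch the tree allocation would still have received the full GFP_KERNEL,
which is exactly the deadlock window the patch closes.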