From d418234c99390d35af34a0a9b77a15e9fe1d6614 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Tue, 27 Jan 2015 17:19:51 -0800
Subject: [PATCH] 3.14-stable patches

added patches:
	mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch

---
 ...tree-gfp-mask-for-pagecache_get_page.patch | 156 ++++++++++++++++++
 queue-3.14/series                             |   1 +
 2 files changed, 157 insertions(+)
 create mode 100644 queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch

diff --git a/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch b/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
new file mode 100644
index 00000000000..33d30d49926
--- /dev/null
+++ b/queue-3.14/mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
@@ -0,0 +1,156 @@
+From 45f87de57f8fad59302fd263dd81ffa4843b5b24 Mon Sep 17 00:00:00 2001
+From: Michal Hocko
+Date: Mon, 29 Dec 2014 20:30:35 +0100
+Subject: mm: get rid of radix tree gfp mask for pagecache_get_page
+
+From: Michal Hocko
+
+commit 45f87de57f8fad59302fd263dd81ffa4843b5b24 upstream.
+
+Commit 2457aec63745 ("mm: non-atomically mark page accessed during page
+cache allocation where possible") added a separate parameter for
+specifying the gfp mask to use for radix tree allocations.
+
+Not only is this less than optimal from the API point of view, because
+it is error prone, it is also currently buggy:
+grab_cache_page_write_begin uses GFP_KERNEL for the radix tree, so if
+fgp_flags doesn't contain FGP_NOFS (mostly controlled by the filesystem
+via the AOP_FLAG_NOFS flag) but the mapping_gfp_mask has __GFP_FS
+cleared, the radix tree allocation doesn't obey the restriction and
+might recurse into the filesystem and cause deadlocks.  Unfortunately
+this is the case for most filesystems, because only ext4 and gfs2 use
+AOP_FLAG_NOFS.
+
+Let's simply remove the radix_gfp_mask parameter, because the
+allocation context is the same for both the page cache page and the
+radix tree.  Just make sure that the radix tree gets only the sane
+subset of the mask (e.g. do not pass __GFP_WRITE).
+
+Long term it would be preferable to convert the remaining users of
+AOP_FLAG_NOFS to use mapping_gfp_mask instead and simplify this
+interface even further.
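To make the failure mode concrete, here is a minimal sketch of the
pre-patch call chain (illustrative only, not part of the queued patch;
the helper name and the mask values are made up for the example):

	#include <linux/pagemap.h>	/* pagecache_get_page(), mapping_gfp_mask() */

	/*
	 * Old two-mask API.  Assume "mapping" belongs to a filesystem
	 * whose mapping_gfp_mask() has __GFP_FS cleared, i.e. reclaim
	 * must not re-enter the filesystem.
	 */
	static struct page *old_api_sketch(struct address_space *mapping,
					   pgoff_t index, int fgp_flags)
	{
		gfp_t page_gfp  = mapping_gfp_mask(mapping); /* e.g. GFP_KERNEL & ~__GFP_FS */
		gfp_t radix_gfp = GFP_KERNEL;  /* what grab_cache_page_write_begin passed */

		/*
		 * The data page allocation honours the NOFS restriction,
		 * but the radix tree node allocated with radix_gfp does
		 * not: under memory pressure it may recurse into the
		 * filesystem and deadlock.
		 */
		return pagecache_get_page(mapping, index, fgp_flags,
					  page_gfp, radix_gfp);
	}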
+
+Reported-by: Dave Chinner
+Signed-off-by: Michal Hocko
+Signed-off-by: Linus Torvalds
+Signed-off-by: Greg Kroah-Hartman
+
+
+---
+ include/linux/pagemap.h |   13 ++++++-------
+ mm/filemap.c            |   20 +++++++++-----------
+ 2 files changed, 15 insertions(+), 18 deletions(-)
+
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -256,7 +256,7 @@ pgoff_t page_cache_prev_hole(struct addr
+ #define FGP_NOWAIT		0x00000020
+
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
++		int fgp_flags, gfp_t cache_gfp_mask);
+
+ /**
+  * find_get_page - find and get a page reference
+@@ -271,13 +271,13 @@ struct page *pagecache_get_page(struct a
+ static inline struct page *find_get_page(struct address_space *mapping,
+					pgoff_t offset)
+ {
+-	return pagecache_get_page(mapping, offset, 0, 0, 0);
++	return pagecache_get_page(mapping, offset, 0, 0);
+ }
+
+ static inline struct page *find_get_page_flags(struct address_space *mapping,
+					pgoff_t offset, int fgp_flags)
+ {
+-	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
++	return pagecache_get_page(mapping, offset, fgp_flags, 0);
+ }
+
+ /**
+@@ -297,7 +297,7 @@ static inline struct page *find_get_page
+ static inline struct page *find_lock_page(struct address_space *mapping,
+					pgoff_t offset)
+ {
+-	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
++	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
+ }
+
+ /**
+@@ -324,7 +324,7 @@ static inline struct page *find_or_creat
+ {
+	return pagecache_get_page(mapping, offset,
+					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
+-					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
++					gfp_mask);
+ }
+
+ /**
+@@ -345,8 +345,7 @@ static inline struct page *grab_cache_pa
+ {
+	return pagecache_get_page(mapping, index,
+			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
+-			mapping_gfp_mask(mapping),
+-			GFP_NOFS);
++			mapping_gfp_mask(mapping));
+ }
+
+ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -897,7 +897,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * @mapping: the address_space to search
+  * @offset: the page index
+  * @fgp_flags: PCG flags
+- * @gfp_mask: gfp mask to use if a page is to be allocated
++ * @gfp_mask: gfp mask to use for the page cache data page allocation
+  *
+  * Looks up the page cache slot at @mapping & @offset.
+  *
+@@ -916,7 +916,7 @@ EXPORT_SYMBOL(find_lock_entry);
+  * If there is a page cache page, it is returned with an increased refcount.
+  */
+ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
+-	int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
++	int fgp_flags, gfp_t gfp_mask)
+ {
+ 	struct page *page;
+
+@@ -953,13 +953,11 @@ no_page:
+ 	if (!page && (fgp_flags & FGP_CREAT)) {
+ 		int err;
+ 		if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
+-			cache_gfp_mask |= __GFP_WRITE;
+-		if (fgp_flags & FGP_NOFS) {
+-			cache_gfp_mask &= ~__GFP_FS;
+-			radix_gfp_mask &= ~__GFP_FS;
+-		}
++			gfp_mask |= __GFP_WRITE;
++		if (fgp_flags & FGP_NOFS)
++			gfp_mask &= ~__GFP_FS;
+
+-		page = __page_cache_alloc(cache_gfp_mask);
++		page = __page_cache_alloc(gfp_mask);
+ 		if (!page)
+ 			return NULL;
+
+@@ -970,7 +968,8 @@ no_page:
+ 		if (fgp_flags & FGP_ACCESSED)
+ 			init_page_accessed(page);
+
+-		err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
++		err = add_to_page_cache_lru(page, mapping, offset,
++				gfp_mask & GFP_RECLAIM_MASK);
+ 		if (unlikely(err)) {
+ 			page_cache_release(page);
+ 			page = NULL;
+@@ -2460,8 +2459,7 @@ struct page *grab_cache_page_write_begin
+ 		fgp_flags |= FGP_NOFS;
+
+ 	page = pagecache_get_page(mapping, index, fgp_flags,
+-			mapping_gfp_mask(mapping),
+-			GFP_KERNEL);
++			mapping_gfp_mask(mapping));
+ 	if (page)
+ 		wait_for_stable_page(page);
+
diff --git a/queue-3.14/series b/queue-3.14/series
index 54104775ee3..98ac2a044f2 100644
--- a/queue-3.14/series
+++ b/queue-3.14/series
@@ -73,3 +73,4 @@ mm-move-zone-pages_scanned-into-a-vmstat-counter.patch
 mm-vmscan-only-update-per-cpu-thresholds-for-online-cpu.patch
 mm-page_alloc-abort-fair-zone-allocation-policy-when-remotes-nodes-are-encountered.patch
 mm-page_alloc-reduce-cost-of-the-fair-zone-allocation-policy.patch
+mm-get-rid-of-radix-tree-gfp-mask-for-pagecache_get_page.patch
-- 
2.47.3
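For comparison, a minimal sketch of the post-patch calling convention
(illustrative only, not part of the queued patch; the helper name is
made up for the example):

	#include <linux/pagemap.h>

	/*
	 * New single-mask API: the caller passes only the mapping's mask.
	 * pagecache_get_page() itself clears __GFP_FS when FGP_NOFS is
	 * set and hands the radix tree only gfp_mask & GFP_RECLAIM_MASK,
	 * so page-only bits such as __GFP_WRITE never reach
	 * add_to_page_cache_lru().
	 */
	static struct page *new_api_sketch(struct address_space *mapping,
					   pgoff_t index, int fgp_flags)
	{
		return pagecache_get_page(mapping, index, fgp_flags,
					  mapping_gfp_mask(mapping));
	}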