--- /dev/null
+From e7470ee89f003634a88e7b5e5a7b65b3025987de Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:29 -0700
+Subject: fs: buffer: do not use unnecessary atomic operations when discarding buffers
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit e7470ee89f003634a88e7b5e5a7b65b3025987de upstream.
+
+Discarding buffers uses a bunch of atomic operations because ... I can't
+think of a reason why it does. Use a cmpxchg loop to clear all the
+necessary flags instead. In most (all?) cases this will be a single
+atomic operation.
+
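+For illustration only (not part of this patch), the same idea in a
+standalone C11 sketch: several flag bits are cleared with one successful
+compare-and-swap instead of one locked RMW per bit. The flag names here
+are made up for the example:
+
+	#include <stdatomic.h>
+
+	#define FLAG_A		(1UL << 0)
+	#define FLAG_B		(1UL << 1)
+	#define FLAGS_CLEARED	(FLAG_A | FLAG_B)
+
+	static void clear_flags(_Atomic unsigned long *state)
+	{
+		unsigned long old = atomic_load(state);
+
+		/* On failure 'old' is reloaded with the current value,
+		 * so this retries only if *state changed concurrently. */
+		while (!atomic_compare_exchange_weak(state, &old,
+						     old & ~FLAGS_CLEARED))
+			;
+	}
+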
+[akpm@linux-foundation.org: move BUFFER_FLAGS_DISCARD into the .c file]
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/buffer.c | 21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1485,16 +1485,27 @@ EXPORT_SYMBOL(set_bh_page);
+ /*
+ * Called when truncating a buffer on a page completely.
+ */
++
++/* Bits that are cleared during an invalidate */
++#define BUFFER_FLAGS_DISCARD \
++ (1 << BH_Mapped | 1 << BH_New | 1 << BH_Req | \
++ 1 << BH_Delay | 1 << BH_Unwritten)
++
+ static void discard_buffer(struct buffer_head * bh)
+ {
++ unsigned long b_state, b_state_old;
++
+ lock_buffer(bh);
+ clear_buffer_dirty(bh);
+ bh->b_bdev = NULL;
+- clear_buffer_mapped(bh);
+- clear_buffer_req(bh);
+- clear_buffer_new(bh);
+- clear_buffer_delay(bh);
+- clear_buffer_unwritten(bh);
++ b_state = bh->b_state;
++ for (;;) {
++ b_state_old = cmpxchg(&bh->b_state, b_state,
++ (b_state & ~BUFFER_FLAGS_DISCARD));
++ if (b_state_old == b_state)
++ break;
++ b_state = b_state_old;
++ }
+ unlock_buffer(bh);
+ }
+
--- /dev/null
+From e3741b506c5088fa8c911bb5884c430f770fb49d Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:26 -0700
+Subject: mm: do not use atomic operations when releasing pages
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit e3741b506c5088fa8c911bb5884c430f770fb49d upstream.
+
+By the time a page is released there should be no references to it any
+more, and a parallel mark_page_accessed() should not be reordered against
+us. Use the non-locked variant to clear the page active flag.
+
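+For illustration only (not part of this patch): ClearPageActive() expands
+to an atomic clear_bit(), which needs a locked instruction, while
+__ClearPageActive() expands to a plain, non-atomic __clear_bit():
+
+	/* atomic: safe against concurrent updates of page->flags */
+	clear_bit(PG_active, &page->flags);
+
+	/* non-atomic: only valid when nothing else can reach the page,
+	 * e.g. here, where the last reference has already been dropped */
+	__clear_bit(PG_active, &page->flags);
+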
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/swap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -867,7 +867,7 @@ void release_pages(struct page **pages,
+ }
+
+ /* Clear Active bit in case of parallel mark_page_accessed */
+- ClearPageActive(page);
++ __ClearPageActive(page);
+
+ list_add(&page->lru, &pages_to_free);
+ }
--- /dev/null
+From 6fb81a17d21f2a138b8f424af4cf379f2b694060 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:28 -0700
+Subject: mm: do not use unnecessary atomic operations when adding pages to the LRU
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 6fb81a17d21f2a138b8f424af4cf379f2b694060 upstream.
+
+When adding pages to the LRU we clear the active bit unconditionally.
+As the page could be reachable from other paths we cannot use unlocked
+operations without risk of corruption such as a parallel
+mark_page_accessed. This patch tests whether it is necessary to clear
+the active flag before using an atomic operation. This potentially opens
+a tiny race when PageActive is checked, as mark_page_accessed could be
+called after PageActive was checked. The race already exists but this
+patch changes it slightly. The consequence is that a page may be
+promoted to the active list that would previously have been left on the
+inactive list. It's too tiny a race and too marginal a consequence to
+always use atomic operations for.
+
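+For illustration only (not part of this patch): PageActive() is a plain
+test_bit() read, so the cheap unlocked test skips the locked RMW whenever
+the bit is already clear, which is the common case here:
+
+	if (PageActive(page))		/* unlocked read */
+		ClearPageActive(page);	/* atomic, only when needed */
+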
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/swap.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -597,13 +597,15 @@ static void __lru_cache_add(struct page
+ */
+ void lru_cache_add_anon(struct page *page)
+ {
+- ClearPageActive(page);
++ if (PageActive(page))
++ ClearPageActive(page);
+ __lru_cache_add(page);
+ }
+
+ void lru_cache_add_file(struct page *page)
+ {
+- ClearPageActive(page);
++ if (PageActive(page))
++ ClearPageActive(page);
+ __lru_cache_add(page);
+ }
+ EXPORT_SYMBOL(lru_cache_add_file);
--- /dev/null
+From 2457aec63745e235bcafb7ef312b182d8682f0fc Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:31 -0700
+Subject: mm: non-atomically mark page accessed during page cache allocation where possible
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 2457aec63745e235bcafb7ef312b182d8682f0fc upstream.
+
+aops->write_begin may allocate a new page and make it visible only to have
+mark_page_accessed called almost immediately after. Once the page is
+visible the atomic operations are necessary, which is noticeable overhead
+when writing to an in-memory filesystem like tmpfs but should also be
+noticeable with fast storage. The objective of the patch is to initialise
+the accessed information with non-atomic operations before the page is
+visible.
+
+The bulk of filesystems directly or indirectly use
+grab_cache_page_write_begin or find_or_create_page for the initial
+allocation of a page cache page. This patch adds an init_page_accessed()
+helper which behaves like the first call to mark_page_accessed() but may
+be called before the page is visible and can be done non-atomically.
+
+The primary APIs of concern in this case are the following and are used
+by most filesystems.
+
+ find_get_page
+ find_lock_page
+ find_or_create_page
+ grab_cache_page_nowait
+ grab_cache_page_write_begin
+
+All of them are very similar in detail, so the patch creates a core helper
+pagecache_get_page() which takes a flags parameter that affects its
+behavior, such as whether the page should be marked accessed or not. The
+old API is preserved but each function is basically a thin wrapper around
+this core function, as the sketch below illustrates.
+
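+For illustration only (not part of this patch), how callers use the new
+core helper; these calls mirror the wrappers added to
+include/linux/pagemap.h:
+
+	struct page *page;
+
+	/* plain lookup, equivalent to the old find_get_page() */
+	page = pagecache_get_page(mapping, index, 0, 0, 0);
+
+	/* lookup that also marks the page accessed */
+	page = find_get_page_flags(mapping, index, FGP_ACCESSED);
+
+	/* locate or create: returned locked, marked accessed and with
+	 * an elevated refcount, as find_or_create_page() now behaves */
+	page = pagecache_get_page(mapping, index,
+				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+				  gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+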
+Each of the filesystems is then updated to avoid calling
+mark_page_accessed when it is known that the VM interfaces have already
+done the job. There is a slight snag in that the timing of the
+mark_page_accessed() has now changed, so in rare cases it's possible a
+page gets to the end of the LRU as PageReferenced whereas previously it
+might have been repromoted. This is expected to be rare but it's worth
+the filesystem people thinking about it in case they see a problem with
+the timing change. It is also the case that some filesystems may now be
+marking pages accessed that they previously did not, but it makes sense
+that filesystems have consistent behaviour in this regard.
+
+The test case used to evaluate this is a simple dd of a large file done
+multiple times with the file deleted on each iteration. The size of the
+file is 1/10th physical memory to avoid dirty page balancing. In the
+async case it is possible that the workload completes without even
+hitting the disk and will have variable results, but it highlights the
+impact of mark_page_accessed for async IO. The sync results are expected
+to be more stable. The exception is tmpfs where the normal case is for
+the "IO" to not hit the disk.
+
+The test machine was single socket and UMA to avoid any scheduling or NUMA
+artifacts. Throughput and wall times are presented for sync IO; only wall
+times are shown for async, as the granularity reported by dd and the
+variability make throughput unsuitable for comparison. As async results
+were variable due to writeback timings, I'm only reporting the maximum
+figures. The sync results were stable enough to make the mean and stddev
+uninteresting.
+
+The performance results are reported based on a run with no profiling.
+Profile data is based on a separate run with oprofile running.
+
+async dd
+ 3.15.0-rc3 3.15.0-rc3
+ vanilla accessed-v2
+ext3 Max elapsed 13.9900 ( 0.00%) 11.5900 ( 17.16%)
+tmpfs Max elapsed 0.5100 ( 0.00%) 0.4900 ( 3.92%)
+btrfs Max elapsed 12.8100 ( 0.00%) 12.7800 ( 0.23%)
+ext4 Max elapsed 18.6000 ( 0.00%) 13.3400 ( 28.28%)
+xfs Max elapsed 12.5600 ( 0.00%) 2.0900 ( 83.36%)
+
+The XFS figure is a bit strange as it managed to avoid a worst case by
+sheer luck but the average figures looked reasonable.
+
+ samples percentage
+ext3 86107 0.9783 vmlinux-3.15.0-rc4-vanilla mark_page_accessed
+ext3 23833 0.2710 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed
+ext3 5036 0.0573 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed
+ext4 64566 0.8961 vmlinux-3.15.0-rc4-vanilla mark_page_accessed
+ext4 5322 0.0713 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed
+ext4 2869 0.0384 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed
+xfs 62126 1.7675 vmlinux-3.15.0-rc4-vanilla mark_page_accessed
+xfs 1904 0.0554 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed
+xfs 103 0.0030 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed
+btrfs 10655 0.1338 vmlinux-3.15.0-rc4-vanilla mark_page_accessed
+btrfs 2020 0.0273 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed
+btrfs 587 0.0079 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed
+tmpfs 59562 3.2628 vmlinux-3.15.0-rc4-vanilla mark_page_accessed
+tmpfs 1210 0.0696 vmlinux-3.15.0-rc4-accessed-v3r25 init_page_accessed
+tmpfs 94 0.0054 vmlinux-3.15.0-rc4-accessed-v3r25 mark_page_accessed
+
+[akpm@linux-foundation.org: don't run init_page_accessed() against an uninitialised pointer]
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Tested-by: Prabhakar Lad <prabhakar.csengg@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/extent_io.c | 11 +-
+ fs/btrfs/file.c | 5 -
+ fs/buffer.c | 7 -
+ fs/ext4/mballoc.c | 14 +--
+ fs/f2fs/checkpoint.c | 1
+ fs/f2fs/node.c | 2
+ fs/fuse/file.c | 2
+ fs/gfs2/aops.c | 1
+ fs/gfs2/meta_io.c | 4
+ fs/ntfs/attrib.c | 1
+ fs/ntfs/file.c | 1
+ include/linux/page-flags.h | 1
+ include/linux/pagemap.h | 107 ++++++++++++++++++++++-
+ include/linux/swap.h | 1
+ mm/filemap.c | 202 ++++++++++++++++-----------------------------
+ mm/shmem.c | 6 +
+ mm/swap.c | 11 ++
+ 17 files changed, 217 insertions(+), 160 deletions(-)
+
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -4507,7 +4507,8 @@ static void check_buffer_tree_ref(struct
+ spin_unlock(&eb->refs_lock);
+ }
+
+-static void mark_extent_buffer_accessed(struct extent_buffer *eb)
++static void mark_extent_buffer_accessed(struct extent_buffer *eb,
++ struct page *accessed)
+ {
+ unsigned long num_pages, i;
+
+@@ -4516,7 +4517,8 @@ static void mark_extent_buffer_accessed(
+ num_pages = num_extent_pages(eb->start, eb->len);
+ for (i = 0; i < num_pages; i++) {
+ struct page *p = extent_buffer_page(eb, i);
+- mark_page_accessed(p);
++ if (p != accessed)
++ mark_page_accessed(p);
+ }
+ }
+
+@@ -4530,7 +4532,7 @@ struct extent_buffer *find_extent_buffer
+ start >> PAGE_CACHE_SHIFT);
+ if (eb && atomic_inc_not_zero(&eb->refs)) {
+ rcu_read_unlock();
+- mark_extent_buffer_accessed(eb);
++ mark_extent_buffer_accessed(eb, NULL);
+ return eb;
+ }
+ rcu_read_unlock();
+@@ -4578,7 +4580,7 @@ struct extent_buffer *alloc_extent_buffe
+ spin_unlock(&mapping->private_lock);
+ unlock_page(p);
+ page_cache_release(p);
+- mark_extent_buffer_accessed(exists);
++ mark_extent_buffer_accessed(exists, p);
+ goto free_eb;
+ }
+
+@@ -4593,7 +4595,6 @@ struct extent_buffer *alloc_extent_buffe
+ attach_extent_buffer_page(eb, p);
+ spin_unlock(&mapping->private_lock);
+ WARN_ON(PageDirty(p));
+- mark_page_accessed(p);
+ eb->pages[i] = p;
+ if (!PageUptodate(p))
+ uptodate = 0;
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -470,11 +470,12 @@ static void btrfs_drop_pages(struct page
+ for (i = 0; i < num_pages; i++) {
+ /* page checked is some magic around finding pages that
+ * have been modified without going through btrfs_set_page_dirty
+- * clear it here
++ * clear it here. There should be no need to mark the pages
++ * accessed, as prepare_pages should have marked them accessed
++ * via find_or_create_page()
+ */
+ ClearPageChecked(pages[i]);
+ unlock_page(pages[i]);
+- mark_page_accessed(pages[i]);
+ page_cache_release(pages[i]);
+ }
+ }
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -227,7 +227,7 @@ __find_get_block_slow(struct block_devic
+ int all_mapped = 1;
+
+ index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
+- page = find_get_page(bd_mapping, index);
++ page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
+ if (!page)
+ goto out;
+
+@@ -1368,12 +1368,13 @@ __find_get_block(struct block_device *bd
+ struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
+
+ if (bh == NULL) {
++ /* __find_get_block_slow will mark the page accessed */
+ bh = __find_get_block_slow(bdev, block);
+ if (bh)
+ bh_lru_install(bh);
+- }
+- if (bh)
++ } else
+ touch_buffer(bh);
++
+ return bh;
+ }
+ EXPORT_SYMBOL(__find_get_block);
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1044,6 +1044,8 @@ int ext4_mb_init_group(struct super_bloc
+ * allocating. If we are looking at the buddy cache we would
+ * have taken a reference using ext4_mb_load_buddy and that
+ * would have pinned buddy page to page cache.
++ * The call to ext4_mb_get_buddy_page_lock will mark the
++ * page accessed.
+ */
+ ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b);
+ if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) {
+@@ -1062,7 +1064,6 @@ int ext4_mb_init_group(struct super_bloc
+ ret = -EIO;
+ goto err;
+ }
+- mark_page_accessed(page);
+
+ if (e4b.bd_buddy_page == NULL) {
+ /*
+@@ -1082,7 +1083,6 @@ int ext4_mb_init_group(struct super_bloc
+ ret = -EIO;
+ goto err;
+ }
+- mark_page_accessed(page);
+ err:
+ ext4_mb_put_buddy_page_lock(&e4b);
+ return ret;
+@@ -1141,7 +1141,7 @@ ext4_mb_load_buddy(struct super_block *s
+
+ /* we could use find_or_create_page(), but it locks page
+ * what we'd like to avoid in fast path ... */
+- page = find_get_page(inode->i_mapping, pnum);
++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+ if (page == NULL || !PageUptodate(page)) {
+ if (page)
+ /*
+@@ -1172,15 +1172,16 @@ ext4_mb_load_buddy(struct super_block *s
+ ret = -EIO;
+ goto err;
+ }
++
++ /* Pages marked accessed already */
+ e4b->bd_bitmap_page = page;
+ e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
+- mark_page_accessed(page);
+
+ block++;
+ pnum = block / blocks_per_page;
+ poff = block % blocks_per_page;
+
+- page = find_get_page(inode->i_mapping, pnum);
++ page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
+ if (page == NULL || !PageUptodate(page)) {
+ if (page)
+ page_cache_release(page);
+@@ -1201,9 +1202,10 @@ ext4_mb_load_buddy(struct super_block *s
+ ret = -EIO;
+ goto err;
+ }
++
++ /* Pages marked accessed already */
+ e4b->bd_buddy_page = page;
+ e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
+- mark_page_accessed(page);
+
+ BUG_ON(e4b->bd_bitmap_page == NULL);
+ BUG_ON(e4b->bd_buddy_page == NULL);
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -71,7 +71,6 @@ repeat:
+ goto repeat;
+ }
+ out:
+- mark_page_accessed(page);
+ return page;
+ }
+
+--- a/fs/f2fs/node.c
++++ b/fs/f2fs/node.c
+@@ -969,7 +969,6 @@ repeat:
+ }
+ got_it:
+ f2fs_bug_on(nid != nid_of_node(page));
+- mark_page_accessed(page);
+ return page;
+ }
+
+@@ -1024,7 +1023,6 @@ page_hit:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
+- mark_page_accessed(page);
+ return page;
+ }
+
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1006,8 +1006,6 @@ static ssize_t fuse_fill_write_pages(str
+ tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+ flush_dcache_page(page);
+
+- mark_page_accessed(page);
+-
+ if (!tmp) {
+ unlock_page(page);
+ page_cache_release(page);
+--- a/fs/gfs2/aops.c
++++ b/fs/gfs2/aops.c
+@@ -517,7 +517,6 @@ int gfs2_internal_read(struct gfs2_inode
+ p = kmap_atomic(page);
+ memcpy(buf + copied, p + offset, amt);
+ kunmap_atomic(p);
+- mark_page_accessed(page);
+ page_cache_release(page);
+ copied += amt;
+ index++;
+--- a/fs/gfs2/meta_io.c
++++ b/fs/gfs2/meta_io.c
+@@ -136,7 +136,8 @@ struct buffer_head *gfs2_getbuf(struct g
+ yield();
+ }
+ } else {
+- page = find_lock_page(mapping, index);
++ page = find_get_page_flags(mapping, index,
++ FGP_LOCK|FGP_ACCESSED);
+ if (!page)
+ return NULL;
+ }
+@@ -153,7 +154,6 @@ struct buffer_head *gfs2_getbuf(struct g
+ map_bh(bh, sdp->sd_vfs, blkno);
+
+ unlock_page(page);
+- mark_page_accessed(page);
+ page_cache_release(page);
+
+ return bh;
+--- a/fs/ntfs/attrib.c
++++ b/fs/ntfs/attrib.c
+@@ -1748,7 +1748,6 @@ int ntfs_attr_make_non_resident(ntfs_ino
+ if (page) {
+ set_page_dirty(page);
+ unlock_page(page);
+- mark_page_accessed(page);
+ page_cache_release(page);
+ }
+ ntfs_debug("Done.");
+--- a/fs/ntfs/file.c
++++ b/fs/ntfs/file.c
+@@ -2060,7 +2060,6 @@ static ssize_t ntfs_file_buffered_write(
+ }
+ do {
+ unlock_page(pages[--do_pages]);
+- mark_page_accessed(pages[do_pages]);
+ page_cache_release(pages[do_pages]);
+ } while (do_pages);
+ if (unlikely(status))
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -198,6 +198,7 @@ struct page; /* forward declaration */
+ TESTPAGEFLAG(Locked, locked)
+ PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
+ PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
++ __SETPAGEFLAG(Referenced, referenced)
+ PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+ PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+ PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -248,12 +248,109 @@ pgoff_t page_cache_next_hole(struct addr
+ pgoff_t page_cache_prev_hole(struct address_space *mapping,
+ pgoff_t index, unsigned long max_scan);
+
++#define FGP_ACCESSED 0x00000001
++#define FGP_LOCK 0x00000002
++#define FGP_CREAT 0x00000004
++#define FGP_WRITE 0x00000008
++#define FGP_NOFS 0x00000010
++#define FGP_NOWAIT 0x00000020
++
++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
++ int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
++
++/**
++ * find_get_page - find and get a page reference
++ * @mapping: the address_space to search
++ * @offset: the page index
++ *
++ * Looks up the page cache slot at @mapping & @offset. If there is a
++ * page cache page, it is returned with an increased refcount.
++ *
++ * Otherwise, %NULL is returned.
++ */
++static inline struct page *find_get_page(struct address_space *mapping,
++ pgoff_t offset)
++{
++ return pagecache_get_page(mapping, offset, 0, 0, 0);
++}
++
++static inline struct page *find_get_page_flags(struct address_space *mapping,
++ pgoff_t offset, int fgp_flags)
++{
++ return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
++}
++
++/**
++ * find_lock_page - locate, pin and lock a pagecache page
++ *
++ * @mapping: the address_space to search
++ * @offset: the page index
++ *
++ * Looks up the page cache slot at @mapping & @offset. If there is a
++ * page cache page, it is returned locked and with an increased
++ * refcount.
++ *
++ * Otherwise, %NULL is returned.
++ *
++ * find_lock_page() may sleep.
++ */
++static inline struct page *find_lock_page(struct address_space *mapping,
++ pgoff_t offset)
++{
++ return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
++}
++
++/**
++ * find_or_create_page - locate or add a pagecache page
++ * @mapping: the page's address_space
++ * @index: the page's index into the mapping
++ * @gfp_mask: page allocation mode
++ *
++ * Looks up the page cache slot at @mapping & @offset. If there is a
++ * page cache page, it is returned locked and with an increased
++ * refcount.
++ *
++ * If the page is not present, a new page is allocated using @gfp_mask
++ * and added to the page cache and the VM's LRU list. The page is
++ * returned locked and with an increased refcount.
++ *
++ * On memory exhaustion, %NULL is returned.
++ *
++ * find_or_create_page() may sleep, even if @gfp_flags specifies an
++ * atomic allocation!
++ */
++static inline struct page *find_or_create_page(struct address_space *mapping,
++ pgoff_t offset, gfp_t gfp_mask)
++{
++ return pagecache_get_page(mapping, offset,
++ FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
++ gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
++}
++
++/**
++ * grab_cache_page_nowait - returns locked page at given index in given cache
++ * @mapping: target address_space
++ * @index: the page index
++ *
++ * Same as grab_cache_page(), but do not wait if the page is unavailable.
++ * This is intended for speculative data generators, where the data can
++ * be regenerated if the page couldn't be grabbed. This routine should
++ * be safe to call while holding the lock for another page.
++ *
++ * Clear __GFP_FS when allocating the page to avoid recursion into the fs
++ * and deadlock against the caller's locked page.
++ */
++static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
++ pgoff_t index)
++{
++ return pagecache_get_page(mapping, index,
++ FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
++ mapping_gfp_mask(mapping),
++ GFP_NOFS);
++}
++
+ struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
+-struct page *find_get_page(struct address_space *mapping, pgoff_t offset);
+ struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
+-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset);
+-struct page *find_or_create_page(struct address_space *mapping, pgoff_t index,
+- gfp_t gfp_mask);
+ unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_entries, struct page **entries,
+ pgoff_t *indices);
+@@ -276,8 +373,6 @@ static inline struct page *grab_cache_pa
+ return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+ }
+
+-extern struct page * grab_cache_page_nowait(struct address_space *mapping,
+- pgoff_t index);
+ extern struct page * read_cache_page(struct address_space *mapping,
+ pgoff_t index, filler_t *filler, void *data);
+ extern struct page * read_cache_page_gfp(struct address_space *mapping,
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -275,6 +275,7 @@ extern void lru_add_page_tail(struct pag
+ struct lruvec *lruvec, struct list_head *head);
+ extern void activate_page(struct page *);
+ extern void mark_page_accessed(struct page *);
++extern void init_page_accessed(struct page *page);
+ extern void lru_add_drain(void);
+ extern void lru_add_drain_cpu(int cpu);
+ extern void lru_add_drain_all(void);
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -848,26 +848,6 @@ out:
+ EXPORT_SYMBOL(find_get_entry);
+
+ /**
+- * find_get_page - find and get a page reference
+- * @mapping: the address_space to search
+- * @offset: the page index
+- *
+- * Looks up the page cache slot at @mapping & @offset. If there is a
+- * page cache page, it is returned with an increased refcount.
+- *
+- * Otherwise, %NULL is returned.
+- */
+-struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
+-{
+- struct page *page = find_get_entry(mapping, offset);
+-
+- if (radix_tree_exceptional_entry(page))
+- page = NULL;
+- return page;
+-}
+-EXPORT_SYMBOL(find_get_page);
+-
+-/**
+ * find_lock_entry - locate, pin and lock a page cache entry
+ * @mapping: the address_space to search
+ * @offset: the page cache index
+@@ -904,66 +884,84 @@ repeat:
+ EXPORT_SYMBOL(find_lock_entry);
+
+ /**
+- * find_lock_page - locate, pin and lock a pagecache page
++ * pagecache_get_page - find and get a page reference
+ * @mapping: the address_space to search
+ * @offset: the page index
++ * @fgp_flags: FGP flags
++ * @gfp_mask: gfp mask to use if a page is to be allocated
+ *
+- * Looks up the page cache slot at @mapping & @offset. If there is a
+- * page cache page, it is returned locked and with an increased
+- * refcount.
++ * Looks up the page cache slot at @mapping & @offset.
+ *
+- * Otherwise, %NULL is returned.
++ * FGP flags modify how the page is returned
+ *
+- * find_lock_page() may sleep.
+- */
+-struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
+-{
+- struct page *page = find_lock_entry(mapping, offset);
+-
+- if (radix_tree_exceptional_entry(page))
+- page = NULL;
+- return page;
+-}
+-EXPORT_SYMBOL(find_lock_page);
+-
+-/**
+- * find_or_create_page - locate or add a pagecache page
+- * @mapping: the page's address_space
+- * @index: the page's index into the mapping
+- * @gfp_mask: page allocation mode
++ * FGP_ACCESSED: the page will be marked accessed
++ * FGP_LOCK: the page is returned locked
++ * FGP_CREAT: If page is not present then a new page is allocated using
++ * @gfp_mask and added to the page cache and the VM's LRU
++ * list. The page is returned locked and with an increased
++ * refcount. Otherwise, %NULL is returned.
+ *
+- * Looks up the page cache slot at @mapping & @offset. If there is a
+- * page cache page, it is returned locked and with an increased
+- * refcount.
+- *
+- * If the page is not present, a new page is allocated using @gfp_mask
+- * and added to the page cache and the VM's LRU list. The page is
+- * returned locked and with an increased refcount.
++ * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
++ * if the GFP flags specified for FGP_CREAT are atomic.
+ *
+- * On memory exhaustion, %NULL is returned.
+- *
+- * find_or_create_page() may sleep, even if @gfp_flags specifies an
+- * atomic allocation!
++ * If there is a page cache page, it is returned with an increased refcount.
+ */
+-struct page *find_or_create_page(struct address_space *mapping,
+- pgoff_t index, gfp_t gfp_mask)
++struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
++ int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ {
+ struct page *page;
+- int err;
++
+ repeat:
+- page = find_lock_page(mapping, index);
+- if (!page) {
+- page = __page_cache_alloc(gfp_mask);
++ page = find_get_entry(mapping, offset);
++ if (radix_tree_exceptional_entry(page))
++ page = NULL;
++ if (!page)
++ goto no_page;
++
++ if (fgp_flags & FGP_LOCK) {
++ if (fgp_flags & FGP_NOWAIT) {
++ if (!trylock_page(page)) {
++ page_cache_release(page);
++ return NULL;
++ }
++ } else {
++ lock_page(page);
++ }
++
++ /* Has the page been truncated? */
++ if (unlikely(page->mapping != mapping)) {
++ unlock_page(page);
++ page_cache_release(page);
++ goto repeat;
++ }
++ VM_BUG_ON(page->index != offset);
++ }
++
++ if (page && (fgp_flags & FGP_ACCESSED))
++ mark_page_accessed(page);
++
++no_page:
++ if (!page && (fgp_flags & FGP_CREAT)) {
++ int err;
++ if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
++ cache_gfp_mask |= __GFP_WRITE;
++ if (fgp_flags & FGP_NOFS) {
++ cache_gfp_mask &= ~__GFP_FS;
++ radix_gfp_mask &= ~__GFP_FS;
++ }
++
++ page = __page_cache_alloc(cache_gfp_mask);
+ if (!page)
+ return NULL;
+- /*
+- * We want a regular kernel memory (not highmem or DMA etc)
+- * allocation for the radix tree nodes, but we need to honour
+- * the context-specific requirements the caller has asked for.
+- * GFP_RECLAIM_MASK collects those requirements.
+- */
+- err = add_to_page_cache_lru(page, mapping, index,
+- (gfp_mask & GFP_RECLAIM_MASK));
++
++ if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK)))
++ fgp_flags |= FGP_LOCK;
++
++ /* Init accessed so we avoid an atomic mark_page_accessed later */
++ if (fgp_flags & FGP_ACCESSED)
++ init_page_accessed(page);
++
++ err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ if (unlikely(err)) {
+ page_cache_release(page);
+ page = NULL;
+@@ -971,9 +969,10 @@ repeat:
+ goto repeat;
+ }
+ }
++
+ return page;
+ }
+-EXPORT_SYMBOL(find_or_create_page);
++EXPORT_SYMBOL(pagecache_get_page);
+
+ /**
+ * find_get_entries - gang pagecache lookup
+@@ -1263,39 +1262,6 @@ repeat:
+ }
+ EXPORT_SYMBOL(find_get_pages_tag);
+
+-/**
+- * grab_cache_page_nowait - returns locked page at given index in given cache
+- * @mapping: target address_space
+- * @index: the page index
+- *
+- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+- * This is intended for speculative data generators, where the data can
+- * be regenerated if the page couldn't be grabbed. This routine should
+- * be safe to call while holding the lock for another page.
+- *
+- * Clear __GFP_FS when allocating the page to avoid recursion into the fs
+- * and deadlock against the caller's locked page.
+- */
+-struct page *
+-grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
+-{
+- struct page *page = find_get_page(mapping, index);
+-
+- if (page) {
+- if (trylock_page(page))
+- return page;
+- page_cache_release(page);
+- return NULL;
+- }
+- page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
+- if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
+- page_cache_release(page);
+- page = NULL;
+- }
+- return page;
+-}
+-EXPORT_SYMBOL(grab_cache_page_nowait);
+-
+ /*
+ * CD/DVDs are error prone. When a medium error occurs, the driver may fail
+ * a _large_ part of the i/o request. Imagine the worst scenario:
+@@ -2397,7 +2363,6 @@ int pagecache_write_end(struct file *fil
+ {
+ const struct address_space_operations *aops = mapping->a_ops;
+
+- mark_page_accessed(page);
+ return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
+ }
+ EXPORT_SYMBOL(pagecache_write_end);
+@@ -2479,34 +2444,18 @@ EXPORT_SYMBOL(generic_file_direct_write)
+ struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ pgoff_t index, unsigned flags)
+ {
+- int status;
+- gfp_t gfp_mask;
+ struct page *page;
+- gfp_t gfp_notmask = 0;
++ int fgp_flags = FGP_LOCK|FGP_ACCESSED|FGP_WRITE|FGP_CREAT;
+
+- gfp_mask = mapping_gfp_mask(mapping);
+- if (mapping_cap_account_dirty(mapping))
+- gfp_mask |= __GFP_WRITE;
+ if (flags & AOP_FLAG_NOFS)
+- gfp_notmask = __GFP_FS;
+-repeat:
+- page = find_lock_page(mapping, index);
++ fgp_flags |= FGP_NOFS;
++
++ page = pagecache_get_page(mapping, index, fgp_flags,
++ mapping_gfp_mask(mapping),
++ GFP_KERNEL);
+ if (page)
+- goto found;
++ wait_for_stable_page(page);
+
+- page = __page_cache_alloc(gfp_mask & ~gfp_notmask);
+- if (!page)
+- return NULL;
+- status = add_to_page_cache_lru(page, mapping, index,
+- GFP_KERNEL & ~gfp_notmask);
+- if (unlikely(status)) {
+- page_cache_release(page);
+- if (status == -EEXIST)
+- goto repeat;
+- return NULL;
+- }
+-found:
+- wait_for_stable_page(page);
+ return page;
+ }
+ EXPORT_SYMBOL(grab_cache_page_write_begin);
+@@ -2555,7 +2504,7 @@ again:
+
+ status = a_ops->write_begin(file, mapping, pos, bytes, flags,
+ &page, &fsdata);
+- if (unlikely(status))
++ if (unlikely(status < 0))
+ break;
+
+ if (mapping_writably_mapped(mapping))
+@@ -2564,7 +2513,6 @@ again:
+ copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+ flush_dcache_page(page);
+
+- mark_page_accessed(page);
+ status = a_ops->write_end(file, mapping, pos, bytes, copied,
+ page, fsdata);
+ if (unlikely(status < 0))
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1438,9 +1438,13 @@ shmem_write_begin(struct file *file, str
+ loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, void **fsdata)
+ {
++ int ret;
+ struct inode *inode = mapping->host;
+ pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+- return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
++ ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
++ if (ret == 0 && *pagep)
++ init_page_accessed(*pagep);
++ return ret;
+ }
+
+ static int
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -580,6 +580,17 @@ void mark_page_accessed(struct page *pag
+ }
+ EXPORT_SYMBOL(mark_page_accessed);
+
++/*
++ * Used instead of mark_page_accessed() on a page that is not visible yet,
++ * while it is still safe to use non-atomic ops
++ */
++void init_page_accessed(struct page *page)
++{
++ if (!PageReferenced(page))
++ __SetPageReferenced(page);
++}
++EXPORT_SYMBOL(init_page_accessed);
++
+ static void __lru_cache_add(struct page *page)
+ {
+ struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
--- /dev/null
+From b745bc85f21ea707e4ea1a91948055fa3e72c77b Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:22 -0700
+Subject: mm: page_alloc: convert hot/cold parameter and immediate callers to bool
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit b745bc85f21ea707e4ea1a91948055fa3e72c77b upstream.
+
+cold is conceptually a bool, so make it one. Make the likely case the
+"if" part of the block instead of the "else", as according to the
+optimisation manual this is preferred.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/tile/mm/homecache.c | 2 +-
+ fs/fuse/dev.c | 2 +-
+ include/linux/gfp.h | 4 ++--
+ include/linux/pagemap.h | 2 +-
+ include/linux/swap.h | 2 +-
+ mm/page_alloc.c | 20 ++++++++++----------
+ mm/swap.c | 4 ++--
+ mm/swap_state.c | 2 +-
+ mm/vmscan.c | 6 +++---
+ 9 files changed, 22 insertions(+), 22 deletions(-)
+
+--- a/arch/tile/mm/homecache.c
++++ b/arch/tile/mm/homecache.c
+@@ -417,7 +417,7 @@ void __homecache_free_pages(struct page
+ if (put_page_testzero(page)) {
+ homecache_change_page_home(page, order, PAGE_HOME_HASH);
+ if (order == 0) {
+- free_hot_cold_page(page, 0);
++ free_hot_cold_page(page, false);
+ } else {
+ init_page_count(page);
+ __free_pages(page, order);
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -1614,7 +1614,7 @@ out_finish:
+
+ static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
+ {
+- release_pages(req->pages, req->num_pages, 0);
++ release_pages(req->pages, req->num_pages, false);
+ }
+
+ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -369,8 +369,8 @@ void *alloc_pages_exact_nid(int nid, siz
+
+ extern void __free_pages(struct page *page, unsigned int order);
+ extern void free_pages(unsigned long addr, unsigned int order);
+-extern void free_hot_cold_page(struct page *page, int cold);
+-extern void free_hot_cold_page_list(struct list_head *list, int cold);
++extern void free_hot_cold_page(struct page *page, bool cold);
++extern void free_hot_cold_page_list(struct list_head *list, bool cold);
+
+ extern void __free_memcg_kmem_pages(struct page *page, unsigned int order);
+ extern void free_memcg_kmem_pages(unsigned long addr, unsigned int order);
+--- a/include/linux/pagemap.h
++++ b/include/linux/pagemap.h
+@@ -99,7 +99,7 @@ static inline void mapping_set_gfp_mask(
+
+ #define page_cache_get(page) get_page(page)
+ #define page_cache_release(page) put_page(page)
+-void release_pages(struct page **pages, int nr, int cold);
++void release_pages(struct page **pages, int nr, bool cold);
+
+ /*
+ * speculatively take a reference to a page.
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -441,7 +441,7 @@ mem_cgroup_uncharge_swapcache(struct pag
+ #define free_page_and_swap_cache(page) \
+ page_cache_release(page)
+ #define free_pages_and_swap_cache(pages, nr) \
+- release_pages((pages), (nr), 0);
++ release_pages((pages), (nr), false);
+
+ static inline void show_swap_cache_info(void)
+ {
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1210,7 +1210,7 @@ retry_reserve:
+ */
+ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list,
+- int migratetype, int cold)
++ int migratetype, bool cold)
+ {
+ int i;
+
+@@ -1229,7 +1229,7 @@ static int rmqueue_bulk(struct zone *zon
+ * merge IO requests if the physical pages are ordered
+ * properly.
+ */
+- if (likely(cold == 0))
++ if (likely(!cold))
+ list_add(&page->lru, list);
+ else
+ list_add_tail(&page->lru, list);
+@@ -1390,9 +1390,9 @@ void mark_free_pages(struct zone *zone)
+
+ /*
+ * Free a 0-order page
+- * cold == 1 ? free a cold page : free a hot page
++ * cold == true ? free a cold page : free a hot page
+ */
+-void free_hot_cold_page(struct page *page, int cold)
++void free_hot_cold_page(struct page *page, bool cold)
+ {
+ struct zone *zone = page_zone(page);
+ struct per_cpu_pages *pcp;
+@@ -1424,10 +1424,10 @@ void free_hot_cold_page(struct page *pag
+ }
+
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+- if (cold)
+- list_add_tail(&page->lru, &pcp->lists[migratetype]);
+- else
++ if (!cold)
+ list_add(&page->lru, &pcp->lists[migratetype]);
++ else
++ list_add_tail(&page->lru, &pcp->lists[migratetype]);
+ pcp->count++;
+ if (pcp->count >= pcp->high) {
+ unsigned long batch = ACCESS_ONCE(pcp->batch);
+@@ -1442,7 +1442,7 @@ out:
+ /*
+ * Free a list of 0-order pages
+ */
+-void free_hot_cold_page_list(struct list_head *list, int cold)
++void free_hot_cold_page_list(struct list_head *list, bool cold)
+ {
+ struct page *page, *next;
+
+@@ -1559,7 +1559,7 @@ struct page *buffered_rmqueue(struct zon
+ {
+ unsigned long flags;
+ struct page *page;
+- int cold = !!(gfp_flags & __GFP_COLD);
++ bool cold = ((gfp_flags & __GFP_COLD) != 0);
+
+ again:
+ if (likely(order == 0)) {
+@@ -2868,7 +2868,7 @@ void __free_pages(struct page *page, uns
+ {
+ if (put_page_testzero(page)) {
+ if (order == 0)
+- free_hot_cold_page(page, 0);
++ free_hot_cold_page(page, false);
+ else
+ __free_pages_ok(page, order);
+ }
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -67,7 +67,7 @@ static void __page_cache_release(struct
+ static void __put_single_page(struct page *page)
+ {
+ __page_cache_release(page);
+- free_hot_cold_page(page, 0);
++ free_hot_cold_page(page, false);
+ }
+
+ static void __put_compound_page(struct page *page)
+@@ -826,7 +826,7 @@ void lru_add_drain_all(void)
+ * grabbed the page via the LRU. If it did, give up: shrink_inactive_list()
+ * will free it.
+ */
+-void release_pages(struct page **pages, int nr, int cold)
++void release_pages(struct page **pages, int nr, bool cold)
+ {
+ int i;
+ LIST_HEAD(pages_to_free);
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -270,7 +270,7 @@ void free_pages_and_swap_cache(struct pa
+
+ for (i = 0; i < todo; i++)
+ free_swap_cache(pagep[i]);
+- release_pages(pagep, todo, 0);
++ release_pages(pagep, todo, false);
+ pagep += todo;
+ nr -= todo;
+ }
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1107,7 +1107,7 @@ keep:
+ VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
+ }
+
+- free_hot_cold_page_list(&free_pages, 1);
++ free_hot_cold_page_list(&free_pages, true);
+
+ list_splice(&ret_pages, page_list);
+ count_vm_events(PGACTIVATE, pgactivate);
+@@ -1505,7 +1505,7 @@ shrink_inactive_list(unsigned long nr_to
+
+ spin_unlock_irq(&zone->lru_lock);
+
+- free_hot_cold_page_list(&page_list, 1);
++ free_hot_cold_page_list(&page_list, true);
+
+ /*
+ * If reclaim is isolating dirty pages under writeback, it implies
+@@ -1725,7 +1725,7 @@ static void shrink_active_list(unsigned
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
+ spin_unlock_irq(&zone->lru_lock);
+
+- free_hot_cold_page_list(&l_hold, 1);
++ free_hot_cold_page_list(&l_hold, true);
+ }
+
+ #ifdef CONFIG_SWAP
--- /dev/null
+From cfc47a2803db42140167b92d991ef04018e162c7 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:19 -0700
+Subject: mm: page_alloc: lookup pageblock migratetype with IRQs enabled during free
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit cfc47a2803db42140167b92d991ef04018e162c7 upstream.
+
+get_pageblock_migratetype() is called during free with IRQs disabled.
+This is unnecessary and keeps IRQs disabled for longer than needed, so
+look up the pageblock migratetype before disabling IRQs.
+
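+For illustration only (not part of this patch), the effect is to shrink
+the IRQs-off window by doing the pageblock lookup first:
+
+	/* before: lookup runs with IRQs disabled */
+	local_irq_save(flags);
+	migratetype = get_pfnblock_migratetype(page, pfn);
+	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
+
+	/* after: lookup moved out of the critical section */
+	migratetype = get_pfnblock_migratetype(page, pfn);
+	local_irq_save(flags);
+	free_one_page(page_zone(page), page, pfn, order, migratetype);
+	local_irq_restore(flags);
+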
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -774,9 +774,9 @@ static void __free_pages_ok(struct page
+ if (!free_pages_prepare(page, order))
+ return;
+
++ migratetype = get_pfnblock_migratetype(page, pfn);
+ local_irq_save(flags);
+ __count_vm_events(PGFREE, 1 << order);
+- migratetype = get_pfnblock_migratetype(page, pfn);
+ set_freepage_migratetype(page, migratetype);
+ free_one_page(page_zone(page), page, pfn, order, migratetype);
+ local_irq_restore(flags);
--- /dev/null
+From a6e21b14f22041382e832d30deda6f26f37b1097 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:12 -0700
+Subject: mm: page_alloc: only check the alloc flags and gfp_mask for dirty once
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit a6e21b14f22041382e832d30deda6f26f37b1097 upstream.
+
+Currently the dirty-throttling check on alloc_flags and gfp_mask is
+recalculated once per zone in the zonelist; calculate it once per
+allocation attempt instead, as sketched below.
+
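+For illustration only (not part of this patch): the result depends only
+on alloc_flags and gfp_mask, which are invariant across the zonelist
+walk, so it can be hoisted out of the loop:
+
+	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
+				   (gfp_mask & __GFP_WRITE);
+
+	for_each_zone_zonelist_nodemask(zone, z, zonelist,
+					high_zoneidx, nodemask) {
+		...
+		if (consider_zone_dirty && !zone_dirty_ok(zone))
+			continue;
+		...
+	}
+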
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1940,6 +1940,8 @@ get_page_from_freelist(gfp_t gfp_mask, n
+ nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+ int zlc_active = 0; /* set if using zonelist_cache */
+ int did_zlc_setup = 0; /* just call zlc_setup() one time */
++ bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
++ (gfp_mask & __GFP_WRITE);
+
+ zonelist_scan:
+ /*
+@@ -1998,8 +2000,7 @@ zonelist_scan:
+ * will require awareness of zones in the
+ * dirty-throttling and the flusher threads.
+ */
+- if ((alloc_flags & ALLOC_WMARK_LOW) &&
+- (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
++ if (consider_zone_dirty && !zone_dirty_ok(zone))
+ continue;
+
+ mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
--- /dev/null
+From d34c5fa06fade08a689fc171bf756fba2858ae73 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:10 -0700
+Subject: mm: page_alloc: only check the zone id check if pages are buddies
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit d34c5fa06fade08a689fc171bf756fba2858ae73 upstream.
+
+A node/zone index is used to check if pages are compatible for merging
+but this happens unconditionally even if the buddy page is not free. Defer
+the calculation as long as possible. Ideally we would check the zone boundary
+but nodes can overlap.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -508,16 +508,26 @@ static inline int page_is_buddy(struct p
+ if (!pfn_valid_within(page_to_pfn(buddy)))
+ return 0;
+
+- if (page_zone_id(page) != page_zone_id(buddy))
+- return 0;
+-
+ if (page_is_guard(buddy) && page_order(buddy) == order) {
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
++
++ if (page_zone_id(page) != page_zone_id(buddy))
++ return 0;
++
+ return 1;
+ }
+
+ if (PageBuddy(buddy) && page_order(buddy) == order) {
+ VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
++
++ /*
++ * zone check is done late to avoid uselessly
++ * calculating zone/node ids for pages that could
++ * never merge.
++ */
++ if (page_zone_id(page) != page_zone_id(buddy))
++ return 0;
++
+ return 1;
+ }
+ return 0;
--- /dev/null
+From dc4b0caff24d9b2918e9f27bc65499ee63187eba Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:17 -0700
+Subject: mm: page_alloc: reduce number of times page_to_pfn is called
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit dc4b0caff24d9b2918e9f27bc65499ee63187eba upstream.
+
+In the free path we calculate page_to_pfn multiple times. Reduce that.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mmzone.h | 9 +++++++--
+ include/linux/pageblock-flags.h | 33 +++++++++++++--------------------
+ mm/page_alloc.c | 34 +++++++++++++++++++---------------
+ 3 files changed, 39 insertions(+), 37 deletions(-)
+
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -78,10 +78,15 @@ extern int page_group_by_mobility_disabl
+ #define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
+ #define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)
+
+-static inline int get_pageblock_migratetype(struct page *page)
++#define get_pageblock_migratetype(page) \
++ get_pfnblock_flags_mask(page, page_to_pfn(page), \
++ PB_migrate_end, MIGRATETYPE_MASK)
++
++static inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
+ {
+ BUILD_BUG_ON(PB_migrate_end - PB_migrate != 2);
+- return get_pageblock_flags_mask(page, PB_migrate_end, MIGRATETYPE_MASK);
++ return get_pfnblock_flags_mask(page, pfn, PB_migrate_end,
++ MIGRATETYPE_MASK);
+ }
+
+ struct free_area {
+--- a/include/linux/pageblock-flags.h
++++ b/include/linux/pageblock-flags.h
+@@ -65,33 +65,26 @@ extern int pageblock_order;
+ /* Forward declaration */
+ struct page;
+
+-unsigned long get_pageblock_flags_mask(struct page *page,
++unsigned long get_pfnblock_flags_mask(struct page *page,
++ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+-void set_pageblock_flags_mask(struct page *page,
++
++void set_pfnblock_flags_mask(struct page *page,
+ unsigned long flags,
++ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask);
+
+ /* Declarations for getting and setting flags. See mm/page_alloc.c */
+-static inline unsigned long get_pageblock_flags_group(struct page *page,
+- int start_bitidx, int end_bitidx)
+-{
+- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
+- unsigned long mask = (1 << nr_flag_bits) - 1;
+-
+- return get_pageblock_flags_mask(page, end_bitidx, mask);
+-}
+-
+-static inline void set_pageblock_flags_group(struct page *page,
+- unsigned long flags,
+- int start_bitidx, int end_bitidx)
+-{
+- unsigned long nr_flag_bits = end_bitidx - start_bitidx + 1;
+- unsigned long mask = (1 << nr_flag_bits) - 1;
+-
+- set_pageblock_flags_mask(page, flags, end_bitidx, mask);
+-}
++#define get_pageblock_flags_group(page, start_bitidx, end_bitidx) \
++ get_pfnblock_flags_mask(page, page_to_pfn(page), \
++ end_bitidx, \
++ (1 << (end_bitidx - start_bitidx + 1)) - 1)
++#define set_pageblock_flags_group(page, flags, start_bitidx, end_bitidx) \
++ set_pfnblock_flags_mask(page, flags, page_to_pfn(page), \
++ end_bitidx, \
++ (1 << (end_bitidx - start_bitidx + 1)) - 1)
+
+ #ifdef CONFIG_COMPACTION
+ #define get_pageblock_skip(page) \
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -560,6 +560,7 @@ static inline int page_is_buddy(struct p
+ */
+
+ static inline void __free_one_page(struct page *page,
++ unsigned long pfn,
+ struct zone *zone, unsigned int order,
+ int migratetype)
+ {
+@@ -576,7 +577,7 @@ static inline void __free_one_page(struc
+
+ VM_BUG_ON(migratetype == -1);
+
+- page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
++ page_idx = pfn & ((1 << MAX_ORDER) - 1);
+
+ VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
+ VM_BUG_ON_PAGE(bad_range(zone, page), page);
+@@ -711,7 +712,7 @@ static void free_pcppages_bulk(struct zo
+ list_del(&page->lru);
+ mt = get_freepage_migratetype(page);
+ /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+- __free_one_page(page, zone, 0, mt);
++ __free_one_page(page, page_to_pfn(page), zone, 0, mt);
+ trace_mm_page_pcpu_drain(page, 0, mt);
+ if (likely(!is_migrate_isolate_page(page))) {
+ __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
+@@ -723,13 +724,15 @@ static void free_pcppages_bulk(struct zo
+ spin_unlock(&zone->lock);
+ }
+
+-static void free_one_page(struct zone *zone, struct page *page, unsigned int order,
++static void free_one_page(struct zone *zone,
++ struct page *page, unsigned long pfn,
++ unsigned int order,
+ int migratetype)
+ {
+ spin_lock(&zone->lock);
+ zone->pages_scanned = 0;
+
+- __free_one_page(page, zone, order, migratetype);
++ __free_one_page(page, pfn, zone, order, migratetype);
+ if (unlikely(!is_migrate_isolate(migratetype)))
+ __mod_zone_freepage_state(zone, 1 << order, migratetype);
+ spin_unlock(&zone->lock);
+@@ -766,15 +769,16 @@ static void __free_pages_ok(struct page
+ {
+ unsigned long flags;
+ int migratetype;
++ unsigned long pfn = page_to_pfn(page);
+
+ if (!free_pages_prepare(page, order))
+ return;
+
+ local_irq_save(flags);
+ __count_vm_events(PGFREE, 1 << order);
+- migratetype = get_pageblock_migratetype(page);
++ migratetype = get_pfnblock_migratetype(page, pfn);
+ set_freepage_migratetype(page, migratetype);
+- free_one_page(page_zone(page), page, order, migratetype);
++ free_one_page(page_zone(page), page, pfn, order, migratetype);
+ local_irq_restore(flags);
+ }
+
+@@ -1393,12 +1397,13 @@ void free_hot_cold_page(struct page *pag
+ struct zone *zone = page_zone(page);
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
++ unsigned long pfn = page_to_pfn(page);
+ int migratetype;
+
+ if (!free_pages_prepare(page, 0))
+ return;
+
+- migratetype = get_pageblock_migratetype(page);
++ migratetype = get_pfnblock_migratetype(page, pfn);
+ set_freepage_migratetype(page, migratetype);
+ local_irq_save(flags);
+ __count_vm_event(PGFREE);
+@@ -1412,7 +1417,7 @@ void free_hot_cold_page(struct page *pag
+ */
+ if (migratetype >= MIGRATE_PCPTYPES) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
+- free_one_page(zone, page, 0, migratetype);
++ free_one_page(zone, page, pfn, 0, migratetype);
+ goto out;
+ }
+ migratetype = MIGRATE_MOVABLE;
+@@ -6068,17 +6073,16 @@ static inline int pfn_to_bitidx(struct z
+ * @end_bitidx: The last bit of interest
+ * returns pageblock_bits flags
+ */
+-unsigned long get_pageblock_flags_mask(struct page *page,
++unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask)
+ {
+ struct zone *zone;
+ unsigned long *bitmap;
+- unsigned long pfn, bitidx, word_bitidx;
++ unsigned long bitidx, word_bitidx;
+ unsigned long word;
+
+ zone = page_zone(page);
+- pfn = page_to_pfn(page);
+ bitmap = get_pageblock_bitmap(zone, pfn);
+ bitidx = pfn_to_bitidx(zone, pfn);
+ word_bitidx = bitidx / BITS_PER_LONG;
+@@ -6090,25 +6094,25 @@ unsigned long get_pageblock_flags_mask(s
+ }
+
+ /**
+- * set_pageblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
++ * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
+ * @page: The page within the block of interest
+ * @start_bitidx: The first bit of interest
+ * @end_bitidx: The last bit of interest
+ * @flags: The flags to set
+ */
+-void set_pageblock_flags_mask(struct page *page, unsigned long flags,
++void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
++ unsigned long pfn,
+ unsigned long end_bitidx,
+ unsigned long mask)
+ {
+ struct zone *zone;
+ unsigned long *bitmap;
+- unsigned long pfn, bitidx, word_bitidx;
++ unsigned long bitidx, word_bitidx;
+ unsigned long old_word, word;
+
+ BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
+
+ zone = page_zone(page);
+- pfn = page_to_pfn(page);
+ bitmap = get_pageblock_bitmap(zone, pfn);
+ bitidx = pfn_to_bitidx(zone, pfn);
+ word_bitidx = bitidx / BITS_PER_LONG;
--- /dev/null
+From 5dab29113ca56335c78be3f98bf5ddf2ef8eb6a6 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:14 -0700
+Subject: mm: page_alloc: take the ALLOC_NO_WATERMARK check out of the fast path
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 5dab29113ca56335c78be3f98bf5ddf2ef8eb6a6 upstream.
+
+ALLOC_NO_WATERMARKS is set in a few cases: always by kswapd, always for
+__GFP_MEMALLOC, sometimes for swap-over-nfs, tasks etc. Each of these
+cases is a relatively rare event but the ALLOC_NO_WATERMARKS check is an
+unlikely branch in the fast path. This patch moves the check out of the
+fast path and after it has been determined that the watermarks have not
+been met. This helps the common fast path at the cost of making the slow
+path slower and hitting kswapd with a performance cost. It's a reasonable
+tradeoff.
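+
+The shape of the change can be sketched in plain C (hypothetical names
+and values, not the kernel's code): the rare-flag test moves behind the
+common check, so the hot path pays for it only when the watermark test
+has already failed.
+
+	#include <stdbool.h>
+
+	#define ALLOC_NO_WATERMARKS 0x04	/* illustrative flag value */
+
+	struct zone_stub { long free_pages; long watermark; };
+
+	/* Before: the rare flag is tested on every call, even though
+	 * the watermark check would usually succeed anyway.
+	 */
+	static bool zone_ok_before(struct zone_stub *z, int alloc_flags)
+	{
+		if (alloc_flags & ALLOC_NO_WATERMARKS)	/* almost never taken */
+			return true;
+		return z->free_pages >= z->watermark;
+	}
+
+	/* After: the common case is a single comparison; the rare flag
+	 * is consulted only once the watermark check has failed.
+	 */
+	static bool zone_ok_after(struct zone_stub *z, int alloc_flags)
+	{
+		if (z->free_pages >= z->watermark)	/* fast path */
+			return true;
+		return alloc_flags & ALLOC_NO_WATERMARKS;	/* slow path */
+	}
+
+	int main(void)
+	{
+		struct zone_stub z = { .free_pages = 10, .watermark = 32 };
+
+		/* Both variants agree; only the branch order differs. */
+		return zone_ok_before(&z, ALLOC_NO_WATERMARKS) ==
+		       zone_ok_after(&z, ALLOC_NO_WATERMARKS) ? 0 : 1;
+	}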
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1959,9 +1959,6 @@ zonelist_scan:
+ (alloc_flags & ALLOC_CPUSET) &&
+ !cpuset_zone_allowed_softwall(zone, gfp_mask))
+ continue;
+- BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
+- if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
+- goto try_this_zone;
+ /*
+ * Distribute pages in proportion to the individual
+ * zone size to ensure fair page aging. The zone a
+@@ -2008,6 +2005,11 @@ zonelist_scan:
+ classzone_idx, alloc_flags)) {
+ int ret;
+
++ /* Checked here to keep the fast path fast */
++ BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
++ if (alloc_flags & ALLOC_NO_WATERMARKS)
++ goto try_this_zone;
++
+ if (IS_ENABLED(CONFIG_NUMA) &&
+ !did_zlc_setup && nr_online_nodes > 1) {
+ /*
--- /dev/null
+From 7aeb09f9104b760fc53c98cb7d20d06640baf9e6 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:21 -0700
+Subject: mm: page_alloc: use unsigned int for order in more places
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 7aeb09f9104b760fc53c98cb7d20d06640baf9e6 upstream.
+
+X86 prefers the use of unsigned types for iterators and there is a
+tendency to mix whether a signed or an unsigned type is used for page
+order. This converts a number of sites in mm/page_alloc.c to use
+unsigned int for order where possible.
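+
+The one behavioural hazard in such a conversion is a descending loop:
+with an unsigned counter, "current_order >= order" is always true once
+order is 0, because decrementing past zero wraps to UINT_MAX instead of
+going negative. That is why the __rmqueue_fallback() loop below gains
+an explicit "current_order <= MAX_ORDER-1" wrap guard. A standalone
+sketch of the hazard and the guard (illustrative values, not kernel
+code):
+
+	#include <stdio.h>
+
+	#define MAX_ORDER 11
+
+	int main(void)
+	{
+		unsigned int order = 0;		/* order-0 request */
+		unsigned int current_order;
+
+		/* BROKEN with unsigned: "current_order >= order" never
+		 * fails for order == 0, so after current_order-- wraps
+		 * from 0 to UINT_MAX the loop runs with a bogus index:
+		 *
+		 * for (current_order = MAX_ORDER - 1;
+		 *      current_order >= order; --current_order)
+		 */
+
+		/* FIXED: the extra upper-bound test catches the wrap,
+		 * since UINT_MAX is far above MAX_ORDER - 1.
+		 */
+		for (current_order = MAX_ORDER - 1;
+		     current_order >= order &&
+		     current_order <= MAX_ORDER - 1;
+		     --current_order)
+			printf("trying order %u\n", current_order);
+
+		return 0;
+	}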
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/mmzone.h | 8 ++++----
+ mm/page_alloc.c | 43 +++++++++++++++++++++++--------------------
+ 2 files changed, 27 insertions(+), 24 deletions(-)
+
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -807,10 +807,10 @@ static inline bool pgdat_is_empty(pg_dat
+ extern struct mutex zonelists_mutex;
+ void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
+ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
+-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+- int classzone_idx, int alloc_flags);
+-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+- int classzone_idx, int alloc_flags);
++bool zone_watermark_ok(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags);
++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags);
+ enum memmap_context {
+ MEMMAP_EARLY,
+ MEMMAP_HOTPLUG,
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -408,7 +408,8 @@ static int destroy_compound_page(struct
+ return bad;
+ }
+
+-static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
++static inline void prep_zero_page(struct page *page, unsigned int order,
++ gfp_t gfp_flags)
+ {
+ int i;
+
+@@ -452,7 +453,7 @@ static inline void set_page_guard_flag(s
+ static inline void clear_page_guard_flag(struct page *page) { }
+ #endif
+
+-static inline void set_page_order(struct page *page, int order)
++static inline void set_page_order(struct page *page, unsigned int order)
+ {
+ set_page_private(page, order);
+ __SetPageBuddy(page);
+@@ -503,7 +504,7 @@ __find_buddy_index(unsigned long page_id
+ * For recording page's order, we use page_private(page).
+ */
+ static inline int page_is_buddy(struct page *page, struct page *buddy,
+- int order)
++ unsigned int order)
+ {
+ if (!pfn_valid_within(page_to_pfn(buddy)))
+ return 0;
+@@ -722,7 +723,7 @@ static void free_pcppages_bulk(struct zo
+ spin_unlock(&zone->lock);
+ }
+
+-static void free_one_page(struct zone *zone, struct page *page, int order,
++static void free_one_page(struct zone *zone, struct page *page, unsigned int order,
+ int migratetype)
+ {
+ spin_lock(&zone->lock);
+@@ -904,7 +905,7 @@ static inline int check_new_page(struct
+ return 0;
+ }
+
+-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
++static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
+ {
+ int i;
+
+@@ -1115,16 +1116,17 @@ static int try_to_steal_freepages(struct
+
+ /* Remove an element from the buddy allocator from the fallback list */
+ static inline struct page *
+-__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
++__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
+ {
+ struct free_area *area;
+- int current_order;
++ unsigned int current_order;
+ struct page *page;
+ int migratetype, new_type, i;
+
+ /* Find the largest possible block of pages in the other list */
+- for (current_order = MAX_ORDER-1; current_order >= order;
+- --current_order) {
++ for (current_order = MAX_ORDER-1;
++ current_order >= order && current_order <= MAX_ORDER-1;
++ --current_order) {
+ for (i = 0;; i++) {
+ migratetype = fallbacks[start_migratetype][i];
+
+@@ -1352,7 +1354,7 @@ void mark_free_pages(struct zone *zone)
+ {
+ unsigned long pfn, max_zone_pfn;
+ unsigned long flags;
+- int order, t;
++ unsigned int order, t;
+ struct list_head *curr;
+
+ if (zone_is_empty(zone))
+@@ -1547,8 +1549,8 @@ int split_free_page(struct page *page)
+ */
+ static inline
+ struct page *buffered_rmqueue(struct zone *preferred_zone,
+- struct zone *zone, int order, gfp_t gfp_flags,
+- int migratetype)
++ struct zone *zone, unsigned int order,
++ gfp_t gfp_flags, int migratetype)
+ {
+ unsigned long flags;
+ struct page *page;
+@@ -1697,8 +1699,9 @@ static inline bool should_fail_alloc_pag
+ * Return true if free pages are above 'mark'. This takes into account the order
+ * of the allocation.
+ */
+-static bool __zone_watermark_ok(struct zone *z, int order, unsigned long mark,
+- int classzone_idx, int alloc_flags, long free_pages)
++static bool __zone_watermark_ok(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags,
++ long free_pages)
+ {
+ /* free_pages my go negative - that's OK */
+ long min = mark;
+@@ -1732,15 +1735,15 @@ static bool __zone_watermark_ok(struct z
+ return true;
+ }
+
+-bool zone_watermark_ok(struct zone *z, int order, unsigned long mark,
++bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
+ int classzone_idx, int alloc_flags)
+ {
+ return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
+ zone_page_state(z, NR_FREE_PAGES));
+ }
+
+-bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
+- int classzone_idx, int alloc_flags)
++bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
++ unsigned long mark, int classzone_idx, int alloc_flags)
+ {
+ long free_pages = zone_page_state(z, NR_FREE_PAGES);
+
+@@ -4137,7 +4140,7 @@ void __meminit memmap_init_zone(unsigned
+
+ static void __meminit zone_init_free_lists(struct zone *zone)
+ {
+- int order, t;
++ unsigned int order, t;
+ for_each_migratetype_order(order, t) {
+ INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
+ zone->free_area[order].nr_free = 0;
+@@ -6477,7 +6480,7 @@ __offline_isolated_pages(unsigned long s
+ {
+ struct page *page;
+ struct zone *zone;
+- int order, i;
++ unsigned int order, i;
+ unsigned long pfn;
+ unsigned long flags;
+ /* find the first valid pfn */
+@@ -6529,7 +6532,7 @@ bool is_free_buddy_page(struct page *pag
+ struct zone *zone = page_zone(page);
+ unsigned long pfn = page_to_pfn(page);
+ unsigned long flags;
+- int order;
++ unsigned int order;
+
+ spin_lock_irqsave(&zone->lock, flags);
+ for (order = 0; order < MAX_ORDER; order++) {
--- /dev/null
+From 07a427884348d38a6fd56fa4d78249c407196650 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:24 -0700
+Subject: mm: shmem: avoid atomic operation during shmem_getpage_gfp
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 07a427884348d38a6fd56fa4d78249c407196650 upstream.
+
+shmem_getpage_gfp uses an atomic operation to set the SwapBacked field
+before the page is even added to the LRU or otherwise visible. This is
+unnecessary, as nothing can race against the page at that point. Use an
+unlocked variant.
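+
+The difference being exploited: SetPageSwapBacked() is a locked
+read-modify-write (atomic set_bit), while __SetPageSwapBacked() is a
+plain store, safe here because the freshly allocated page is not yet in
+the page cache or on the LRU, so no other CPU can reach it. A userspace
+analogue of the two flavours (GCC/Clang builtins, not the kernel's
+helpers):
+
+	#include <stdio.h>
+
+	#define PG_swapbacked 1			/* illustrative bit index */
+
+	static unsigned long page_flags;	/* stands in for page->flags */
+
+	int main(void)
+	{
+		/* Atomic set, as SetPageSwapBacked() does: a locked RMW,
+		 * needed when other CPUs may update the same word.
+		 */
+		__atomic_fetch_or(&page_flags, 1UL << PG_swapbacked,
+				  __ATOMIC_RELAXED);
+
+		/* Non-atomic set, as __SetPageSwapBacked() does: an
+		 * ordinary load/OR/store, fine only while the object is
+		 * still private to the initialising thread.
+		 */
+		page_flags |= 1UL << PG_swapbacked;
+
+		printf("flags = %#lx\n", page_flags);
+		return 0;
+	}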
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/page-flags.h | 1 +
+ mm/shmem.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/include/linux/page-flags.h
++++ b/include/linux/page-flags.h
+@@ -208,6 +208,7 @@ PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinn
+ PAGEFLAG(SavePinned, savepinned); /* Xen */
+ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+ PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
++ __SETPAGEFLAG(SwapBacked, swapbacked)
+
+ __PAGEFLAG(SlobFree, slob_free)
+
+--- a/mm/shmem.c
++++ b/mm/shmem.c
+@@ -1140,7 +1140,7 @@ repeat:
+ goto decused;
+ }
+
+- SetPageSwapBacked(page);
++ __SetPageSwapBacked(page);
+ __set_page_locked(page);
+ error = mem_cgroup_cache_charge(page, current->mm,
+ gfp & GFP_RECLAIM_MASK);
include-linux-jump_label.h-expose-the-reference-count.patch
mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch
mm-page_alloc-calculate-classzone_idx-once-from-the.patch
+mm-page_alloc-only-check-the-zone-id-check-if-pages-are-buddies.patch
+mm-page_alloc-only-check-the-alloc-flags-and-gfp_mask-for-dirty-once.patch
+mm-page_alloc-take-the-alloc_no_watermark-check-out-of-the-fast-path.patch
+mm-page_alloc-use-unsigned-int-for-order-in-more-places.patch
+mm-page_alloc-reduce-number-of-times-page_to_pfn-is-called.patch
+mm-page_alloc-convert-hot-cold-parameter-and-immediate-callers-to-bool.patch
+mm-page_alloc-lookup-pageblock-migratetype-with-irqs-enabled-during-free.patch
+mm-shmem-avoid-atomic-operation-during-shmem_getpage_gfp.patch
+mm-do-not-use-atomic-operations-when-releasing-pages.patch
+mm-do-not-use-unnecessary-atomic-operations-when-adding-pages-to-the-lru.patch
+fs-buffer-do-not-use-unnecessary-atomic-operations-when-discarding-buffers.patch
+mm-non-atomically-mark-page-accessed-during-page-cache-allocation-where-possible.patch