git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jan 2015 00:43:35 +0000 (16:43 -0800)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Jan 2015 00:43:35 +0000 (16:43 -0800)
added patches:
include-linux-jump_label.h-expose-the-reference-count.patch
mm-page_alloc-calculate-classzone_idx-once-from-the.patch
mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch
mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch
mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch
mm-swap.c-clean-up-lru_cache_add-functions.patch

queue-3.14/include-linux-jump_label.h-expose-the-reference-count.patch [new file with mode: 0644]
queue-3.14/mm-page_alloc-calculate-classzone_idx-once-from-the.patch [new file with mode: 0644]
queue-3.14/mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch [new file with mode: 0644]
queue-3.14/mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch [new file with mode: 0644]
queue-3.14/mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch [new file with mode: 0644]
queue-3.14/mm-swap.c-clean-up-lru_cache_add-functions.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/include-linux-jump_label.h-expose-the-reference-count.patch b/queue-3.14/include-linux-jump_label.h-expose-the-reference-count.patch
new file mode 100644 (file)
index 0000000..a153872
--- /dev/null
@@ -0,0 +1,97 @@
+From ea5e9539abf1258f23e725cb9cb25aa74efa29eb Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:07 -0700
+Subject: include/linux/jump_label.h: expose the reference count
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit ea5e9539abf1258f23e725cb9cb25aa74efa29eb upstream.
+
+This patch exposes the jump_label reference count in preparation for the
+next patch.  cpusets cares about both the jump_label being enabled and how
+many users of the cpusets there currently are.
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/jump_label.h |   20 +++++++++++++-------
+ 1 file changed, 13 insertions(+), 7 deletions(-)
+
+--- a/include/linux/jump_label.h
++++ b/include/linux/jump_label.h
+@@ -69,6 +69,10 @@ struct static_key {
+ # include <asm/jump_label.h>
+ # define HAVE_JUMP_LABEL
++#else
++struct static_key {
++      atomic_t enabled;
++};
+ #endif        /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
+ enum jump_label_type {
+@@ -79,6 +83,12 @@ enum jump_label_type {
+ struct module;
+ #include <linux/atomic.h>
++
++static inline int static_key_count(struct static_key *key)
++{
++      return atomic_read(&key->enabled);
++}
++
+ #ifdef HAVE_JUMP_LABEL
+ #define JUMP_LABEL_TYPE_FALSE_BRANCH  0UL
+@@ -134,10 +144,6 @@ extern void jump_label_apply_nops(struct
+ #else  /* !HAVE_JUMP_LABEL */
+-struct static_key {
+-      atomic_t enabled;
+-};
+-
+ static __always_inline void jump_label_init(void)
+ {
+       static_key_initialized = true;
+@@ -145,14 +151,14 @@ static __always_inline void jump_label_i
+ static __always_inline bool static_key_false(struct static_key *key)
+ {
+-      if (unlikely(atomic_read(&key->enabled) > 0))
++      if (unlikely(static_key_count(key) > 0))
+               return true;
+       return false;
+ }
+ static __always_inline bool static_key_true(struct static_key *key)
+ {
+-      if (likely(atomic_read(&key->enabled) > 0))
++      if (likely(static_key_count(key) > 0))
+               return true;
+       return false;
+ }
+@@ -194,7 +200,7 @@ static inline int jump_label_apply_nops(
+ static inline bool static_key_enabled(struct static_key *key)
+ {
+-      return (atomic_read(&key->enabled) > 0);
++      return static_key_count(key) > 0;
+ }
+ #endif        /* _LINUX_JUMP_LABEL_H */
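
For readers following along, here is a minimal user-space sketch of the !HAVE_JUMP_LABEL fallback the hunk above ends up with: the key is just an atomic reference count, and static_key_count() exposes the raw value rather than only a boolean. This is illustrative C11 code, not kernel code; the names merely mirror the kernel's, and a plain atomic stands in for the real jump-label machinery.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the atomic-counter fallback in the patch above (illustrative only). */
struct static_key {
        atomic_int enabled;
};

static inline int static_key_count(struct static_key *key)
{
        return atomic_load(&key->enabled);
}

static inline bool static_key_false(struct static_key *key)
{
        /* "false by default": true only while the count is positive */
        return static_key_count(key) > 0;
}

int main(void)
{
        struct static_key key = { .enabled = 0 };

        atomic_fetch_add(&key.enabled, 1);      /* stand-in for static_key_slow_inc() */
        atomic_fetch_add(&key.enabled, 1);

        printf("count=%d, branch enabled=%s\n",
               static_key_count(&key),
               static_key_false(&key) ? "yes" : "no");
        return 0;
}

The sketch builds with any C11 compiler; the point is simply that the count, not just "enabled or not", is now visible to callers such as the cpusets patch later in this series.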
diff --git a/queue-3.14/mm-page_alloc-calculate-classzone_idx-once-from-the.patch b/queue-3.14/mm-page_alloc-calculate-classzone_idx-once-from-the.patch
new file mode 100644 (file)
index 0000000..7b44d0e
--- /dev/null
@@ -0,0 +1,266 @@
+From d8846374a85f4290a473a4e2a64c1ba046c4a0e1 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:33 -0700
+Subject: mm: page_alloc: calculate classzone_idx once from the
+ zonelist ref
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit d8846374a85f4290a473a4e2a64c1ba046c4a0e1 upstream.
+
+There is no need to calculate zone_idx(preferred_zone) multiple times
+or use the pgdat to figure it out.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: David Rientjes <rientjes@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |   60 ++++++++++++++++++++++++++++++++------------------------
+ 1 file changed, 35 insertions(+), 25 deletions(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1922,17 +1922,15 @@ static inline void init_zone_allows_recl
+ static struct page *
+ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
+               struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
+-              struct zone *preferred_zone, int migratetype)
++              struct zone *preferred_zone, int classzone_idx, int migratetype)
+ {
+       struct zoneref *z;
+       struct page *page = NULL;
+-      int classzone_idx;
+       struct zone *zone;
+       nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+       int zlc_active = 0;             /* set if using zonelist_cache */
+       int did_zlc_setup = 0;          /* just call zlc_setup() one time */
+-      classzone_idx = zone_idx(preferred_zone);
+ zonelist_scan:
+       /*
+        * Scan zonelist, looking for a zone with enough free.
+@@ -2189,7 +2187,7 @@ static inline struct page *
+ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+-      int migratetype)
++      int classzone_idx, int migratetype)
+ {
+       struct page *page;
+@@ -2215,7 +2213,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, un
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+               order, zonelist, high_zoneidx,
+               ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+-              preferred_zone, migratetype);
++              preferred_zone, classzone_idx, migratetype);
+       if (page)
+               goto out;
+@@ -2250,7 +2248,7 @@ static struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-      int migratetype, enum migrate_mode mode,
++      int classzone_idx, int migratetype, enum migrate_mode mode,
+       bool *contended_compaction, bool *deferred_compaction,
+       unsigned long *did_some_progress)
+ {
+@@ -2278,7 +2276,7 @@ __alloc_pages_direct_compact(gfp_t gfp_m
+               page = get_page_from_freelist(gfp_mask, nodemask,
+                               order, zonelist, high_zoneidx,
+                               alloc_flags & ~ALLOC_NO_WATERMARKS,
+-                              preferred_zone, migratetype);
++                              preferred_zone, classzone_idx, migratetype);
+               if (page) {
+                       preferred_zone->compact_blockskip_flush = false;
+                       compaction_defer_reset(preferred_zone, order, true);
+@@ -2310,7 +2308,8 @@ static inline struct page *
+ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-      int migratetype, enum migrate_mode mode, bool *contended_compaction,
++      int classzone_idx, int migratetype,
++      enum migrate_mode mode, bool *contended_compaction,
+       bool *deferred_compaction, unsigned long *did_some_progress)
+ {
+       return NULL;
+@@ -2350,7 +2349,7 @@ static inline struct page *
+ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+-      int migratetype, unsigned long *did_some_progress)
++      int classzone_idx, int migratetype, unsigned long *did_some_progress)
+ {
+       struct page *page = NULL;
+       bool drained = false;
+@@ -2368,7 +2367,8 @@ retry:
+       page = get_page_from_freelist(gfp_mask, nodemask, order,
+                                       zonelist, high_zoneidx,
+                                       alloc_flags & ~ALLOC_NO_WATERMARKS,
+-                                      preferred_zone, migratetype);
++                                      preferred_zone, classzone_idx,
++                                      migratetype);
+       /*
+        * If an allocation failed after direct reclaim, it could be because
+@@ -2391,14 +2391,14 @@ static inline struct page *
+ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+-      int migratetype)
++      int classzone_idx, int migratetype)
+ {
+       struct page *page;
+       do {
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
+-                      preferred_zone, migratetype);
++                      preferred_zone, classzone_idx, migratetype);
+               if (!page && gfp_mask & __GFP_NOFAIL)
+                       wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
+@@ -2499,7 +2499,7 @@ static inline struct page *
+ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+-      int migratetype)
++      int classzone_idx, int migratetype)
+ {
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       struct page *page = NULL;
+@@ -2548,15 +2548,19 @@ restart:
+        * Find the true preferred zone if the allocation is unconstrained by
+        * cpusets.
+        */
+-      if (!(alloc_flags & ALLOC_CPUSET) && !nodemask)
+-              first_zones_zonelist(zonelist, high_zoneidx, NULL,
+-                                      &preferred_zone);
++      if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
++              struct zoneref *preferred_zoneref;
++              preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
++                              NULL,
++                              &preferred_zone);
++              classzone_idx = zonelist_zone_idx(preferred_zoneref);
++      }
+ rebalance:
+       /* This is the last chance, in general, before the goto nopage. */
+       page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+                       high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+-                      preferred_zone, migratetype);
++                      preferred_zone, classzone_idx, migratetype);
+       if (page)
+               goto got_pg;
+@@ -2571,7 +2575,7 @@ rebalance:
+               page = __alloc_pages_high_priority(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+-                              preferred_zone, migratetype);
++                              preferred_zone, classzone_idx, migratetype);
+               if (page) {
+                       goto got_pg;
+               }
+@@ -2602,7 +2606,8 @@ rebalance:
+        */
+       page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+                                       high_zoneidx, nodemask, alloc_flags,
+-                                      preferred_zone, migratetype,
++                                      preferred_zone,
++                                      classzone_idx, migratetype,
+                                       migration_mode, &contended_compaction,
+                                       &deferred_compaction,
+                                       &did_some_progress);
+@@ -2625,7 +2630,8 @@ rebalance:
+                                       zonelist, high_zoneidx,
+                                       nodemask,
+                                       alloc_flags, preferred_zone,
+-                                      migratetype, &did_some_progress);
++                                      classzone_idx, migratetype,
++                                      &did_some_progress);
+       if (page)
+               goto got_pg;
+@@ -2644,7 +2650,7 @@ rebalance:
+                       page = __alloc_pages_may_oom(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask, preferred_zone,
+-                                      migratetype);
++                                      classzone_idx, migratetype);
+                       if (page)
+                               goto got_pg;
+@@ -2685,7 +2691,8 @@ rebalance:
+                */
+               page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
+                                       high_zoneidx, nodemask, alloc_flags,
+-                                      preferred_zone, migratetype,
++                                      preferred_zone,
++                                      classzone_idx, migratetype,
+                                       migration_mode, &contended_compaction,
+                                       &deferred_compaction,
+                                       &did_some_progress);
+@@ -2712,11 +2719,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, u
+ {
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zone *preferred_zone;
++      struct zoneref *preferred_zoneref;
+       struct page *page = NULL;
+       int migratetype = allocflags_to_migratetype(gfp_mask);
+       unsigned int cpuset_mems_cookie;
+       int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
+       struct mem_cgroup *memcg = NULL;
++      int classzone_idx;
+       gfp_mask &= gfp_allowed_mask;
+@@ -2746,11 +2755,12 @@ retry_cpuset:
+       cpuset_mems_cookie = read_mems_allowed_begin();
+       /* The preferred zone is used for statistics later */
+-      first_zones_zonelist(zonelist, high_zoneidx,
++      preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
+                               nodemask ? : &cpuset_current_mems_allowed,
+                               &preferred_zone);
+       if (!preferred_zone)
+               goto out;
++      classzone_idx = zonelist_zone_idx(preferred_zoneref);
+ #ifdef CONFIG_CMA
+       if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+@@ -2760,7 +2770,7 @@ retry:
+       /* First allocation attempt */
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+                       zonelist, high_zoneidx, alloc_flags,
+-                      preferred_zone, migratetype);
++                      preferred_zone, classzone_idx, migratetype);
+       if (unlikely(!page)) {
+               /*
+                * The first pass makes sure allocations are spread
+@@ -2786,7 +2796,7 @@ retry:
+               gfp_mask = memalloc_noio_flags(gfp_mask);
+               page = __alloc_pages_slowpath(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+-                              preferred_zone, migratetype);
++                              preferred_zone, classzone_idx, migratetype);
+       }
+       trace_mm_page_alloc(page, order, gfp_mask, migratetype);
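
The mechanical shape of the change above is: derive classzone_idx once from the zoneref returned by first_zones_zonelist() and thread it through every helper, instead of re-deriving it with zone_idx(preferred_zone) inside get_page_from_freelist(). In miniature, with hypothetical stand-in types rather than the real allocator:

#include <stdio.h>

/* Toy stand-ins for zone/zoneref; illustrative only. */
struct zone { int idx; };

static int zone_idx(const struct zone *z)
{
        return z->idx;
}

/* After the patch: the index arrives precomputed instead of being derived
 * from preferred_zone on every call. */
static int get_page_from_freelist_sketch(const struct zone *preferred_zone,
                                         int classzone_idx)
{
        (void)preferred_zone;           /* no zone_idx(preferred_zone) here any more */
        return classzone_idx;
}

static int alloc_pages_sketch(const struct zone *preferred_zone)
{
        int classzone_idx = zone_idx(preferred_zone);   /* computed once, up front */

        /* ...and passed down through the slow path, compaction, reclaim, ... */
        return get_page_from_freelist_sketch(preferred_zone, classzone_idx);
}

int main(void)
{
        struct zone normal = { .idx = 2 };

        printf("classzone_idx = %d\n", alloc_pages_sketch(&normal));
        return 0;
}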
diff --git a/queue-3.14/mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch b/queue-3.14/mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch
new file mode 100644 (file)
index 0000000..1f25245
--- /dev/null
@@ -0,0 +1,36 @@
+From 800a1e750c7b04c2aa2459afca77e936e01c0029 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:06 -0700
+Subject: mm: page_alloc: do not treat a zone that cannot be used for dirty pages as "full"
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 800a1e750c7b04c2aa2459afca77e936e01c0029 upstream.
+
+If a zone cannot be used for a dirty page then it gets marked "full" which
+is cached in the zlc and later potentially skipped by allocation requests
+that have nothing to do with dirty zones.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1991,7 +1991,7 @@ zonelist_scan:
+                */
+               if ((alloc_flags & ALLOC_WMARK_LOW) &&
+                   (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
+-                      goto this_zone_full;
++                      continue;
+               mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+               if (!zone_watermark_ok(zone, order, mark,
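
The one-liner above is easiest to read as: a zone that only fails this request's dirty-throttling check is skipped for this request, but no longer recorded as "full" in the shared zonelist cache, so unrelated allocations (ones without __GFP_WRITE) still consider it. A toy model of that distinction, with made-up names and a plain array standing in for the zlc bitmap:

#include <stdbool.h>
#include <stdio.h>

#define NR_ZONES 3

static bool zlc_full[NR_ZONES];         /* shared "zone is full" cache (toy zlc) */

static int pick_zone(bool is_write, const bool over_dirty_limit[NR_ZONES],
                     const bool really_full[NR_ZONES])
{
        for (int z = 0; z < NR_ZONES; z++) {
                if (zlc_full[z])
                        continue;               /* cached as full by an earlier request */
                if (is_write && over_dirty_limit[z])
                        continue;               /* per-request reason: skip, do NOT cache */
                if (really_full[z]) {
                        zlc_full[z] = true;     /* genuinely full: cache for everyone */
                        continue;
                }
                return z;
        }
        return -1;
}

int main(void)
{
        bool dirty[NR_ZONES] = { true, false, false };  /* zone 0 over its dirty limit */
        bool full[NR_ZONES]  = { false, true, false };  /* zone 1 below its watermark */

        printf("write allocation -> zone %d\n", pick_zone(true, dirty, full));
        printf("read  allocation -> zone %d\n", pick_zone(false, dirty, full));
        return 0;
}

With the old behaviour, the write allocation would have cached zone 0 as full and the later read allocation would have skipped it too; with the fix, the read still lands in zone 0.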
diff --git a/queue-3.14/mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch b/queue-3.14/mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch
new file mode 100644 (file)
index 0000000..94bad08
--- /dev/null
@@ -0,0 +1,38 @@
+From 65bb371984d6a2c909244eb749e482bb40b72e36 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:05 -0700
+Subject: mm: page_alloc: do not update zlc unless the zlc is active
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 65bb371984d6a2c909244eb749e482bb40b72e36 upstream.
+
+The zlc is used on NUMA machines to quickly skip over zones that are full.
+ However it is always updated, even for the first zone scanned when the
+zlc might not even be active.  As it's a write to a bitmap that
+potentially bounces cache line it's deceptively expensive and most
+machines will not care.  Only update the zlc if it was active.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/page_alloc.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2059,7 +2059,7 @@ try_this_zone:
+               if (page)
+                       break;
+ this_zone_full:
+-              if (IS_ENABLED(CONFIG_NUMA))
++              if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
+                       zlc_mark_zone_full(zonelist, z);
+       }
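
The guard added above simply avoids dirtying a shared cache line when the zonelist cache has not even been set up for this scan. In miniature (toy names, not the real allocator loop):

#include <stdbool.h>
#include <stdio.h>

static bool zlc_active;                 /* becomes true only once zlc_setup() has run */
static unsigned long zlc_writes;        /* stand-in for cache-line-bouncing bitmap writes */

static void mark_zone_full(int zone)
{
        if (!zlc_active)        /* the fix: skip the write while the cache is unused */
                return;
        zlc_writes++;           /* would be zlc_mark_zone_full(zonelist, z) */
        (void)zone;
}

int main(void)
{
        mark_zone_full(0);      /* first zone scanned: zlc not active yet, no write */
        zlc_active = true;      /* a later pass would have called zlc_setup() */
        mark_zone_full(1);
        printf("bitmap writes: %lu\n", zlc_writes);
        return 0;
}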
diff --git a/queue-3.14/mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch b/queue-3.14/mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch
new file mode 100644 (file)
index 0000000..16e3ecc
--- /dev/null
@@ -0,0 +1,159 @@
+From 664eeddeef6539247691197c1ac124d4aa872ab6 Mon Sep 17 00:00:00 2001
+From: Mel Gorman <mgorman@suse.de>
+Date: Wed, 4 Jun 2014 16:10:08 -0700
+Subject: mm: page_alloc: use jump labels to avoid checking number_of_cpusets
+
+From: Mel Gorman <mgorman@suse.de>
+
+commit 664eeddeef6539247691197c1ac124d4aa872ab6 upstream.
+
+If cpusets are not in use then we still check a global variable on every
+page allocation.  Use jump labels to avoid the overhead.
+
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Reviewed-by: Rik van Riel <riel@redhat.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Dave Hansen <dave.hansen@intel.com>
+Cc: Theodore Ts'o <tytso@mit.edu>
+Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephen Rothwell <sfr@canb.auug.org.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpuset.h |   29 ++++++++++++++++++++++++++---
+ kernel/cpuset.c        |   14 ++++----------
+ mm/page_alloc.c        |    3 ++-
+ 3 files changed, 32 insertions(+), 14 deletions(-)
+
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -12,10 +12,31 @@
+ #include <linux/cpumask.h>
+ #include <linux/nodemask.h>
+ #include <linux/mm.h>
++#include <linux/jump_label.h>
+ #ifdef CONFIG_CPUSETS
+-extern int number_of_cpusets; /* How many cpusets are defined in system? */
++extern struct static_key cpusets_enabled_key;
++static inline bool cpusets_enabled(void)
++{
++      return static_key_false(&cpusets_enabled_key);
++}
++
++static inline int nr_cpusets(void)
++{
++      /* jump label reference count + the top-level cpuset */
++      return static_key_count(&cpusets_enabled_key) + 1;
++}
++
++static inline void cpuset_inc(void)
++{
++      static_key_slow_inc(&cpusets_enabled_key);
++}
++
++static inline void cpuset_dec(void)
++{
++      static_key_slow_dec(&cpusets_enabled_key);
++}
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
+@@ -32,13 +53,13 @@ extern int __cpuset_node_allowed_hardwal
+ static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
+ {
+-      return number_of_cpusets <= 1 ||
++      return nr_cpusets() <= 1 ||
+               __cpuset_node_allowed_softwall(node, gfp_mask);
+ }
+ static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
+ {
+-      return number_of_cpusets <= 1 ||
++      return nr_cpusets() <= 1 ||
+               __cpuset_node_allowed_hardwall(node, gfp_mask);
+ }
+@@ -124,6 +145,8 @@ static inline void set_mems_allowed(node
+ #else /* !CONFIG_CPUSETS */
++static inline bool cpusets_enabled(void) { return false; }
++
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -61,12 +61,7 @@
+ #include <linux/cgroup.h>
+ #include <linux/wait.h>
+-/*
+- * Tracks how many cpusets are currently defined in system.
+- * When there is only one cpuset (the root cpuset) we can
+- * short circuit some hooks.
+- */
+-int number_of_cpusets __read_mostly;
++struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
+ /* See "Frequency meter" comments, below. */
+@@ -611,7 +606,7 @@ static int generate_sched_domains(cpumas
+               goto done;
+       }
+-      csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
++      csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
+       if (!csa)
+               goto done;
+       csn = 0;
+@@ -1961,7 +1956,7 @@ static int cpuset_css_online(struct cgro
+       if (is_spread_slab(parent))
+               set_bit(CS_SPREAD_SLAB, &cs->flags);
+-      number_of_cpusets++;
++      cpuset_inc();
+       if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
+               goto out_unlock;
+@@ -2012,7 +2007,7 @@ static void cpuset_css_offline(struct cg
+       if (is_sched_load_balance(cs))
+               update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
+-      number_of_cpusets--;
++      cpuset_dec();
+       clear_bit(CS_ONLINE, &cs->flags);
+       mutex_unlock(&cpuset_mutex);
+@@ -2067,7 +2062,6 @@ int __init cpuset_init(void)
+       if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
+               BUG();
+-      number_of_cpusets = 1;
+       return 0;
+ }
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -1945,7 +1945,8 @@ zonelist_scan:
+               if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
+                       !zlc_zone_worth_trying(zonelist, z, allowednodes))
+                               continue;
+-              if ((alloc_flags & ALLOC_CPUSET) &&
++              if (cpusets_enabled() &&
++                      (alloc_flags & ALLOC_CPUSET) &&
+                       !cpuset_zone_allowed_softwall(zone, gfp_mask))
+                               continue;
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
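
Putting this patch together with the jump_label one earlier in the series: the hot path asks a static key whether any cpuset beyond the root exists, and nr_cpusets() recovers the old counter as "reference count + the top-level cpuset". A user-space approximation, with an atomic counter standing in for the jump label (illustrative names only; there is no real static-key fast path here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Atomic counter standing in for cpusets_enabled_key. */
static atomic_int cpusets_enabled_key;

static bool cpusets_enabled(void)
{
        return atomic_load(&cpusets_enabled_key) > 0;   /* static_key_false() in the kernel */
}

static int nr_cpusets(void)
{
        /* jump label reference count + the top-level cpuset */
        return atomic_load(&cpusets_enabled_key) + 1;
}

static void cpuset_inc(void) { atomic_fetch_add(&cpusets_enabled_key, 1); }
static void cpuset_dec(void) { atomic_fetch_sub(&cpusets_enabled_key, 1); }

int main(void)
{
        printf("boot:        enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
        cpuset_inc();           /* a cpuset beyond the root comes online */
        printf("one cpuset:  enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
        cpuset_dec();
        printf("offline:     enabled=%d nr=%d\n", cpusets_enabled(), nr_cpusets());
        return 0;
}

In the kernel the same check costs a patched no-op branch rather than an atomic read, which is the whole point of switching number_of_cpusets over to a static key.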
diff --git a/queue-3.14/mm-swap.c-clean-up-lru_cache_add-functions.patch b/queue-3.14/mm-swap.c-clean-up-lru_cache_add-functions.patch
new file mode 100644 (file)
index 0000000..c183f6b
--- /dev/null
@@ -0,0 +1,127 @@
+From 2329d3751b082b4fd354f334a88662d72abac52d Mon Sep 17 00:00:00 2001
+From: Jianyu Zhan <nasa4836@gmail.com>
+Date: Wed, 4 Jun 2014 16:07:31 -0700
+Subject: mm/swap.c: clean up *lru_cache_add* functions
+
+From: Jianyu Zhan <nasa4836@gmail.com>
+
+commit 2329d3751b082b4fd354f334a88662d72abac52d upstream.
+
+In mm/swap.c, __lru_cache_add() is exported, but actually there are no
+users outside this file.
+
+This patch unexports __lru_cache_add(), and makes it static.  It also
+exports lru_cache_add_file(), as it is used by cifs and fuse, which can be
+loaded as modules.
+
+Signed-off-by: Jianyu Zhan <nasa4836@gmail.com>
+Cc: Minchan Kim <minchan@kernel.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Shaohua Li <shli@kernel.org>
+Cc: Bob Liu <bob.liu@oracle.com>
+Cc: Seth Jennings <sjenning@linux.vnet.ibm.com>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Cc: Rafael Aquini <aquini@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Khalid Aziz <khalid.aziz@oracle.com>
+Cc: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/swap.h |   19 ++-----------------
+ mm/swap.c            |   31 +++++++++++++++++++++++--------
+ 2 files changed, 25 insertions(+), 25 deletions(-)
+
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -268,8 +268,9 @@ extern unsigned long nr_free_pagecache_p
+ /* linux/mm/swap.c */
+-extern void __lru_cache_add(struct page *);
+ extern void lru_cache_add(struct page *);
++extern void lru_cache_add_anon(struct page *page);
++extern void lru_cache_add_file(struct page *page);
+ extern void lru_add_page_tail(struct page *page, struct page *page_tail,
+                        struct lruvec *lruvec, struct list_head *head);
+ extern void activate_page(struct page *);
+@@ -283,22 +284,6 @@ extern void swap_setup(void);
+ extern void add_page_to_unevictable_list(struct page *page);
+-/**
+- * lru_cache_add: add a page to the page lists
+- * @page: the page to add
+- */
+-static inline void lru_cache_add_anon(struct page *page)
+-{
+-      ClearPageActive(page);
+-      __lru_cache_add(page);
+-}
+-
+-static inline void lru_cache_add_file(struct page *page)
+-{
+-      ClearPageActive(page);
+-      __lru_cache_add(page);
+-}
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+                                       gfp_t gfp_mask, nodemask_t *mask);
+--- a/mm/swap.c
++++ b/mm/swap.c
+@@ -580,13 +580,7 @@ void mark_page_accessed(struct page *pag
+ }
+ EXPORT_SYMBOL(mark_page_accessed);
+-/*
+- * Queue the page for addition to the LRU via pagevec. The decision on whether
+- * to add the page to the [in]active [file|anon] list is deferred until the
+- * pagevec is drained. This gives a chance for the caller of __lru_cache_add()
+- * have the page added to the active list using mark_page_accessed().
+- */
+-void __lru_cache_add(struct page *page)
++static void __lru_cache_add(struct page *page)
+ {
+       struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
+@@ -596,11 +590,32 @@ void __lru_cache_add(struct page *page)
+       pagevec_add(pvec, page);
+       put_cpu_var(lru_add_pvec);
+ }
+-EXPORT_SYMBOL(__lru_cache_add);
++
++/**
++ * lru_cache_add: add a page to the page lists
++ * @page: the page to add
++ */
++void lru_cache_add_anon(struct page *page)
++{
++      ClearPageActive(page);
++      __lru_cache_add(page);
++}
++
++void lru_cache_add_file(struct page *page)
++{
++      ClearPageActive(page);
++      __lru_cache_add(page);
++}
++EXPORT_SYMBOL(lru_cache_add_file);
+ /**
+  * lru_cache_add - add a page to a page list
+  * @page: the page to be added to the LRU.
++ *
++ * Queue the page for addition to the LRU via pagevec. The decision on whether
++ * to add the page to the [in]active [file|anon] list is deferred until the
++ * pagevec is drained. This gives a chance for the caller of lru_cache_add()
++ * have the page added to the active list using mark_page_accessed().
+  */
+ void lru_cache_add(struct page *page)
+ {
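
The shape of the cleanup above — demote the shared worker to a file-local (static) function and give callers thin named wrappers, exporting only the one that modules need — is easy to mirror outside the kernel. Hypothetical names; EXPORT_SYMBOL has no user-space equivalent, so it survives only as a comment:

#include <stdbool.h>
#include <stdio.h>

struct page { bool active; };

/* Was exported; now file-local: callers go through the wrappers below. */
static void __lru_cache_add(struct page *page)
{
        printf("queued page (active=%d) for the LRU\n", page->active);
}

void lru_cache_add_anon(struct page *page)
{
        page->active = false;   /* ClearPageActive() */
        __lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)      /* EXPORT_SYMBOL(lru_cache_add_file) */
{
        page->active = false;   /* ClearPageActive() */
        __lru_cache_add(page);
}

int main(void)
{
        struct page p = { .active = true };

        lru_cache_add_file(&p);
        return 0;
}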
diff --git a/queue-3.14/series b/queue-3.14/series
index 68a2783c25b77041360c31cc5f917cf18ef1a8e4..d2c8691954288cc0bfa735f50a82361d89f9d6e0 100644 (file)
@@ -42,3 +42,9 @@ arc-fix-build-breakage-for-config_arc_dw2_unwind.patch
 input-evdev-fix-eviocg-type-ioctl.patch
 tty-fix-pty-master-poll-after-slave-closes-v2.patch
 mmc-sdhci-don-t-signal-the-sdio-irq-if-it-s-not-setup.patch
+mm-swap.c-clean-up-lru_cache_add-functions.patch
+mm-page_alloc-do-not-update-zlc-unless-the-zlc-is-active.patch
+mm-page_alloc-do-not-treat-a-zone-that-cannot-be-used-for-dirty-pages-as-full.patch
+include-linux-jump_label.h-expose-the-reference-count.patch
+mm-page_alloc-use-jump-labels-to-avoid-checking-number_of_cpusets.patch
+mm-page_alloc-calculate-classzone_idx-once-from-the.patch