3.0-stable patches
author	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Tue, 24 Jul 2012 23:06:20 +0000 (16:06 -0700)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Tue, 24 Jul 2012 23:06:20 +0000 (16:06 -0700)
added patches:
mm-change-isolate-mode-from-define-to-bitwise-type.patch

queue-3.0/mm-change-isolate-mode-from-define-to-bitwise-type.patch [new file with mode: 0644]
queue-3.0/series
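
For background on the pattern this patch introduces: isolate_mode_t is a
sparse "__bitwise" type, so mixing it with plain integers gets flagged by
static checking, while under a regular compiler the annotations expand to
nothing and code generation is unchanged. Below is a minimal standalone
sketch of the idea, not the kernel's implementation (the kernel's real
annotations live in include/linux/types.h; the helper name here is made up,
though the flag names mirror the patch):

	#include <stdio.h>

	#ifdef __CHECKER__                 /* defined when sparse runs */
	#define __bitwise __attribute__((bitwise))
	#define __force   __attribute__((force))
	#else
	#define __bitwise
	#define __force
	#endif

	/* Distinct type under sparse; a plain unsigned int otherwise. */
	typedef unsigned int __bitwise isolate_mode_t;

	#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
	#define ISOLATE_ACTIVE   ((__force isolate_mode_t)0x2)

	/* Independent flags combine with |, replacing the old 0/1/2
	 * tri-state; this mirrors the all_lru_mode test in the patch. */
	static int is_all_lru_mode(isolate_mode_t mode)
	{
		return (mode & (ISOLATE_ACTIVE | ISOLATE_INACTIVE)) ==
		       (ISOLATE_ACTIVE | ISOLATE_INACTIVE);
	}

	int main(void)
	{
		isolate_mode_t mode = ISOLATE_INACTIVE | ISOLATE_ACTIVE;
		printf("isolating both LRU lists? %d\n", is_all_lru_mode(mode));
		return 0;
	}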

diff --git a/queue-3.0/mm-change-isolate-mode-from-define-to-bitwise-type.patch b/queue-3.0/mm-change-isolate-mode-from-define-to-bitwise-type.patch
new file mode 100644 (file)
index 0000000..e36701a
--- /dev/null
+++ b/queue-3.0/mm-change-isolate-mode-from-define-to-bitwise-type.patch
@@ -0,0 +1,278 @@
+From 4356f21d09283dc6d39a6f7287a65ddab61e2808 Mon Sep 17 00:00:00 2001
+From: Minchan Kim <minchan.kim@gmail.com>
+Date: Mon, 31 Oct 2011 17:06:47 -0700
+Subject: mm: change isolate mode from #define to bitwise type
+
+From: Minchan Kim <minchan.kim@gmail.com>
+
+commit 4356f21d09283dc6d39a6f7287a65ddab61e2808 upstream.
+
+Stable note: Not tracked in Bugzilla. This patch makes later patches
+       easier to apply but has no other impact.
+
+Replace the ISOLATE_XXX macros with a bitwise isolate_mode_t type.
+Plain macros are not recommended here, as they are type-unsafe and make
+debugging harder: the symbol cannot be passed through to the debugger.
+
+Quote from Johannes:
+"Hmm, it would probably be cleaner to fully convert the isolation mode
+into independent flags.  INACTIVE, ACTIVE, BOTH is currently a
+tri-state among flags, which is a bit ugly."
+
+This patch moves the isolate mode definitions from swap.h to mmzone.h,
+where memcontrol.h can see them for its function declarations.
+
+Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+
+---
+ Documentation/trace/postprocess/trace-vmscan-postprocess.pl |    8 +-
+ include/linux/memcontrol.h                                  |    3 +-
+ include/linux/mmzone.h                                      |    8 ++
+ include/linux/swap.h                                        |    7 --
+ include/trace/events/vmscan.h                               |    8 +-
+ mm/compaction.c                                             |    3 +-
+ mm/memcontrol.c                                             |    3 +-
+ mm/vmscan.c                                                 |   37 ++++++------
+ 8 files changed, 43 insertions(+), 34 deletions(-)
+
+--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
++++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+@@ -379,10 +379,10 @@ EVENT_PROCESS:
+                       # To closer match vmstat scanning statistics, only count isolate_both
+                       # and isolate_inactive as scanning. isolate_active is rotation
+-                      # isolate_inactive == 0
+-                      # isolate_active   == 1
+-                      # isolate_both     == 2
+-                      if ($isolate_mode != 1) {
++                      # isolate_inactive == 1
++                      # isolate_active   == 2
++                      # isolate_both     == 3
++                      if ($isolate_mode != 2) {
+                               $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
+                       }
+                       $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -35,7 +35,8 @@ enum mem_cgroup_page_stat_item {
+ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+                                       struct list_head *dst,
+                                       unsigned long *scanned, int order,
+-                                      int mode, struct zone *z,
++                                      isolate_mode_t mode,
++                                      struct zone *z,
+                                       struct mem_cgroup *mem_cont,
+                                       int active, int file);
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -158,6 +158,14 @@ static inline int is_unevictable_lru(enu
+       return (l == LRU_UNEVICTABLE);
+ }
++/* Isolate inactive pages */
++#define ISOLATE_INACTIVE      ((__force isolate_mode_t)0x1)
++/* Isolate active pages */
++#define ISOLATE_ACTIVE                ((__force isolate_mode_t)0x2)
++
++/* LRU Isolation modes. */
++typedef unsigned __bitwise__ isolate_mode_t;
++
+ enum zone_watermarks {
+       WMARK_MIN,
+       WMARK_LOW,
+--- a/include/linux/swap.h
++++ b/include/linux/swap.h
+@@ -243,11 +243,6 @@ static inline void lru_cache_add_file(st
+       __lru_cache_add(page, LRU_INACTIVE_FILE);
+ }
+-/* LRU Isolation modes. */
+-#define ISOLATE_INACTIVE 0    /* Isolate inactive pages. */
+-#define ISOLATE_ACTIVE 1      /* Isolate active pages. */
+-#define ISOLATE_BOTH 2                /* Isolate both active and inactive pages. */
+-
+ /* linux/mm/vmscan.c */
+ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+                                       gfp_t gfp_mask, nodemask_t *mask);
+@@ -259,7 +254,7 @@ extern unsigned long mem_cgroup_shrink_n
+                                               unsigned int swappiness,
+                                               struct zone *zone,
+                                               unsigned long *nr_scanned);
+-extern int __isolate_lru_page(struct page *page, int mode, int file);
++extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
+ extern unsigned long shrink_all_memory(unsigned long nr_pages);
+ extern int vm_swappiness;
+ extern int remove_mapping(struct address_space *mapping, struct page *page);
+--- a/include/trace/events/vmscan.h
++++ b/include/trace/events/vmscan.h
+@@ -266,7 +266,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolat
+               unsigned long nr_lumpy_taken,
+               unsigned long nr_lumpy_dirty,
+               unsigned long nr_lumpy_failed,
+-              int isolate_mode),
++              isolate_mode_t isolate_mode),
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
+@@ -278,7 +278,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolat
+               __field(unsigned long, nr_lumpy_taken)
+               __field(unsigned long, nr_lumpy_dirty)
+               __field(unsigned long, nr_lumpy_failed)
+-              __field(int, isolate_mode)
++              __field(isolate_mode_t, isolate_mode)
+       ),
+       TP_fast_assign(
+@@ -312,7 +312,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_templ
+               unsigned long nr_lumpy_taken,
+               unsigned long nr_lumpy_dirty,
+               unsigned long nr_lumpy_failed,
+-              int isolate_mode),
++              isolate_mode_t isolate_mode),
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+@@ -327,7 +327,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_templ
+               unsigned long nr_lumpy_taken,
+               unsigned long nr_lumpy_dirty,
+               unsigned long nr_lumpy_failed,
+-              int isolate_mode),
++              isolate_mode_t isolate_mode),
+       TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -371,7 +371,8 @@ static isolate_migrate_t isolate_migrate
+               }
+               /* Try isolate the page */
+-              if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
++              if (__isolate_lru_page(page,
++                              ISOLATE_ACTIVE|ISOLATE_INACTIVE, 0) != 0)
+                       continue;
+               VM_BUG_ON(PageTransCompound(page));
+--- a/mm/memcontrol.c
++++ b/mm/memcontrol.c
+@@ -1251,7 +1251,8 @@ mem_cgroup_get_reclaim_stat_from_page(st
+ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
+                                       struct list_head *dst,
+                                       unsigned long *scanned, int order,
+-                                      int mode, struct zone *z,
++                                      isolate_mode_t mode,
++                                      struct zone *z,
+                                       struct mem_cgroup *mem_cont,
+                                       int active, int file)
+ {
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1012,23 +1012,27 @@ keep_lumpy:
+  *
+  * returns 0 on success, -ve errno on failure.
+  */
+-int __isolate_lru_page(struct page *page, int mode, int file)
++int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
+ {
++      bool all_lru_mode;
+       int ret = -EINVAL;
+       /* Only take pages on the LRU. */
+       if (!PageLRU(page))
+               return ret;
++      all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
++              (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
++
+       /*
+        * When checking the active state, we need to be sure we are
+        * dealing with comparible boolean values.  Take the logical not
+        * of each.
+        */
+-      if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
++      if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
+               return ret;
+-      if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
++      if (!all_lru_mode && !!page_is_file_cache(page) != file)
+               return ret;
+       /*
+@@ -1076,7 +1080,8 @@ int __isolate_lru_page(struct page *page
+  */
+ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
+               struct list_head *src, struct list_head *dst,
+-              unsigned long *scanned, int order, int mode, int file)
++              unsigned long *scanned, int order, isolate_mode_t mode,
++              int file)
+ {
+       unsigned long nr_taken = 0;
+       unsigned long nr_lumpy_taken = 0;
+@@ -1201,8 +1206,8 @@ static unsigned long isolate_lru_pages(u
+ static unsigned long isolate_pages_global(unsigned long nr,
+                                       struct list_head *dst,
+                                       unsigned long *scanned, int order,
+-                                      int mode, struct zone *z,
+-                                      int active, int file)
++                                      isolate_mode_t mode,
++                                      struct zone *z, int active, int file)
+ {
+       int lru = LRU_BASE;
+       if (active)
+@@ -1448,6 +1453,7 @@ shrink_inactive_list(unsigned long nr_to
+       unsigned long nr_taken;
+       unsigned long nr_anon;
+       unsigned long nr_file;
++      isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
+       while (unlikely(too_many_isolated(zone, file, sc))) {
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
+@@ -1458,15 +1464,15 @@ shrink_inactive_list(unsigned long nr_to
+       }
+       set_reclaim_mode(priority, sc, false);
++      if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
++              reclaim_mode |= ISOLATE_ACTIVE;
++
+       lru_add_drain();
+       spin_lock_irq(&zone->lru_lock);
+       if (scanning_global_lru(sc)) {
+-              nr_taken = isolate_pages_global(nr_to_scan,
+-                      &page_list, &nr_scanned, sc->order,
+-                      sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+-                                      ISOLATE_BOTH : ISOLATE_INACTIVE,
+-                      zone, 0, file);
++              nr_taken = isolate_pages_global(nr_to_scan, &page_list,
++                      &nr_scanned, sc->order, reclaim_mode, zone, 0, file);
+               zone->pages_scanned += nr_scanned;
+               if (current_is_kswapd())
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+@@ -1475,12 +1481,9 @@ shrink_inactive_list(unsigned long nr_to
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                              nr_scanned);
+       } else {
+-              nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
+-                      &page_list, &nr_scanned, sc->order,
+-                      sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
+-                                      ISOLATE_BOTH : ISOLATE_INACTIVE,
+-                      zone, sc->mem_cgroup,
+-                      0, file);
++              nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
++                      &nr_scanned, sc->order, reclaim_mode, zone,
++                      sc->mem_cgroup, 0, file);
+               /*
+                * mem_cgroup_isolate_pages() keeps track of
+                * scanned pages on its own.
diff --git a/queue-3.0/series b/queue-3.0/series
index 59fda2d916c3526b91a1cdbc16aaab9ad6b43b0e..ca0e09acdd8ba6851aa7a6517a7dc8ae069a279a 100644 (file)
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -15,3 +15,4 @@ vmscan-reduce-wind-up-shrinker-nr-when-shrinker-can-t-do-work.patch
 vmscan-limit-direct-reclaim-for-higher-order-allocations.patch
 vmscan-abort-reclaim-compaction-if-compaction-can-proceed.patch
 mm-compaction-trivial-clean-up-in-acct_isolated.patch
+mm-change-isolate-mode-from-define-to-bitwise-type.patch
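
A side effect worth noting: the tracepoint now emits the flag values, which
is why the trace-vmscan-postprocess.pl hunk above changes the rotation test
from isolate_mode != 1 to != 2. The old tri-state reported 0/1/2, while the
new flags report 1/2/3. A throwaway sketch of the value shift (illustrative
constants only, not kernel code):

	#include <stdio.h>

	/* Old tri-state constants removed from swap.h ... */
	enum { OLD_ISOLATE_INACTIVE = 0, OLD_ISOLATE_ACTIVE = 1,
	       OLD_ISOLATE_BOTH = 2 };

	/* ... versus the new flag values added to mmzone.h. */
	#define NEW_ISOLATE_INACTIVE 0x1
	#define NEW_ISOLATE_ACTIVE   0x2

	int main(void)
	{
		/* Prints old: 0/1/2 vs. new: 1/2/3; "both" is now the OR
		 * of the two flags rather than a third enumerator. */
		printf("old: inactive=%d active=%d both=%d\n",
		       OLD_ISOLATE_INACTIVE, OLD_ISOLATE_ACTIVE,
		       OLD_ISOLATE_BOTH);
		printf("new: inactive=%d active=%d both=%d\n",
		       NEW_ISOLATE_INACTIVE, NEW_ISOLATE_ACTIVE,
		       NEW_ISOLATE_INACTIVE | NEW_ISOLATE_ACTIVE);
		return 0;
	}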