git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.14-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 8 Jun 2014 02:15:28 +0000 (19:15 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 8 Jun 2014 02:15:28 +0000 (19:15 -0700)
added patches:
revert-revert-mm-vmscan-do-not-swap-anon-pages-just.patch

queue-3.14/revert-revert-mm-vmscan-do-not-swap-anon-pages-just.patch [new file with mode: 0644]
queue-3.14/series

diff --git a/queue-3.14/revert-revert-mm-vmscan-do-not-swap-anon-pages-just.patch b/queue-3.14/revert-revert-mm-vmscan-do-not-swap-anon-pages-just.patch
new file mode 100644 (file)
index 0000000..25bda24
--- /dev/null
@@ -0,0 +1,55 @@
+From fde350e9497d7fb5a222dc02c4364ac6d998c64d Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sat, 7 Jun 2014 19:11:23 -0700
+Subject: Revert "revert "mm: vmscan: do not swap anon pages just because free+file is low""
+
+This reverts commit 623762517e2370be3b3f95f4fe08d6c063a49b06.
+
+Ben rightly points out that commit 0bf1457f0cfc, which is what this
+original commit was reverting, never ended up in 3.14-stable, but was
+only for 3.15.
+
+So revert this patch as we now have the same check twice in a row, which
+is pretty pointless.  Although the comments were "prettier"...
+
+Cc: Ben Hutchings <ben@decadent.org.uk>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Rafael Aquini <aquini@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c |   18 ------------------
+ 1 file changed, 18 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1916,24 +1916,6 @@ static void get_scan_count(struct lruvec
+       }
+       /*
+-       * Prevent the reclaimer from falling into the cache trap: as
+-       * cache pages start out inactive, every cache fault will tip
+-       * the scan balance towards the file LRU.  And as the file LRU
+-       * shrinks, so does the window for rotation from references.
+-       * This means we have a runaway feedback loop where a tiny
+-       * thrashing file LRU becomes infinitely more attractive than
+-       * anon pages.  Try to detect this based on file LRU size.
+-       */
+-      if (global_reclaim(sc)) {
+-              unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+-
+-              if (unlikely(file + free <= high_wmark_pages(zone))) {
+-                      scan_balance = SCAN_ANON;
+-                      goto out;
+-              }
+-      }
+-
+-      /*
+        * There is enough inactive page cache, do not reclaim
+        * anything from the anonymous working set right now.
+        */
index ebeb25ba44075ce194d55879bc9e9204f62ecd98..e1b652bea8cb2488e706cc5480f1fdbc4a44b76e 100644 (file)
@@ -27,3 +27,4 @@ firewire-revert-to-4-gb-rdma-fix-protocols-using-memory-space.patch
 mips-fix-typo-when-reporting-cache-and-ftlb-errors-for-imgtec-cores.patch
 dm-thin-add-no_space_timeout-dm-thin-pool-module-param.patch
 dm-cache-always-split-discards-on-cache-block-boundaries.patch
+revert-revert-mm-vmscan-do-not-swap-anon-pages-just.patch