git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2012 17:19:00 +0000 (10:19 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 30 Jul 2012 17:19:00 +0000 (10:19 -0700)
added patches:
vmscan-fix-initial-shrinker-size-handling.patch

queue-3.0/series
queue-3.0/vmscan-fix-initial-shrinker-size-handling.patch [new file with mode: 0644]

index c55e3f65ccc67f307b8d051e85310ee1d1b41318..c5d42f08a9da645e817c39e2b762acbe48008087 100644 (file)
@@ -38,3 +38,4 @@ cpusets-avoid-looping-when-storing-to-mems_allowed-if-one-node-remains-set.patch
 cpusets-stall-when-updating-mems_allowed-for-mempolicy-or-disjoint-nodemask.patch
 cpuset-mm-reduce-large-amounts-of-memory-barrier-related-damage-v3.patch
 mm-hugetlb-fix-warning-in-alloc_huge_page-dequeue_huge_page_vma.patch
+vmscan-fix-initial-shrinker-size-handling.patch
diff --git a/queue-3.0/vmscan-fix-initial-shrinker-size-handling.patch b/queue-3.0/vmscan-fix-initial-shrinker-size-handling.patch
new file mode 100644 (file)
index 0000000..ff26222
--- /dev/null
@@ -0,0 +1,63 @@
+From 635697c663f38106063d5659f0cf2e45afcd4bb5 Mon Sep 17 00:00:00 2001
+From: Konstantin Khlebnikov <khlebnikov@openvz.org>
+Date: Thu, 8 Dec 2011 14:33:51 -0800
+Subject: vmscan: fix initial shrinker size handling
+
+From: Konstantin Khlebnikov <khlebnikov@openvz.org>
+
+commit 635697c663f38106063d5659f0cf2e45afcd4bb5 upstream.
+
+Stable note: The commit [acf92b48: vmscan: shrinker->nr updates race and
+       go wrong] aimed to reduce excessive reclaim of slab objects but
+       had a bug in how it treated shrinker functions that returned -1.
+
+A shrinker function can return -1, meaning that it cannot do anything
+without a risk of deadlock.  For example, prune_super() does this if it
+cannot grab a superblock reference, even if nr_to_scan=0.  Currently we
+interpret this -1 as a ULONG_MAX size shrinker and evaluate `total_scan'
+according to this.  So the next time around this shrinker can cause
+really big pressure.  Let's skip such shrinkers instead.
+
+Also make total_scan signed, otherwise the check (total_scan < 0) below
+never works.
+
+Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
+Cc: Dave Chinner <david@fromorbit.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -248,12 +248,16 @@ unsigned long shrink_slab(struct shrink_
+ 
+       list_for_each_entry(shrinker, &shrinker_list, list) {
+               unsigned long long delta;
+-              unsigned long total_scan;
+-              unsigned long max_pass;
++              long total_scan;
++              long max_pass;
+               int shrink_ret = 0;
+               long nr;
+               long new_nr;
+ 
++              max_pass = do_shrinker_shrink(shrinker, shrink, 0);
++              if (max_pass <= 0)
++                      continue;
++
+               /*
+                * copy the current shrinker scan count into a local variable
+                * and zero it so that other concurrent shrinker invocations
+@@ -264,7 +268,6 @@ unsigned long shrink_slab(struct shrink_
+               } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ 
+               total_scan = nr;
+-              max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+               delta = (4 * nr_pages_scanned) / shrinker->seeks;
+               delta *= max_pass;
+               do_div(delta, lru_pages + 1);
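
The core of the fix is a plain C signedness issue: do_shrinker_shrink() can return -1, and storing that value in an unsigned long silently turns it into ULONG_MAX, which then feeds the delta/total_scan arithmetic and produces a huge scan target on the next pass.  The minimal user-space sketch below only illustrates that conversion and the early-skip check the patch adds; fake_shrinker() is a hypothetical stand-in for do_shrinker_shrink(), not the kernel API.

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for a shrinker that must bail out (deadlock risk). */
static int fake_shrinker(void)
{
	return -1;
}

int main(void)
{
	unsigned long buggy_max_pass = fake_shrinker();	/* -1 wraps to ULONG_MAX */
	long fixed_max_pass = fake_shrinker();		/* stays -1 */

	printf("unsigned max_pass: %lu\n", buggy_max_pass);
	printf("ULONG_MAX:         %lu\n", ULONG_MAX);
	printf("signed max_pass:   %ld\n", fixed_max_pass);

	/* The patched loop does the equivalent of this before any scan math. */
	if (fixed_max_pass <= 0)
		printf("skip this shrinker, nothing it can do right now\n");

	return 0;
}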