queue revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch for 4.18
author	Sasha Levin <sashal@kernel.org>
	Wed, 31 Oct 2018 13:52:04 +0000 (09:52 -0400)
committer	Sasha Levin <sashal@kernel.org>
	Wed, 31 Oct 2018 13:52:14 +0000 (09:52 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-4.18/revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch [new file with mode: 0644]
queue-4.18/series

diff --git a/queue-4.18/revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch b/queue-4.18/revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch
new file mode 100644
index 0000000..0614e22
--- /dev/null
+++ b/queue-4.18/revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch
@@ -0,0 +1,45 @@
+From 95bbb90429171c62fcb5debb5ea33992f0b7fbd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Oct 2018 07:18:59 -0400
+Subject: Revert "mm: slowly shrink slabs with a relatively small number of
+ objects"
+
+This reverts commit 62aad93f09c1952ede86405894df1b22012fd5ab.
+
+That commit was the backport of upstream commit 172b06c32b94 ("mm:
+slowly shrink slabs with a relatively small number of objects").
+
+The upstream commit was found to cause regressions. While there is a
+proposed fix upstream, revert this patch from stable trees for now, as
+testing the fix will take some time.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/vmscan.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index fc0436407471..03822f86f288 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -386,17 +386,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
+       delta = freeable >> priority;
+       delta *= 4;
+       do_div(delta, shrinker->seeks);
+-
+-      /*
+-       * Make sure we apply some minimal pressure on default priority
+-       * even on small cgroups. Stale objects are not only consuming memory
+-       * by themselves, but can also hold a reference to a dying cgroup,
+-       * preventing it from being reclaimed. A dying cgroup with all
+-       * corresponding structures like per-cpu stats and kmem caches
+-       * can be really big, so it may lead to a significant waste of memory.
+-       */
+-      delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+-
+       total_scan += delta;
+       if (total_scan < 0) {
+               pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
+-- 
+2.17.1
+
diff --git a/queue-4.18/series b/queue-4.18/series
index 8aaf7d6af57644d5ff0c6ed6c971a13c63c70981..3120f43da70b702ed0a0798ec70d245f692c9c76 100644
--- a/queue-4.18/series
+++ b/queue-4.18/series
@@ -97,3 +97,4 @@ vmlinux.lds.h-fix-incomplete-.text.exit-discards.patch
 vmlinux.lds.h-fix-linker-warnings-about-orphan-.lpbx.patch
 afs-fix-cell-proc-list.patch
 fs-fat-fatent.c-add-cond_resched-to-fat_count_free_c.patch
+revert-mm-slowly-shrink-slabs-with-a-relatively-smal.patch
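
Background note (not part of the queued patch): the reverted hunk clamped the
shrinker's scan target so that small caches, whose delta otherwise rounds down
to zero at default priority, still received some minimum pressure. Below is a
minimal user-space sketch of that arithmetic. It is an illustration, not
kernel code, and the inputs (freeable = 50 objects, seeks = 2 matching the
kernel's DEFAULT_SEEKS, batch = 128 matching SHRINK_BATCH) are example values.

#include <stdio.h>

#define DEF_PRIORITY 12	/* default reclaim priority, as in mm/vmscan.c */

static unsigned long long min_ull(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

static unsigned long long max_ull(unsigned long long a, unsigned long long b)
{
	return a > b ? a : b;
}

/* Scan target as computed after the revert: small caches round down to 0. */
static unsigned long long delta_plain(unsigned long long freeable,
				      int priority, unsigned int seeks)
{
	unsigned long long delta = freeable >> priority;

	delta *= 4;
	delta /= seeks;		/* do_div() in the kernel source */
	return delta;
}

/* Scan target with the reverted minimum-pressure clamp applied. */
static unsigned long long delta_clamped(unsigned long long freeable,
					int priority, unsigned int seeks,
					unsigned long long batch_size)
{
	unsigned long long delta = delta_plain(freeable, priority, seeks);

	return max_ull(delta, min_ull(freeable, batch_size));
}

int main(void)
{
	unsigned long long freeable = 50, batch = 128;	/* example values */

	printf("plain:   %llu objects to scan\n",
	       delta_plain(freeable, DEF_PRIORITY, 2));
	printf("clamped: %llu objects to scan\n",
	       delta_clamped(freeable, DEF_PRIORITY, 2, batch));
	return 0;
}

With these inputs the plain computation requests no scanning (50 >> 12 == 0),
while the clamped variant requests all 50 objects; that minimum pressure on
small (e.g. per-cgroup) caches is exactly what the revert removes.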