From 60cefed485a02bd99b6299dad70666fe49245da7 Mon Sep 17 00:00:00 2001
From: Johannes Weiner <hannes@cmpxchg.org>
Date: Thu, 29 Nov 2012 13:54:23 -0800
Subject: mm: vmscan: fix endless loop in kswapd balancing

From: Johannes Weiner <hannes@cmpxchg.org>

commit 60cefed485a02bd99b6299dad70666fe49245da7 upstream.

Kswapd does not in all places have the same criteria for a balanced
zone. Zones are only being reclaimed when their high watermark is
breached, but compaction checks loop over the zonelist again when the
zone does not meet the low watermark plus two times the size of the
allocation. This gets kswapd stuck in an endless loop over a small
zone, like the DMA zone, where the high watermark is smaller than the
compaction requirement.

Add a function, zone_balanced(), that checks the watermark, and, for
higher order allocations, if compaction has enough free memory. Then
use it uniformly to check for balanced zones.

This makes sure that when the compaction watermark is not met, at least
reclaim happens and progress is made - or the zone is declared
unreclaimable at some point and skipped entirely.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reported-by: George Spelvin <linux@horizon.com>
Reported-by: Johannes Hirte <johannes.hirte@fem.tu-ilmenau.de>
Reported-by: Tomas Racek <tracek@redhat.com>
Tested-by: Johannes Hirte <johannes.hirte@fem.tu-ilmenau.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[hq: Backported to 3.4: adjust context]
Signed-off-by: Qiang Huang <h.huangqiang@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>


---
 mm/vmscan.c |   27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2569,6 +2569,19 @@ static void age_active_anon(struct zone
 	} while (memcg);
 }
 
+static bool zone_balanced(struct zone *zone, int order,
+			  unsigned long balance_gap, int classzone_idx)
+{
+	if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
+				    balance_gap, classzone_idx, 0))
+		return false;
+
+	if (COMPACTION_BUILD && order && !compaction_suitable(zone, order))
+		return false;
+
+	return true;
+}
+
 /*
  * pgdat_balanced is used when checking if a node is balanced for high-order
  * allocations. Only zones that meet watermarks and are in a zone allowed
@@ -2628,8 +2641,7 @@ static bool sleeping_prematurely(pg_data
 			continue;
 		}
 
-		if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone),
-								i, 0))
+		if (!zone_balanced(zone, order, 0, i))
 			all_zones_ok = false;
 		else
 			balanced += zone->present_pages;
@@ -2741,8 +2753,7 @@ loop_again:
 				break;
 			}
 
-			if (!zone_watermark_ok_safe(zone, order,
-					high_wmark_pages(zone), 0, 0)) {
+			if (!zone_balanced(zone, order, 0, 0)) {
 				end_zone = i;
 				break;
 			} else {
@@ -2817,9 +2828,8 @@ loop_again:
 				testorder = 0;
 
 			if ((buffer_heads_over_limit && is_highmem_idx(i)) ||
-			    !zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone) + balance_gap,
-					end_zone, 0)) {
+			    !zone_balanced(zone, testorder,
+					balance_gap, end_zone)) {
 				shrink_zone(priority, zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
@@ -2846,8 +2856,7 @@ loop_again:
 				continue;
 			}
 
-			if (!zone_watermark_ok_safe(zone, testorder,
-					high_wmark_pages(zone), end_zone, 0)) {
+			if (!zone_balanced(zone, testorder, 0, end_zone)) {
 				all_zones_ok = 0;
 				/*
 				 * We are still under min water mark. This