From: Nick Piggin <npiggin@suse.de>
Subject: be more aggressive with zone reclaims
References: bnc#476525
Patch-mainline: no

The zone reclaim design does not cope well with parallel allocations.
The primary problem is that only one thread is allowed to perform
zone reclaim on a given zone at a time; if another thread needs memory
from that zone/node, its zone reclaim fails immediately and it is
forced to fall back to allocating from another zone.

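For reference, the serialization in question is this per-zone try-lock
around __zone_reclaim() (removed by the last hunk below):

	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
		return 0;	/* reclaim already in flight; caller falls back */
	ret = __zone_reclaim(zone, gfp_mask, order);
	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
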
Additionally, the default zone reclaim priority is insufficient
for massively parallel allocations. Lower ZONE_RECLAIM_PRIORITY
to fix it. This can result in higher latency spikes, but a similar
kind of page allocation latency is often encountered as a normal
part of page reclaim when the pagecache fills memory. While at it,
replace the shrink_slab() retry loop in __zone_reclaim() with a
single call that passes the zone's LRU page count, rather than the
allocation order, as the lru_pages argument.

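For scale, per the comment above ZONE_RECLAIM_PRIORITY in the first
hunk below, a reclaim pass at priority p considers roughly

	lru_pages >> p	/* p = 4: 1/16th of the zone; p = 0: the whole zone */

pages, so priority 0 lets a single zone reclaim pass scan the entire zone.
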
Signed-off-by: Petr Tesarik <ptesarik@suse.cz>

---
 mm/vmscan.c |   12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)

--- linux-2.6.27-SLE11_BRANCH.orig/mm/vmscan.c	2008-10-20 17:24:19.000000000 +0200
+++ linux-2.6.27-SLE11_BRANCH/mm/vmscan.c	2009-06-29 12:59:09.000000000 +0200
@@ -1988,7 +1988,7 @@ int zone_reclaim_mode __read_mostly;
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
  */
-#define ZONE_RECLAIM_PRIORITY 4
+#define ZONE_RECLAIM_PRIORITY 0
 
 /*
  * Percentage of pages in a zone that must be unmapped for zone_reclaim to
@@ -2052,6 +2052,8 @@ static int __zone_reclaim(struct zone *z
 
 	slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
 	if (slab_reclaimable > zone->min_slab_pages) {
+		unsigned long lru_pages = zone_page_state(zone, NR_ACTIVE)
+					+ zone_page_state(zone, NR_INACTIVE);
 		/*
 		 * shrink_slab() does not currently allow us to determine how
 		 * many pages were freed in this zone. So we take the current
@@ -2062,10 +2064,7 @@ static int __zone_reclaim(struct zone *z
 		 * Note that shrink_slab will free memory on all zones and may
 		 * take a long time.
 		 */
-		while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-			zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
-				slab_reclaimable - nr_pages)
-			;
+		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
 
 		/*
 		 * Update nr_reclaimed by the number of slab pages we
@@ -2120,10 +2119,7 @@ int zone_reclaim(struct zone *zone, gfp_
 	if (node_state(node_id, N_CPU) && node_id != numa_node_id())
 		return 0;
 
-	if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
-		return 0;
 	ret = __zone_reclaim(zone, gfp_mask, order);
-	zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 
 	return ret;
 }