From: Greg Kroah-Hartman
Date: Wed, 25 Jul 2012 19:15:59 +0000 (-0700)
Subject: 3.0-stable patches
X-Git-Tag: v3.4.7~7
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=b2d5db0a01c8d7c044ad4ef3022c648640791f3a;p=thirdparty%2Fkernel%2Fstable-queue.git

3.0-stable patches

added patches:
      mm-vmscan-when-reclaiming-for-compaction-ensure-there-are-sufficient-free-pages-available.patch
---

diff --git a/queue-3.0/mm-vmscan-when-reclaiming-for-compaction-ensure-there-are-sufficient-free-pages-available.patch b/queue-3.0/mm-vmscan-when-reclaiming-for-compaction-ensure-there-are-sufficient-free-pages-available.patch
new file mode 100644
index 00000000000..4499bff5535
--- /dev/null
+++ b/queue-3.0/mm-vmscan-when-reclaiming-for-compaction-ensure-there-are-sufficient-free-pages-available.patch
@@ -0,0 +1,118 @@
+From fe4b1b244bdb96136855f2c694071cb09d140766 Mon Sep 17 00:00:00 2001
+From: Mel Gorman
+Date: Thu, 12 Jan 2012 17:19:45 -0800
+Subject: mm: vmscan: when reclaiming for compaction, ensure there are sufficient free pages available
+
+From: Mel Gorman
+
+commit fe4b1b244bdb96136855f2c694071cb09d140766 upstream.
+
+Stable note: Not tracked on Bugzilla. THP and compaction was found to
+      aggressively reclaim pages and stall systems under different
+      situations that was addressed piecemeal over time. This patch
+      addresses a problem where the fix regressed THP allocation
+      success rates.
+
+In commit e0887c19 ("vmscan: limit direct reclaim for higher order
+allocations"), Rik noted that reclaim was too aggressive when THP was
+enabled. In his initial patch he used the number of free pages to decide
+if reclaim should abort for compaction. My feedback was that reclaim and
+compaction should be using the same logic when deciding if reclaim should
+be aborted.
+
+Unfortunately, this had the effect of reducing THP success rates when the
+workload included something like streaming reads that continually
+allocated pages. The window during which compaction could run and return
+a THP was too small.
+
+This patch combines Rik's two patches together. compaction_suitable() is
+still used to decide if reclaim should be aborted to allow compaction is
+used. However, it will also ensure that there is a reasonable buffer of
+free pages available. This improves upon the THP allocation success rates
+but bounds the number of pages that are freed for compaction.
+
+Signed-off-by: Mel Gorman
+Reviewed-by: Rik van Riel
+Cc: Andrea Arcangeli
+Cc: Minchan Kim
+Cc: Dave Jones
+Cc: Jan Kara
+Cc: Andy Isaacson
+Cc: Nai Xia
+Cc: Johannes Weiner
+Signed-off-by: Andrew Morton
+Signed-off-by: Linus Torvalds
+Signed-off-by: Mel Gorman
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ mm/vmscan.c |   44 +++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 39 insertions(+), 5 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -2075,6 +2075,42 @@ restart:
+ 	throttle_vm_writeout(sc->gfp_mask);
+ }
+ 
++/* Returns true if compaction should go ahead for a high-order request */
++static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
++{
++	unsigned long balance_gap, watermark;
++	bool watermark_ok;
++
++	/* Do not consider compaction for orders reclaim is meant to satisfy */
++	if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
++		return false;
++
++	/*
++	 * Compaction takes time to run and there are potentially other
++	 * callers using the pages just freed. Continue reclaiming until
++	 * there is a buffer of free pages available to give compaction
++	 * a reasonable chance of completing and allocating the page
++	 */
++	balance_gap = min(low_wmark_pages(zone),
++		(zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
++			KSWAPD_ZONE_BALANCE_GAP_RATIO);
++	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
++	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
++
++	/*
++	 * If compaction is deferred, reclaim up to a point where
++	 * compaction will have a chance of success when re-enabled
++	 */
++	if (compaction_deferred(zone))
++		return watermark_ok;
++
++	/* If compaction is not ready to start, keep reclaiming */
++	if (!compaction_suitable(zone, sc->order))
++		return false;
++
++	return watermark_ok;
++}
++
+ /*
+  * This is the direct reclaim path, for page-allocating processes. We only
+  * try to reclaim pages from zones which will satisfy the caller's allocation
+@@ -2092,8 +2128,8 @@ restart:
+  * scan then give up on it.
+  *
+  * This function returns true if a zone is being reclaimed for a costly
+- * high-order allocation and compaction is either ready to begin or deferred.
+- * This indicates to the caller that it should retry the allocation or fail.
++ * high-order allocation and compaction is ready to begin. This indicates to
++ * the caller that it should retry the allocation or fail.
+  */
+ static bool shrink_zones(int priority, struct zonelist *zonelist,
+ 					struct scan_control *sc)
+@@ -2127,9 +2163,7 @@ static bool shrink_zones(int priority, s
+ 			 * noticable problem, like transparent huge page
+ 			 * allocations.
+ 			 */
+-			if (sc->order > PAGE_ALLOC_COSTLY_ORDER &&
+-			    (compaction_suitable(zone, sc->order) ||
+-			    compaction_deferred(zone))) {
++			if (compaction_ready(zone, sc)) {
+ 				should_abort_reclaim = true;
+ 				continue;
+ 			}
diff --git a/queue-3.0/series b/queue-3.0/series
index b7428b7c8f4..3bc6394d411 100644
--- a/queue-3.0/series
+++ b/queue-3.0/series
@@ -26,3 +26,4 @@ mm-compaction-make-isolate_lru_page-filter-aware-again.patch
 kswapd-avoid-unnecessary-rebalance-after-an-unsuccessful-balancing.patch
 kswapd-assign-new_order-and-new_classzone_idx-after-wakeup-in-sleeping.patch
 mm-compaction-introduce-sync-light-migration-for-use-by-compaction.patch
+mm-vmscan-when-reclaiming-for-compaction-ensure-there-are-sufficient-free-pages-available.patch