--- /dev/null
+From bbc1c5e8ad6dfebf9d13b8a4ccdf66c92913eac9 Mon Sep 17 00:00:00 2001
+From: Lars Ellenberg <lars.ellenberg@linbit.com>
+Date: Wed, 9 Jul 2014 21:18:32 +0200
+Subject: drbd: fix regression 'out of mem, failed to invoke fence-peer helper'
+
+From: Lars Ellenberg <lars.ellenberg@linbit.com>
+
+commit bbc1c5e8ad6dfebf9d13b8a4ccdf66c92913eac9 upstream.
+
+Since Linux kernel 3.13, kthread_run() internally uses
+wait_for_completion_killable(). We may sometimes call kthread_run()
+while a signal is still pending, one we used earlier to kick our threads
+out of potentially blocking network functions, and kthread_run() then
+mistakes that pending signal for a new fatal signal and fails.
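+
+As an illustration (not part of this patch; simplified from
+kernel/kthread.c as of v3.13), kthread_run() ends up in
+kthread_create_on_node(), which waits roughly like this:
+
+	if (wait_for_completion_killable(&done)) {
+		/* a fatal signal already pending on the caller aborts
+		 * the wait, and the thread creation fails */
+		return ERR_PTR(-EINTR);
+	}
+
+so a signal we sent to ourselves earlier is enough to make kthread_run()
+fail even though nothing is actually wrong.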
+
+Fix: flush_signals() before kthread_run().
+
+Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
+Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
+Signed-off-by: Jens Axboe <axboe@fb.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/block/drbd/drbd_nl.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/block/drbd/drbd_nl.c
++++ b/drivers/block/drbd/drbd_nl.c
+@@ -525,6 +525,12 @@ void conn_try_outdate_peer_async(struct
+ struct task_struct *opa;
+
+ kref_get(&tconn->kref);
++ /* We may just have force_sig()'ed this thread
++ * to get it out of some blocking network function.
++ * Clear signals; otherwise kthread_run(), which internally uses
++ * wait_on_completion_killable(), will mistake our pending signal
++ * for a new fatal signal and fail. */
++ flush_signals(current);
+ opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+ if (IS_ERR(opa)) {
+ conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
--- /dev/null
+From be1aa03b973c7dcdc576f3503f7a60429825c35d Mon Sep 17 00:00:00 2001
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Date: Mon, 7 Apr 2014 15:37:05 -0700
+Subject: mm/compaction: change the timing to check to drop the spinlock
+
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+
+commit be1aa03b973c7dcdc576f3503f7a60429825c35d upstream.
+
+It is odd to drop the spinlock when we scan the (SWAP_CLUSTER_MAX - 1)th
+pfn page. This may result in the situation below while isolating
+migratepages.
+
+1. Try to isolate pfn pages 0x0 ~ 0x200.
+2. When low_pfn is 0x1ff, ((low_pfn+1) % SWAP_CLUSTER_MAX) == 0, so the
+   spinlock is dropped.
+3. Then, to complete the isolation, the lock has to be re-acquired.
+
+It is better to use every SWAP_CLUSTER_MAXth pfn as the criterion for
+dropping the lock. This does no harm at pfn 0x0 because, at that point,
+the locked variable is still false.
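+
+For illustration only (not part of this patch), a small userspace sketch
+of which pfns trigger a lock drop under the old and the new check,
+assuming SWAP_CLUSTER_MAX == 32 as in mainline:
+
+	#include <stdio.h>
+
+	#define SWAP_CLUSTER_MAX 32UL
+
+	int main(void)
+	{
+		unsigned long pfn;
+
+		printf("old check drops the lock at:");
+		for (pfn = 0; pfn < 0x200; pfn++)
+			if (!((pfn + 1) % SWAP_CLUSTER_MAX))
+				printf(" 0x%lx", pfn);	/* ... 0x1df, 0x1ff */
+
+		printf("\nnew check drops the lock at:");
+		for (pfn = 0; pfn < 0x200; pfn++)
+			if (!(pfn % SWAP_CLUSTER_MAX))
+				printf(" 0x%lx", pfn);	/* 0x0, 0x20, ... 0x1e0 */
+
+		printf("\n");
+		return 0;
+	}
+
+The old check fires on the very last pfn of the range (0x1ff), right
+before the scan finishes, while the new one fires at cluster boundaries
+and is a no-op at pfn 0x0 because 'locked' is still false there.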
+
+Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -487,7 +487,7 @@ isolate_migratepages_range(struct zone *
+ cond_resched();
+ for (; low_pfn < end_pfn; low_pfn++) {
+ /* give a chance to irqs before checking need_resched() */
+- if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
++ if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
+ if (should_release_lock(&zone->lru_lock)) {
+ spin_unlock_irqrestore(&zone->lru_lock, flags);
+ locked = false;
--- /dev/null
+From c122b2087ab94192f2b937e47b563a9c4e688ece Mon Sep 17 00:00:00 2001
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Date: Mon, 7 Apr 2014 15:37:06 -0700
+Subject: mm/compaction: check pageblock suitability once per pageblock
+
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+
+commit c122b2087ab94192f2b937e47b563a9c4e688ece upstream.
+
+isolation_suitable() and migrate_async_suitable() are used to make sure
+that a pageblock range is fine to be migrated. They do not need to be
+called on every page. The current code behaves well when the pageblock is
+not suitable, but not when it is suitable:
+
+1) It re-checks isolation_suitable() on each page of a pageblock that was
+   already established as suitable.
+2) It re-checks migrate_async_suitable() on each page of a pageblock that
+ was not entered through the next_pageblock: label, because
+ last_pageblock_nr is not otherwise updated.
+
+This patch fixes the situation by 1) calling isolation_suitable() only once
+per pageblock and 2) always updating last_pageblock_nr to the pageblock
+that was just checked.
+
+Additionally, move the PageBuddy() check after the pageblock unit check,
+since the pageblock check is the first thing we should do; this also
+makes things simpler.
+
+[vbabka@suse.cz: rephrase commit description]
+Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 34 +++++++++++++++++++---------------
+ 1 file changed, 19 insertions(+), 15 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -526,8 +526,25 @@ isolate_migratepages_range(struct zone *
+
+ /* If isolation recently failed, do not retry */
+ pageblock_nr = low_pfn >> pageblock_order;
+- if (!isolation_suitable(cc, page))
+- goto next_pageblock;
++ if (last_pageblock_nr != pageblock_nr) {
++ int mt;
++
++ last_pageblock_nr = pageblock_nr;
++ if (!isolation_suitable(cc, page))
++ goto next_pageblock;
++
++ /*
++ * For async migration, also only scan in MOVABLE
++ * blocks. Async migration is optimistic to see if
++ * the minimum amount of work satisfies the allocation
++ */
++ mt = get_pageblock_migratetype(page);
++ if (!cc->sync && !migrate_async_suitable(mt)) {
++ cc->finished_update_migrate = true;
++ skipped_async_unsuitable = true;
++ goto next_pageblock;
++ }
++ }
+
+ /*
+ * Skip if free. page_order cannot be used without zone->lock
+@@ -537,18 +554,6 @@ isolate_migratepages_range(struct zone *
+ continue;
+
+ /*
+- * For async migration, also only scan in MOVABLE blocks. Async
+- * migration is optimistic to see if the minimum amount of work
+- * satisfies the allocation
+- */
+- if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+- !migrate_async_suitable(get_pageblock_migratetype(page))) {
+- cc->finished_update_migrate = true;
+- skipped_async_unsuitable = true;
+- goto next_pageblock;
+- }
+-
+- /*
+ * Check may be lockless but that's ok as we recheck later.
+ * It's possible to migrate LRU pages and balloon pages
+ * Skip any other type of page
+@@ -639,7 +644,6 @@ check_compact_cluster:
+
+ next_pageblock:
+ low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
+- last_pageblock_nr = pageblock_nr;
+ }
+
+ acct_isolated(zone, locked, cc);
--- /dev/null
+From b6c750163c0d138f5041d95fcdbd1094b6928057 Mon Sep 17 00:00:00 2001
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Date: Mon, 7 Apr 2014 15:37:07 -0700
+Subject: mm/compaction: clean-up code on success of ballon isolation
+
+From: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+
+commit b6c750163c0d138f5041d95fcdbd1094b6928057 upstream.
+
+This is just a clean-up to reduce code size and improve readability.
+There is no functional change.
+
+Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -562,11 +562,7 @@ isolate_migratepages_range(struct zone *
+ if (unlikely(balloon_page_movable(page))) {
+ if (locked && balloon_page_isolate(page)) {
+ /* Successfully isolated */
+- cc->finished_update_migrate = true;
+- list_add(&page->lru, migratelist);
+- cc->nr_migratepages++;
+- nr_isolated++;
+- goto check_compact_cluster;
++ goto isolate_success;
+ }
+ }
+ continue;
+@@ -627,13 +623,14 @@ isolate_migratepages_range(struct zone *
+ VM_BUG_ON_PAGE(PageTransCompound(page), page);
+
+ /* Successfully isolated */
+- cc->finished_update_migrate = true;
+ del_page_from_lru_list(page, lruvec, page_lru(page));
++
++isolate_success:
++ cc->finished_update_migrate = true;
+ list_add(&page->lru, migratelist);
+ cc->nr_migratepages++;
+ nr_isolated++;
+
+-check_compact_cluster:
+ /* Avoid isolating too much */
+ if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
+ ++low_pfn;
--- /dev/null
+From da1c67a76f7cf2b3404823d24f9f10fa91aa5dc5 Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Mon, 7 Apr 2014 15:37:34 -0700
+Subject: mm, compaction: determine isolation mode only once
+
+From: David Rientjes <rientjes@google.com>
+
+commit da1c67a76f7cf2b3404823d24f9f10fa91aa5dc5 upstream.
+
+The conditions that control the isolation mode in
+isolate_migratepages_range() do not change during the iteration, so
+extract them out and only define the value once.
+
+This actually does have an effect: gcc does not hoist the computation out
+of the loop by itself, because it cannot prove that cc->sync stays
+unchanged across the loop body.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Acked-by: Rik van Riel <riel@redhat.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 9 ++-------
+ 1 file changed, 2 insertions(+), 7 deletions(-)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -460,12 +460,13 @@ isolate_migratepages_range(struct zone *
+ unsigned long last_pageblock_nr = 0, pageblock_nr;
+ unsigned long nr_scanned = 0, nr_isolated = 0;
+ struct list_head *migratelist = &cc->migratepages;
+- isolate_mode_t mode = 0;
+ struct lruvec *lruvec;
+ unsigned long flags;
+ bool locked = false;
+ struct page *page = NULL, *valid_page = NULL;
+ bool skipped_async_unsuitable = false;
++ const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
++ (unevictable ? ISOLATE_UNEVICTABLE : 0);
+
+ /*
+ * Ensure that there are not too many pages isolated from the LRU
+@@ -608,12 +609,6 @@ isolate_migratepages_range(struct zone *
+ continue;
+ }
+
+- if (!cc->sync)
+- mode |= ISOLATE_ASYNC_MIGRATE;
+-
+- if (unevictable)
+- mode |= ISOLATE_UNEVICTABLE;
+-
+ lruvec = mem_cgroup_page_lruvec(page, zone);
+
+ /* Try isolate the page */
--- /dev/null
+From 91ca9186484809c57303b33778d841cc28f696ed Mon Sep 17 00:00:00 2001
+From: David Rientjes <rientjes@google.com>
+Date: Thu, 3 Apr 2014 14:47:23 -0700
+Subject: mm, compaction: ignore pageblock skip when manually invoking compaction
+
+From: David Rientjes <rientjes@google.com>
+
+commit 91ca9186484809c57303b33778d841cc28f696ed upstream.
+
+The cached pageblock skip hint should be ignored when compaction is
+triggered through /proc/sys/vm/compact_memory, so that all eligible
+memory is isolated. Manually invoking compaction is known to be
+expensive and is done mainly for debugging, so there is no need to skip
+pageblocks based on heuristics.
+
+Signed-off-by: David Rientjes <rientjes@google.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Cc: Rik van Riel <riel@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/compaction.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1193,6 +1193,7 @@ static void compact_node(int nid)
+ struct compact_control cc = {
+ .order = -1,
+ .sync = true,
++ .ignore_skip_hint = true,
+ };
+
+ __compact_pgdat(NODE_DATA(nid), &cc);
--- /dev/null
+From 6d2be915e589b58cb11418cbe1f22ff90732b6ac Mon Sep 17 00:00:00 2001
+From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Date: Thu, 3 Apr 2014 14:48:23 -0700
+Subject: mm/readahead.c: fix readahead failure for memoryless NUMA nodes and limit readahead pages
+
+From: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+
+commit 6d2be915e589b58cb11418cbe1f22ff90732b6ac upstream.
+
+Currently max_sane_readahead() returns zero on a CPU whose NUMA node has
+no local memory, which leads to readahead failure. Fix this readahead
+failure by returning the minimum of (requested pages, 512). Users running
+applications that need readahead, such as streaming applications, on a
+memoryless CPU see a considerable boost in performance.
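+
+For example, with 4 KiB pages the cap of 512 pages corresponds to 2 MiB of
+readahead; the patch below expresses it as (512 * 4096) / PAGE_CACHE_SIZE,
+so the same 2 MiB worth of pages is allowed on configurations with larger
+pages, e.g. (512 * 4096) / 65536 = 32 pages with 64 KiB pages.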
+
+Result:
+
+A fadvise experiment with FADV_WILLNEED on a PPC machine with a memoryless
+CPU and a 1GB testfile (12 iterations) yielded around a 46.66% improvement.
+
+A fadvise experiment with FADV_WILLNEED on an x240 machine (1GB testfile,
+32GB* 4G RAM numa machine, 12 iterations) showed no impact on the normal
+NUMA cases with the patch.
+
+   Kernel     Avg      Stddev
+   base       7.4975   3.92%
+   patched    7.4174   3.26%
+
+[Andrew: making return value PAGE_SIZE independent]
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
+Acked-by: Jan Kara <jack@suse.cz>
+Cc: Wu Fengguang <fengguang.wu@intel.com>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/readahead.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -233,14 +233,14 @@ int force_page_cache_readahead(struct ad
+ return 0;
+ }
+
++#define MAX_READAHEAD ((512*4096)/PAGE_CACHE_SIZE)
+ /*
+ * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
+ * sensible upper limit.
+ */
+ unsigned long max_sane_readahead(unsigned long nr)
+ {
+- return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
+- + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
++ return min(nr, MAX_READAHEAD);
+ }
+
+ /*
mm-compaction-avoid-isolating-pinned-pages.patch
mm-compaction-disallow-high-order-page-for-migration-target.patch
mm-compaction-do-not-call-suitable_migration_target-on-every-page.patch
+drbd-fix-regression-out-of-mem-failed-to-invoke-fence-peer-helper.patch
+mm-compaction-change-the-timing-to-check-to-drop-the-spinlock.patch
+mm-compaction-check-pageblock-suitability-once-per-pageblock.patch
+mm-compaction-clean-up-code-on-success-of-ballon-isolation.patch
+mm-compaction-determine-isolation-mode-only-once.patch
+mm-compaction-ignore-pageblock-skip-when-manually-invoking-compaction.patch
+mm-readahead.c-fix-readahead-failure-for-memoryless-numa-nodes-and-limit-readahead-pages.patch