--- /dev/null
+From e6c495a96ce02574e765d5140039a64c8d4e8c9e Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <Vineet.Gupta1@synopsys.com>
+Date: Wed, 3 Jul 2013 15:03:31 -0700
+Subject: mm: fix the TLB range flushed when __tlb_remove_page() runs out of slots
+
+From: Vineet Gupta <Vineet.Gupta1@synopsys.com>
+
+commit e6c495a96ce02574e765d5140039a64c8d4e8c9e upstream.
+
+zap_pte_range loops from @addr to @end. In the middle, if it runs out of
+batching slots, TLB entries need to be flushed for @start to @interim,
+NOT @interim to @end.
+
+Since ARC port doesn't use page free batching I can't test it myself but
+this seems like the right thing to do.
+
+Observed this when working on a fix for the issue at thread:
+http://www.spinics.net/lists/linux-arch/msg21736.html
+
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/memory.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1101,6 +1101,7 @@ static unsigned long zap_pte_range(struc
+ spinlock_t *ptl;
+ pte_t *start_pte;
+ pte_t *pte;
++ unsigned long range_start = addr;
+
+ again:
+ init_rss_vec(rss);
+@@ -1206,12 +1207,14 @@ again:
+ force_flush = 0;
+
+ #ifdef HAVE_GENERIC_MMU_GATHER
+- tlb->start = addr;
+- tlb->end = end;
++ tlb->start = range_start;
++ tlb->end = addr;
+ #endif
+ tlb_flush_mmu(tlb);
+- if (addr != end)
++ if (addr != end) {
++ range_start = addr;
+ goto again;
++ }
+ }
+
+ return addr;