--- /dev/null
+From 951bf2011307f7fc6139f493f21d34e1504ccbbd Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 25 Oct 2011 02:30:50 +0000
+Subject: net: Unlock sock before calling sk_free()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit b0691c8ee7c28a72748ff32e91b165ec12ae4de6 ]
+
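+The failure path freed newsk while it was still bh-locked: sk_clone()
+takes bh_lock_sock() on the freshly copied sock earlier in the function,
+and the xfrm error path called sk_free() without dropping it. A minimal
+sketch of the fixed error path (schematic, mirroring the hunk below):
+
+	/* in sk_clone(), with bh_lock_sock(newsk) already held */
+	newsk->sk_destruct = NULL;	/* still a raw copy of the parent */
+	bh_unlock_sock(newsk);		/* drop the lock before freeing */
+	sk_free(newsk);
+	newsk = NULL;
+	goto out;
+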
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+---
+ net/core/sock.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1260,6 +1260,7 @@ struct sock *sk_clone(const struct sock
+ /* It is still raw copy of parent, so invalidate
+ * destructor and make plain sk_free() */
+ newsk->sk_destruct = NULL;
++ bh_unlock_sock(newsk);
+ sk_free(newsk);
+ newsk = NULL;
+ goto out;
--- /dev/null
+From 405e44f2e312dd5dd63e5a9f459bffcbcd4368ef Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:08 -0700
+Subject: powerpc: get_hugepte() don't put_page() the wrong page
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 405e44f2e312dd5dd63e5a9f459bffcbcd4368ef upstream.
+
+"page" may have changed to point to the next hugepage after the loop
+completed, The references have been taken on the head page, so the
+put_page must happen there too.
+
+This is a longstanding issue that predates the thp inclusion.
+
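+To see why the put_page must move to the head page, recall how the pins
+are taken in gup_hugepte() (a condensed sketch of the surrounding code,
+not a verbatim quote):
+
+	head = pte_page(pte);
+	page = head + ((addr & (sz - 1)) >> PAGE_SHIFT);
+	do {
+		pages[*nr] = page;
+		(*nr)++;
+		page++;		/* cursor walks across the hugepage */
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs))	/* all pins on head */
+		return 0;
+
+After the fill loop "page" points one past the last page stored, so the
+rollback must drop the pins from "head", where they were actually taken.
+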
+It's totally unclear why these page_cache_add_speculative and
+pte_val(pte) != pte_val(*ptep) checks are necessary across all the
+powerpc gup_fast code when x86 doesn't need any of that: there's no way
+the page can be freed while irqs are disabled, so we're guaranteed the
+atomic_inc will happen on a page with page_count > 0 (so the speculative
+check isn't needed).
+
+The pte check is also meaningless on x86: there is no need to roll back
+on x86 if the pte changed, because the pte can still change a CPU tick
+after the check succeeded, and it won't be rolled back in that case. The
+important thing is that we got a reference on a valid page that was
+mapped there a CPU tick ago. So, not knowing the software TLB refill
+code of ppc64 in great detail, I'm not removing the "speculative"
+page_count increase and the pte checks across all the code, but unless
+there's a strong reason for them they should be cleaned up later too.
+
+If a pte could change from huge to non-huge (as could happen with THP),
+passing a pte_t *ptep to gup_hugepte() would also require repeating the
+is_hugepd check inside gup_hugepte(), but that shouldn't happen with
+hugetlbfs only, so I'm not altering that.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -429,7 +429,7 @@ static noinline int gup_hugepte(pte_t *p
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ /* Could be optimized better */
+ while (*nr) {
+- put_page(page);
++ put_page(head);
+ (*nr)--;
+ }
+ }
--- /dev/null
+From cf592bf768c4fa40282b8fce58a80820065de2cb Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:19 -0700
+Subject: powerpc: gup_huge_pmd() return 0 if pte changes
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit cf592bf768c4fa40282b8fce58a80820065de2cb upstream.
+
+powerpc didn't return 0 in that case; if it's rolling back the *nr
+pointer, it should also return zero to avoid adding pages to the array
+at the wrong offset.
+
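+The fast-gup walk uses *nr both as the running pin count and as the fill
+index into pages[], and the stored pages must stay in address order.
+Schematically, the caller does (a sketch of the calling convention, not
+a verbatim quote):
+
+	if (!gup_hugepte(ptep, sz, addr, next, write, pages, nr))
+		return 0;	/* make gup_fast fall back to the slow path */
+
+Rolling *nr back while still returning 1 lets the walk continue, so
+pages for later addresses land at the rolled-back index and pages[i] no
+longer corresponds to start + i * PAGE_SIZE.
+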
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -443,16 +443,17 @@ static noinline int gup_hugepte(pte_t *p
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+- } else {
+- /*
+- * Any tail page need their mapcount reference taken
+- * before we return.
+- */
+- while (refs--) {
+- if (PageTail(tail))
+- get_huge_page_tail(tail);
+- tail++;
+- }
++ return 0;
++ }
++
++ /*
++ * Any tail page need their mapcount reference taken before we
++ * return.
++ */
++ while (refs--) {
++ if (PageTail(tail))
++ get_huge_page_tail(tail);
++ tail++;
+ }
+
+ return 1;
--- /dev/null
+From 8596468487e2062cae2aad56e973784e03959245 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:11 -0700
+Subject: powerpc: gup_hugepte() avoid freeing the head page too many times
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 8596468487e2062cae2aad56e973784e03959245 upstream.
+
+We have only taken "refs" pins on the head page, not "*nr" pins.
+
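+Concretely (a worked example, not part of the original changelog): if
+earlier ranges had already pinned five pages (*nr == 5) and this
+hugepage contributed three (refs == 3), the old rollback
+
+	while (*nr) {
+		put_page(head);
+		(*nr)--;
+	}
+
+dropped five references from the head page even though only three had
+been taken via page_cache_add_speculative(head, refs), underflowing the
+head page's refcount. The fix below undoes exactly the refs pins this
+call took and subtracts the same amount from *nr.
+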
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -428,10 +428,9 @@ static noinline int gup_hugepte(pte_t *p
+
+ if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+ /* Could be optimized better */
+- while (*nr) {
++ *nr -= refs;
++ while (refs--)
+ put_page(head);
+- (*nr)--;
+- }
+ }
+
+ return 1;
--- /dev/null
+From 3526741f0964c88bc2ce511e1078359052bf225b Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:15 -0700
+Subject: powerpc: gup_hugepte() support THP based tail recounting
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 3526741f0964c88bc2ce511e1078359052bf225b upstream.
+
+Up to this point the code assumed old refcounting for hugepages (pre-thp).
+This updates the code directly to the thp mapcount tail page refcounting.
+
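+Under the thp scheme (summarized here; not part of the original
+changelog) a gup pin on a hugepage is taken once on the head page's
+_count via page_cache_add_speculative(head, refs), while each tail page
+handed back in pages[] records its pin in _mapcount, which is otherwise
+unused on tail pages. That is what the get_huge_page_tail() helper
+added below does, under the invariants it asserts: the tail's _mapcount
+never goes negative and its _count stays 0, because all real references
+live on the head page.
+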
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -385,12 +385,23 @@ follow_huge_pmd(struct mm_struct *mm, un
+ return NULL;
+ }
+
++static inline void get_huge_page_tail(struct page *page)
++{
++ /*
++ * __split_huge_page_refcount() cannot run
++ * from under us.
++ */
++ VM_BUG_ON(page_mapcount(page) < 0);
++ VM_BUG_ON(atomic_read(&page->_count) != 0);
++ atomic_inc(&page->_mapcount);
++}
++
+ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+ {
+ unsigned long mask;
+ unsigned long pte_end;
+- struct page *head, *page;
++ struct page *head, *page, *tail;
+ pte_t pte;
+ int refs;
+
+@@ -413,6 +424,7 @@ static noinline int gup_hugepte(pte_t *p
+ head = pte_page(pte);
+
+ page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
++ tail = page;
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+@@ -431,6 +443,16 @@ static noinline int gup_hugepte(pte_t *p
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
++ } else {
++ /*
++ * Any tail page need their mapcount reference taken
++ * before we return.
++ */
++ while (refs--) {
++ if (PageTail(tail))
++ get_huge_page_tail(tail);
++ tail++;
++ }
+ }
+
+ return 1;
--- /dev/null
+From 2839bdc1bfc0af76a2f0f11eca011590520a04fa Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:03 -0700
+Subject: powerpc: remove superfluous PageTail checks on the pte gup_fast
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 2839bdc1bfc0af76a2f0f11eca011590520a04fa upstream.
+
+This part of gup_fast doesn't seem capable of handling hugetlbfs ptes;
+those should be handled by gup_hugepd only, so these checks are
+superfluous.
+
+Plus, if this wasn't a noop, it would have oopsed: the insistence on
+using the speculative refcounting would trigger a VM_BUG_ON if a tail
+page were encountered in page_cache_get_speculative().
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Acked-by: David Gibson <david@gibson.dropbear.id.au>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/gup.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/arch/powerpc/mm/gup.c
++++ b/arch/powerpc/mm/gup.c
+@@ -16,17 +16,6 @@
+
+ #ifdef __HAVE_ARCH_PTE_SPECIAL
+
+-static inline void get_huge_page_tail(struct page *page)
+-{
+- /*
+- * __split_huge_page_refcount() cannot run
+- * from under us.
+- */
+- VM_BUG_ON(page_mapcount(page) < 0);
+- VM_BUG_ON(atomic_read(&page->_count) != 0);
+- atomic_inc(&page->_mapcount);
+-}
+-
+ /*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+@@ -58,8 +47,6 @@ static noinline int gup_pte_range(pmd_t
+ put_page(page);
+ return 0;
+ }
+- if (PageTail(page))
+- get_huge_page_tail(page);
+ pages[*nr] = page;
+ (*nr)++;
+
--- /dev/null
+From 0693bc9ce2cc4f6a1b9c3c05790fc149a74c0b87 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:28 -0700
+Subject: s390: gup_huge_pmd() return 0 if pte changes
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 0693bc9ce2cc4f6a1b9c3c05790fc149a74c0b87 upstream.
+
+s390 didn't return 0 in that case; if it's rolling back the *nr
+pointer, it should also return zero to avoid adding pages to the array
+at the wrong offset.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/s390/mm/gup.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -93,16 +93,17 @@ static inline int gup_huge_pmd(pmd_t *pm
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
+- } else {
+- /*
+- * Any tail page need their mapcount reference taken
+- * before we return.
+- */
+- while (refs--) {
+- if (PageTail(tail))
+- get_huge_page_tail(tail);
+- tail++;
+- }
++ return 0;
++ }
++
++ /*
++ * Any tail page need their mapcount reference taken before we
++ * return.
++ */
++ while (refs--) {
++ if (PageTail(tail))
++ get_huge_page_tail(tail);
++ tail++;
+ }
+
+ return 1;
--- /dev/null
+From 220a2eb228d032acde60e9fd044ca802706ff583 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:25 -0700
+Subject: s390: gup_huge_pmd() support THP tail recounting
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit 220a2eb228d032acde60e9fd044ca802706ff583 upstream.
+
+Up to this point the code assumed old refcounting for hugepages (pre-thp).
+This updates the code directly to the thp mapcount tail page refcounting.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/s390/mm/gup.c | 24 +++++++++++++++++++++++-
+ 1 file changed, 23 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -48,11 +48,22 @@ static inline int gup_pte_range(pmd_t *p
+ return 1;
+ }
+
++static inline void get_huge_page_tail(struct page *page)
++{
++ /*
++ * __split_huge_page_refcount() cannot run
++ * from under us.
++ */
++ VM_BUG_ON(page_mapcount(page) < 0);
++ VM_BUG_ON(atomic_read(&page->_count) != 0);
++ atomic_inc(&page->_mapcount);
++}
++
+ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+ {
+ unsigned long mask, result;
+- struct page *head, *page;
++ struct page *head, *page, *tail;
+ int refs;
+
+ result = write ? 0 : _SEGMENT_ENTRY_RO;
+@@ -64,6 +75,7 @@ static inline int gup_huge_pmd(pmd_t *pm
+ refs = 0;
+ head = pmd_page(pmd);
+ page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ tail = page;
+ do {
+ VM_BUG_ON(compound_head(page) != head);
+ pages[*nr] = page;
+@@ -81,6 +93,16 @@ static inline int gup_huge_pmd(pmd_t *pm
+ *nr -= refs;
+ while (refs--)
+ put_page(head);
++ } else {
++ /*
++ * Any tail page need their mapcount reference taken
++ * before we return.
++ */
++ while (refs--) {
++ if (PageTail(tail))
++ get_huge_page_tail(tail);
++ tail++;
++ }
+ }
+
+ return 1;
xhci-if-no-endpoints-changed-don-t-issue-bw-command.patch
xhci-test-and-clear-rwc-bit.patch
xhci-clear-plc-for-usb2-root-hub-ports.patch
+powerpc-remove-superfluous-pagetail-checks-on-the-pte-gup_fast.patch
+powerpc-get_hugepte-don-t-put_page-the-wrong-page.patch
+powerpc-gup_hugepte-avoid-freeing-the-head-page-too-many-times.patch
+powerpc-gup_hugepte-support-thp-based-tail-recounting.patch
+powerpc-gup_huge_pmd-return-0-if-pte-changes.patch
+s390-gup_huge_pmd-support-thp-tail-recounting.patch
+s390-gup_huge_pmd-return-0-if-pte-changes.patch
+sparc-gup_pte_range-support-thp-based-tail-recounting.patch
+thp-share-get_huge_page_tail.patch
+net-unlock-sock-before-calling-sk_free.patch
--- /dev/null
+From e0d85a366c2300efd230ef82a9b22110b0658331 Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:31 -0700
+Subject: sparc: gup_pte_range() support THP based tail recounting
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit e0d85a366c2300efd230ef82a9b22110b0658331 upstream.
+
+Up to this point the code assumed old refcounting for hugepages (pre-thp).
+This updates the code directly to the thp mapcount tail page refcounting.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Acked-by: David Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/sparc/mm/gup.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -12,6 +12,17 @@
+ #include <linux/rwsem.h>
+ #include <asm/pgtable.h>
+
++static inline void get_huge_page_tail(struct page *page)
++{
++ /*
++ * __split_huge_page_refcount() cannot run
++ * from under us.
++ */
++ VM_BUG_ON(page_mapcount(page) < 0);
++ VM_BUG_ON(atomic_read(&page->_count) != 0);
++ atomic_inc(&page->_mapcount);
++}
++
+ /*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+@@ -56,6 +67,8 @@ static noinline int gup_pte_range(pmd_t
+ put_page(head);
+ return 0;
+ }
++ if (head != page)
++ get_huge_page_tail(page);
+
+ pages[*nr] = page;
+ (*nr)++;
--- /dev/null
+From b35a35b556f5e6b7993ad0baf20173e75c09ce8c Mon Sep 17 00:00:00 2001
+From: Andrea Arcangeli <aarcange@redhat.com>
+Date: Wed, 2 Nov 2011 13:37:36 -0700
+Subject: thp: share get_huge_page_tail()
+
+From: Andrea Arcangeli <aarcange@redhat.com>
+
+commit b35a35b556f5e6b7993ad0baf20173e75c09ce8c upstream.
+
+This avoids duplicating the function in every arch gup_fast.
+
+Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <jweiner@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: David Gibson <david@gibson.dropbear.id.au>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: David Miller <davem@davemloft.net>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
+
+---
+ arch/powerpc/mm/hugetlbpage.c | 11 -----------
+ arch/s390/mm/gup.c | 11 -----------
+ arch/sparc/mm/gup.c | 11 -----------
+ arch/x86/mm/gup.c | 11 -----------
+ include/linux/mm.h | 11 +++++++++++
+ 5 files changed, 11 insertions(+), 44 deletions(-)
+
+--- a/arch/powerpc/mm/hugetlbpage.c
++++ b/arch/powerpc/mm/hugetlbpage.c
+@@ -385,17 +385,6 @@ follow_huge_pmd(struct mm_struct *mm, un
+ return NULL;
+ }
+
+-static inline void get_huge_page_tail(struct page *page)
+-{
+- /*
+- * __split_huge_page_refcount() cannot run
+- * from under us.
+- */
+- VM_BUG_ON(page_mapcount(page) < 0);
+- VM_BUG_ON(atomic_read(&page->_count) != 0);
+- atomic_inc(&page->_mapcount);
+-}
+-
+ static noinline int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+ {
+--- a/arch/s390/mm/gup.c
++++ b/arch/s390/mm/gup.c
+@@ -48,17 +48,6 @@ static inline int gup_pte_range(pmd_t *p
+ return 1;
+ }
+
+-static inline void get_huge_page_tail(struct page *page)
+-{
+- /*
+- * __split_huge_page_refcount() cannot run
+- * from under us.
+- */
+- VM_BUG_ON(page_mapcount(page) < 0);
+- VM_BUG_ON(atomic_read(&page->_count) != 0);
+- atomic_inc(&page->_mapcount);
+-}
+-
+ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+ {
+--- a/arch/sparc/mm/gup.c
++++ b/arch/sparc/mm/gup.c
+@@ -12,17 +12,6 @@
+ #include <linux/rwsem.h>
+ #include <asm/pgtable.h>
+
+-static inline void get_huge_page_tail(struct page *page)
+-{
+- /*
+- * __split_huge_page_refcount() cannot run
+- * from under us.
+- */
+- VM_BUG_ON(page_mapcount(page) < 0);
+- VM_BUG_ON(atomic_read(&page->_count) != 0);
+- atomic_inc(&page->_mapcount);
+-}
+-
+ /*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+--- a/arch/x86/mm/gup.c
++++ b/arch/x86/mm/gup.c
+@@ -108,17 +108,6 @@ static inline void get_head_page_multipl
+ SetPageReferenced(page);
+ }
+
+-static inline void get_huge_page_tail(struct page *page)
+-{
+- /*
+- * __split_huge_page_refcount() cannot run
+- * from under us.
+- */
+- VM_BUG_ON(page_mapcount(page) < 0);
+- VM_BUG_ON(atomic_read(&page->_count) != 0);
+- atomic_inc(&page->_mapcount);
+-}
+-
+ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
+ unsigned long end, int write, struct page **pages, int *nr)
+ {
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -376,6 +376,17 @@ static inline int page_count(struct page
+ return atomic_read(&compound_head(page)->_count);
+ }
+
++static inline void get_huge_page_tail(struct page *page)
++{
++ /*
++ * __split_huge_page_refcount() cannot run
++ * from under us.
++ */
++ VM_BUG_ON(page_mapcount(page) < 0);
++ VM_BUG_ON(atomic_read(&page->_count) != 0);
++ atomic_inc(&page->_mapcount);
++}
++
+ extern bool __get_page_tail(struct page *page);
+
+ static inline void get_page(struct page *page)