--- /dev/null
+From eb03aa008561004257900983193d024e57abdd96 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Fri, 7 Oct 2016 17:01:13 -0700
+Subject: mm/hugetlb: improve locking in dissolve_free_huge_pages()
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit eb03aa008561004257900983193d024e57abdd96 upstream.
+
+For every pfn aligned to minimum_order, dissolve_free_huge_pages() calls
+dissolve_free_huge_page(), which takes the hugetlb spinlock, even if the
+page is not a hugepage at all or is a hugepage that is in use.
+
+Improve this by doing the PageHuge() and page_count() checks in
+dissolve_free_huge_pages() itself, before calling dissolve_free_huge_page().
+In dissolve_free_huge_page(), those checks must then be revalidated while
+holding the spinlock, since the page state may have changed in the meantime.
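+
+This is the usual "unlocked pre-check, then revalidate under the lock"
+pattern.  As a rough userspace illustration only (not kernel code; struct
+entry, dissolve_entry() and dissolve_all() are made-up names for this
+sketch), the structure looks like this:
+
+	#include <pthread.h>
+	#include <stdbool.h>
+	#include <stddef.h>
+
+	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
+
+	struct entry {
+		bool huge;		/* stands in for PageHuge() */
+		int refcount;		/* stands in for page_count() */
+	};
+
+	/* Called without table_lock held; takes it and revalidates. */
+	static void dissolve_entry(struct entry *e)
+	{
+		pthread_mutex_lock(&table_lock);
+		/*
+		 * Revalidate under the lock: another thread may have
+		 * changed the entry since the unlocked check.
+		 */
+		if (e->huge && e->refcount == 0)
+			e->huge = false;	/* the actual "dissolve" */
+		pthread_mutex_unlock(&table_lock);
+	}
+
+	static void dissolve_all(struct entry *tab, size_t n)
+	{
+		size_t i;
+
+		for (i = 0; i < n; i++) {
+			/* Cheap unlocked pre-check: skip entries that
+			 * cannot qualify, without taking the lock.
+			 */
+			if (tab[i].huge && tab[i].refcount == 0)
+				dissolve_entry(&tab[i]);
+		}
+	}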
+
+Link: http://lkml.kernel.org/r/20160926172811.94033-4-gerald.schaefer@de.ibm.com
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
+Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Mike Kravetz <mike.kravetz@oracle.com>
+Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/hugetlb.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1476,14 +1476,20 @@ out:
+ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+ {
+ unsigned long pfn;
++ struct page *page;
+ int rc = 0;
+
+ if (!hugepages_supported())
+ return rc;
+
+- for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
+- if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
+- break;
++ for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
++ page = pfn_to_page(pfn);
++ if (PageHuge(page) && !page_count(page)) {
++ rc = dissolve_free_huge_page(page);
++ if (rc)
++ break;
++ }
++ }
+
+ return rc;
+ }