mm/hugetlb: check for reserved hugepages during memory offline
author Gerald Schaefer <gerald.schaefer@de.ibm.com>
Sat, 8 Oct 2016 00:01:10 +0000 (17:01 -0700)
committer Ben Hutchings <ben@decadent.org.uk>
Thu, 23 Feb 2017 03:54:10 +0000 (03:54 +0000)
commit 082d5b6b60e9f25e1511557fcfcb21eedd267446 upstream.

In dissolve_free_huge_pages(), free hugepages are dissolved without
checking whether enough free hugepages remain to satisfy existing
hugepage reservations.

Fix this by adding a return value to dissolve_free_huge_pages() and
checking h->free_huge_pages against h->resv_huge_pages before a page is
dissolved. Note that this may lead to a situation where
dissolve_free_huge_page() returns an error, all free hugepages dissolved
before that error are lost, and the memory block still cannot be set
offline.
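
For background, a hugepage reservation is typically created when a task
mmap()s hugepage-backed memory without faulting it in yet: the page stays
in the free pool but is promised to the mapping. A minimal userspace
sketch of that state (illustrative only; assumes 2 MiB hugepages and a
configured hugepage pool):

	#define _GNU_SOURCE	/* MAP_HUGETLB / MAP_ANONYMOUS */
	#include <stdio.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define HPAGE_SIZE	(2UL << 20)	/* assumes 2 MiB hugepages */

	int main(void)
	{
		/* Without MAP_NORESERVE, mmap() reserves one free hugepage
		 * here, but no page is allocated until the first touch. */
		void *p = mmap(NULL, HPAGE_SIZE, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
			       -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		pause();	/* hold the reservation; try the offline now */
		return 0;
	}

While such a process is alive, the reserved page may sit in the free pool;
dissolving it would break the kernel's promise, which is exactly the case
this patch turns into -EBUSY.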

Fixes: c8721bbb ("mm: memory-hotplug: enable memory hotplug to handle hugepage")
Link: http://lkml.kernel.org/r/20160926172811.94033-3-gerald.schaefer@de.ibm.com
Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Aneesh Kumar K . V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Rui Teng <rui.teng@linux.vnet.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
include/linux/hugetlb.h
mm/hugetlb.c
mm/memory_hotplug.c

diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 62e94d2517bdaed62ef144ab0a2ad0a39a5678fc..6bc75164e68f3c9f82807948ffb6c331c8e87176 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -396,8 +396,8 @@ static inline pgoff_t basepage_index(struct page *page)
        return __basepage_index(page);
 }
 
-extern void dissolve_free_huge_pages(unsigned long start_pfn,
-                                    unsigned long end_pfn);
+extern int dissolve_free_huge_pages(unsigned long start_pfn,
+                                   unsigned long end_pfn);
 static inline int hugepage_migration_supported(struct hstate *h)
 {
 #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
@@ -452,7 +452,7 @@ static inline pgoff_t basepage_index(struct page *page)
 {
        return page->index;
 }
-#define dissolve_free_huge_pages(s, e) do {} while (0)
+#define dissolve_free_huge_pages(s, e) 0
 #define hugepage_migration_supported(h)        0
 
 static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
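
Note why the !CONFIG_HUGETLB_PAGE stub above changes from do {} while (0)
to plain 0: callers now assign the result, and a do/while statement is not
an expression. A tiny standalone illustration (hypothetical names, not
from the patch):

	#define dissolve_stub_old(s, e)	do {} while (0)	/* statement only */
	#define dissolve_stub_new(s, e)	0	/* usable as an rvalue */

	static int offline_demo(void)
	{
		int ret;

		/* ret = dissolve_stub_old(0, 0);  -- would not compile */
		ret = dissolve_stub_new(0, 0);	/* 0: nothing to dissolve */
		return ret;
	}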
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5b8799a11133009d8dd568b64c2289e5464c9d1b..fca6c6c91d76c207c075dc284592b2fa9ad34fb9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1067,21 +1067,31 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages.
+ * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
+ * number of free hugepages would be reduced below the number of reserved
+ * hugepages.
  */
-static void dissolve_free_huge_page(struct page *page)
+static int dissolve_free_huge_page(struct page *page)
 {
+       int rc = 0;
+
        spin_lock(&hugetlb_lock);
        if (PageHuge(page) && !page_count(page)) {
                struct page *head = compound_head(page);
                struct hstate *h = page_hstate(head);
                int nid = page_to_nid(head);
+               if (h->free_huge_pages - h->resv_huge_pages == 0) {
+                       rc = -EBUSY;
+                       goto out;
+               }
                list_del(&head->lru);
                h->free_huge_pages--;
                h->free_huge_pages_node[nid]--;
                update_and_free_page(h, head);
        }
+out:
        spin_unlock(&hugetlb_lock);
+       return rc;
 }
 
 /*
@@ -1089,16 +1099,22 @@ static void dissolve_free_huge_page(struct page *page)
  * make specified memory blocks removable from the system.
  * Note that this will dissolve a free gigantic hugepage completely, if any
  * part of it lies within the given range.
+ * Also note that if dissolve_free_huge_page() returns with an error, all
+ * free hugepages that were dissolved before that error are lost.
  */
-void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
+int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
+       int rc = 0;
 
        if (!hugepages_supported())
-               return;
+               return rc;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
-               dissolve_free_huge_page(pfn_to_page(pfn));
+               if (rc = dissolve_free_huge_page(pfn_to_page(pfn)))
+                       break;
+
+       return rc;
 }
 
 static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
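
The check added in dissolve_free_huge_page() enforces a simple per-hstate
invariant: the free pool may only shrink while it still exceeds the
reserved count. Expressed as a standalone sketch (a pared-down stand-in
for struct hstate, for illustration only):

	#include <stdbool.h>

	struct hstate_counts {
		unsigned long free_huge_pages;	/* pages in the free pool */
		unsigned long resv_huge_pages;	/* pages promised out */
	};

	/* Dissolving is safe only while free > resv; at free == resv,
	 * every remaining free page backs a reservation, hence -EBUSY. */
	static bool can_dissolve(const struct hstate_counts *h)
	{
		return h->free_huge_pages > h->resv_huge_pages;
	}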
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 4ec1d4d7521ad343426af86c424545257037beab..4174487a1261e2b6698219903b40f05345ad572d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1732,7 +1732,9 @@ repeat:
         * dissolve free hugepages in the memory block before doing offlining
         * actually in order to make hugetlbfs's object counting consistent.
         */
-       dissolve_free_huge_pages(start_pfn, end_pfn);
+       ret = dissolve_free_huge_pages(start_pfn, end_pfn);
+       if (ret)
+               goto failed_removal;
        /* check again */
        offlined_pages = check_pages_isolated(start_pfn, end_pfn);
        if (offlined_pages < 0) {
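
The two counters that decide between success and -EBUSY here are visible
from userspace as HugePages_Free and HugePages_Rsvd in /proc/meminfo; when
they are equal, offlining a block that contains a free hugepage will now
fail until the reservations are released. A small watcher (illustrative
only):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/meminfo", "r");

		if (!f)
			return 1;
		/* Print only the hugepage free/reserved counters. */
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "HugePages_Free", 14) ||
			    !strncmp(line, "HugePages_Rsvd", 14))
				fputs(line, stdout);
		fclose(f);
		return 0;
	}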