mm/hugetlb: remove unnecessary holding of hugetlb_lock
author    Ge Yang <yangge1116@126.com>
          Tue, 27 May 2025 03:36:50 +0000 (11:36 +0800)
committer Andrew Morton <akpm@linux-foundation.org>
          Wed, 25 Jun 2025 22:55:03 +0000 (15:55 -0700)
In isolate_or_dissolve_huge_folio(), the hugetlb_lock is acquired only to
obtain the correct hstate, which is then passed to
alloc_and_dissolve_hugetlb_folio().

alloc_and_dissolve_hugetlb_folio() itself also acquires the hugetlb_lock,
so let it obtain the hstate on its own; isolate_or_dissolve_huge_folio()
then no longer needs to take the lock at all.  The folio_test_hugetlb()
check is kept in isolate_or_dissolve_huge_folio() as an unlocked fast-path
check, so the normal path is not disrupted by needlessly holding the
hugetlb_lock.

replace_free_hugepage_folios() has the same issue, and we should address
it as well.
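
The resulting flow, condensed from the diff below (a sketch only; unrelated
details are elided and marked with "..."):

    /*
     * Caller side (isolate_or_dissolve_huge_folio() and
     * replace_free_hugepage_folios()): an unlocked fast check, so the
     * normal path no longer takes hugetlb_lock just to look up the hstate.
     */
    if (!folio_test_hugetlb(folio))
            return 0;
    ...
    ret = alloc_and_dissolve_hugetlb_folio(folio, list);

    /*
     * Callee side: re-validate the folio and look up the hstate under
     * hugetlb_lock, since the folio may have been dissolved from under
     * our feet in the meantime.
     */
    static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
                            struct list_head *list)
    {
            struct hstate *h;
            ...
    retry:
            spin_lock_irq(&hugetlb_lock);
            if (!folio_test_hugetlb(old_folio)) {
                    /* Raced with dissolution: nothing left to replace. */
                    ...
            }
            ...
            h = folio_hstate(old_folio);    /* obtained here, under the lock */
            ...
    }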

Addresses a possible performance problem introduced by the hotfix
113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when
replacing free hugetlb folios").

Link: https://lkml.kernel.org/r/1748317010-16272-1-git-send-email-yangge1116@126.com
Fixes: 113ed54ad276 ("mm/hugetlb: fix kernel NULL pointer dereference when replacing free hugetlb folios")
Signed-off-by: Ge Yang <yangge1116@126.com>
Suggested-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Muchun Song <muchun.song@linux.dev>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Barry Song <21cnbao@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8746ed2fec135b646ba28536fbb4c816461a058a..9dc95eac558cde833ad406f11fcfed61da183821 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2787,20 +2787,24 @@ void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 /*
  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
  * the old one
- * @h: struct hstate old page belongs to
  * @old_folio: Old folio to dissolve
  * @list: List to isolate the page in case we need to
  * Returns 0 on success, otherwise negated error.
  */
-static int alloc_and_dissolve_hugetlb_folio(struct hstate *h,
-                       struct folio *old_folio, struct list_head *list)
+static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
+                       struct list_head *list)
 {
-       gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
+       gfp_t gfp_mask;
+       struct hstate *h;
        int nid = folio_nid(old_folio);
        struct folio *new_folio = NULL;
        int ret = 0;
 
 retry:
+       /*
+        * The old_folio might have been dissolved from under our feet, so make sure
+        * to carefully check the state under the lock.
+        */
        spin_lock_irq(&hugetlb_lock);
        if (!folio_test_hugetlb(old_folio)) {
                /*
@@ -2829,8 +2833,10 @@ retry:
                cond_resched();
                goto retry;
        } else {
+               h = folio_hstate(old_folio);
                if (!new_folio) {
                        spin_unlock_irq(&hugetlb_lock);
+                       gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
                        new_folio = alloc_buddy_hugetlb_folio(h, gfp_mask, nid,
                                                              NULL, NULL);
                        if (!new_folio)
@@ -2874,35 +2880,24 @@ free_new:
 
 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
 {
-       struct hstate *h;
        int ret = -EBUSY;
 
-       /*
-        * The page might have been dissolved from under our feet, so make sure
-        * to carefully check the state under the lock.
-        * Return success when racing as if we dissolved the page ourselves.
-        */
-       spin_lock_irq(&hugetlb_lock);
-       if (folio_test_hugetlb(folio)) {
-               h = folio_hstate(folio);
-       } else {
-               spin_unlock_irq(&hugetlb_lock);
+       /* Not to disrupt normal path by vainly holding hugetlb_lock */
+       if (!folio_test_hugetlb(folio))
                return 0;
-       }
-       spin_unlock_irq(&hugetlb_lock);
 
        /*
         * Fence off gigantic pages as there is a cyclic dependency between
         * alloc_contig_range and them. Return -ENOMEM as this has the effect
         * of bailing out right away without further retrying.
         */
-       if (hstate_is_gigantic(h))
+       if (folio_order(folio) > MAX_PAGE_ORDER)
                return -ENOMEM;
 
        if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
                ret = 0;
        else if (!folio_ref_count(folio))
-               ret = alloc_and_dissolve_hugetlb_folio(h, folio, list);
+               ret = alloc_and_dissolve_hugetlb_folio(folio, list);
 
        return ret;
 }
@@ -2916,7 +2911,6 @@ int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
  */
 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
 {
-       struct hstate *h;
        struct folio *folio;
        int ret = 0;
 
@@ -2925,23 +2919,9 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
        while (start_pfn < end_pfn) {
                folio = pfn_folio(start_pfn);
 
-               /*
-                * The folio might have been dissolved from under our feet, so make sure
-                * to carefully check the state under the lock.
-                */
-               spin_lock_irq(&hugetlb_lock);
-               if (folio_test_hugetlb(folio)) {
-                       h = folio_hstate(folio);
-               } else {
-                       spin_unlock_irq(&hugetlb_lock);
-                       start_pfn++;
-                       continue;
-               }
-               spin_unlock_irq(&hugetlb_lock);
-
-               if (!folio_ref_count(folio)) {
-                       ret = alloc_and_dissolve_hugetlb_folio(h, folio,
-                                                              &isolate_list);
+               /* Not to disrupt normal path by vainly holding hugetlb_lock */
+               if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
+                       ret = alloc_and_dissolve_hugetlb_folio(folio, &isolate_list);
                        if (ret)
                                break;