mm/khugepaged: remove definition of struct khugepaged_mm_slot
Author:     Wei Yang <richard.weiyang@gmail.com>
AuthorDate: Fri, 19 Sep 2025 07:12:44 +0000 (07:12 +0000)
Commit:     Andrew Morton <akpm@linux-foundation.org>
CommitDate: Sun, 28 Sep 2025 18:51:33 +0000 (11:51 -0700)
The current code is not correct: it obtains a struct khugepaged_mm_slot
via mm_slot_entry() without checking that the looked-up mm_slot is
non-NULL.  No problem has been reported so far only because slot is the
first element of struct khugepaged_mm_slot.
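
To see why the unchecked conversion happens to work: mm_slot_entry() is a
container_of()-style accessor, so when the wrapped member sits at offset
zero, a NULL mm_slot maps back to a NULL wrapper pointer and later NULL
checks still fire; with any other layout the result would be a bogus
non-NULL pointer.  A minimal user-space sketch (not kernel code; the
struct layouts here are illustrative only, and subtracting from NULL is
strictly undefined behaviour, shown just to visualize the bogus address):

	#include <stddef.h>
	#include <stdio.h>

	/* classic container_of(), without the kernel's type checking */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct mm_slot { int dummy; };

	struct khugepaged_mm_slot {
		struct mm_slot slot;	/* offset 0: NULL maps to NULL */
	};

	struct reordered {		/* hypothetical layout */
		long pad;
		struct mm_slot slot;	/* nonzero offset: NULL goes bogus */
	};

	int main(void)
	{
		struct mm_slot *slot = NULL;	/* e.g. lookup failed */

		printf("offset %zu -> %p\n",
		       offsetof(struct khugepaged_mm_slot, slot),
		       (void *)container_of(slot, struct khugepaged_mm_slot, slot));
		printf("offset %zu -> %p\n",
		       offsetof(struct reordered, slot),
		       (void *)container_of(slot, struct reordered, slot));
		return 0;
	}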

Since struct khugepaged_mm_slot is just a wrapper around struct mm_slot,
there is no need to define it at all.

Remove the definition of struct khugepaged_mm_slot entirely, so there is
no chance left to misuse mm_slot_entry().

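A knock-on effect visible in the diff below: khugepaged_init() can no
longer use KMEM_CACHE(khugepaged_mm_slot, 0), because that macro derives
the cache name, object size and alignment from the struct definition.
The patch therefore calls kmem_cache_create() directly, keeping the
user-visible "khugepaged_mm_slot" slab name while sizing the cache for
struct mm_slot.  Roughly, the classic KMEM_CACHE() definition from
<linux/slab.h> expands like this (a sketch; recent kernels wrap it
differently, but the effect is the same):

	#define KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
				  __alignof__(struct __struct),		\
				  (__flags), NULL)
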
[richard.weiyang@gmail.com: fix use-after-free crash]
Link: https://lkml.kernel.org/r/20250922002834.vz6ntj36e75ehkyp@master
Link: https://lkml.kernel.org/r/20250919071244.17020-3-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Cc: Lance Yang <lance.yang@linux.dev>
Cc: David Hildenbrand <david@redhat.com>
Cc: Dev Jain <dev.jain@arm.com>
Cc: Kiryl Shutsemau <kirill@shutemov.name>
Cc: xu xin <xu.xin16@zte.com.cn>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/khugepaged.c

diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 9ed1af2b5c388426e5dc824110fffd20ca136a01..52786ffef80a1aad512fc03c3d0494949be55587 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -103,14 +103,6 @@ struct collapse_control {
        nodemask_t alloc_nmask;
 };
 
-/**
- * struct khugepaged_mm_slot - khugepaged information per mm that is being scanned
- * @slot: hash lookup from mm to mm_slot
- */
-struct khugepaged_mm_slot {
-       struct mm_slot slot;
-};
-
 /**
  * struct khugepaged_scan - cursor for scanning
  * @mm_head: the head of the mm list to scan
@@ -121,7 +113,7 @@ struct khugepaged_mm_slot {
  */
 struct khugepaged_scan {
        struct list_head mm_head;
-       struct khugepaged_mm_slot *mm_slot;
+       struct mm_slot *mm_slot;
        unsigned long address;
 };
 
@@ -384,7 +376,10 @@ int hugepage_madvise(struct vm_area_struct *vma,
 
 int __init khugepaged_init(void)
 {
-       mm_slot_cache = KMEM_CACHE(khugepaged_mm_slot, 0);
+       mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
+                                         sizeof(struct mm_slot),
+                                         __alignof__(struct mm_slot),
+                                         0, NULL);
        if (!mm_slot_cache)
                return -ENOMEM;
 
@@ -438,7 +433,6 @@ static bool hugepage_pmd_enabled(void)
 
 void __khugepaged_enter(struct mm_struct *mm)
 {
-       struct khugepaged_mm_slot *mm_slot;
        struct mm_slot *slot;
        int wakeup;
 
@@ -447,12 +441,10 @@ void __khugepaged_enter(struct mm_struct *mm)
        if (unlikely(mm_flags_test_and_set(MMF_VM_HUGEPAGE, mm)))
                return;
 
-       mm_slot = mm_slot_alloc(mm_slot_cache);
-       if (!mm_slot)
+       slot = mm_slot_alloc(mm_slot_cache);
+       if (!slot)
                return;
 
-       slot = &mm_slot->slot;
-
        spin_lock(&khugepaged_mm_lock);
        mm_slot_insert(mm_slots_hash, mm, slot);
        /*
@@ -480,14 +472,12 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
 
 void __khugepaged_exit(struct mm_struct *mm)
 {
-       struct khugepaged_mm_slot *mm_slot;
        struct mm_slot *slot;
        int free = 0;
 
        spin_lock(&khugepaged_mm_lock);
        slot = mm_slot_lookup(mm_slots_hash, mm);
-       mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
-       if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
+       if (slot && khugepaged_scan.mm_slot != slot) {
                hash_del(&slot->hash);
                list_del(&slot->mm_node);
                free = 1;
@@ -496,9 +486,9 @@ void __khugepaged_exit(struct mm_struct *mm)
 
        if (free) {
                mm_flags_clear(MMF_VM_HUGEPAGE, mm);
-               mm_slot_free(mm_slot_cache, mm_slot);
+               mm_slot_free(mm_slot_cache, slot);
                mmdrop(mm);
-       } else if (mm_slot) {
+       } else if (slot) {
                /*
                 * This is required to serialize against
                 * hpage_collapse_test_exit() (which is guaranteed to run
@@ -1432,9 +1422,8 @@ out:
        return result;
 }
 
-static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
+static void collect_mm_slot(struct mm_slot *slot)
 {
-       struct mm_slot *slot = &mm_slot->slot;
        struct mm_struct *mm = slot->mm;
 
        lockdep_assert_held(&khugepaged_mm_lock);
@@ -1451,7 +1440,7 @@ static void collect_mm_slot(struct khugepaged_mm_slot *mm_slot)
                 */
 
                /* khugepaged_mm_lock actually not necessary for the below */
-               mm_slot_free(mm_slot_cache, mm_slot);
+               mm_slot_free(mm_slot_cache, slot);
                mmdrop(mm);
        }
 }
@@ -2394,7 +2383,6 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
        __acquires(&khugepaged_mm_lock)
 {
        struct vma_iterator vmi;
-       struct khugepaged_mm_slot *mm_slot;
        struct mm_slot *slot;
        struct mm_struct *mm;
        struct vm_area_struct *vma;
@@ -2405,14 +2393,12 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
        *result = SCAN_FAIL;
 
        if (khugepaged_scan.mm_slot) {
-               mm_slot = khugepaged_scan.mm_slot;
-               slot = &mm_slot->slot;
+               slot = khugepaged_scan.mm_slot;
        } else {
                slot = list_first_entry(&khugepaged_scan.mm_head,
                                     struct mm_slot, mm_node);
-               mm_slot = mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
                khugepaged_scan.address = 0;
-               khugepaged_scan.mm_slot = mm_slot;
+               khugepaged_scan.mm_slot = slot;
        }
        spin_unlock(&khugepaged_mm_lock);
 
@@ -2510,7 +2496,7 @@ breakouterloop:
 breakouterloop_mmap_lock:
 
        spin_lock(&khugepaged_mm_lock);
-       VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
+       VM_BUG_ON(khugepaged_scan.mm_slot != slot);
        /*
         * Release the current mm_slot if this mm is about to die, or
         * if we scanned all vmas of this mm.
@@ -2522,16 +2508,14 @@ breakouterloop_mmap_lock:
                 * mm_slot not pointing to the exiting mm.
                 */
                if (!list_is_last(&slot->mm_node, &khugepaged_scan.mm_head)) {
-                       slot = list_next_entry(slot, mm_node);
-                       khugepaged_scan.mm_slot =
-                               mm_slot_entry(slot, struct khugepaged_mm_slot, slot);
+                       khugepaged_scan.mm_slot = list_next_entry(slot, mm_node);
                        khugepaged_scan.address = 0;
                } else {
                        khugepaged_scan.mm_slot = NULL;
                        khugepaged_full_scans++;
                }
 
-               collect_mm_slot(mm_slot);
+               collect_mm_slot(slot);
        }
 
        return progress;
@@ -2618,7 +2602,7 @@ static void khugepaged_wait_work(void)
 
 static int khugepaged(void *none)
 {
-       struct khugepaged_mm_slot *mm_slot;
+       struct mm_slot *slot;
 
        set_freezable();
        set_user_nice(current, MAX_NICE);
@@ -2629,10 +2613,10 @@ static int khugepaged(void *none)
        }
 
        spin_lock(&khugepaged_mm_lock);
-       mm_slot = khugepaged_scan.mm_slot;
+       slot = khugepaged_scan.mm_slot;
        khugepaged_scan.mm_slot = NULL;
-       if (mm_slot)
-               collect_mm_slot(mm_slot);
+       if (slot)
+               collect_mm_slot(slot);
        spin_unlock(&khugepaged_mm_lock);
        return 0;
 }