mm: change vmf_anon_prepare() to __vmf_anon_prepare()
Author:     Vishal Moola (Oracle) <vishal.moola@gmail.com>
AuthorDate: Sat, 14 Sep 2024 19:41:18 +0000 (12:41 -0700)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Fri, 4 Oct 2024 14:33:48 +0000 (16:33 +0200)
commit 2a058ab3286d6475b2082b90c2d2182d2fea4b39 upstream.

Some callers of vmf_anon_prepare() may not want us to release the per-VMA
lock ourselves.  Rename vmf_anon_prepare() to __vmf_anon_prepare() and let
the callers drop the lock when desired.

Also, make vmf_anon_prepare() a wrapper that releases the per-VMA lock
itself for any callers that don't care.
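
For illustration only (not part of this patch), a caller that must tear
down its own state before the per-VMA lock is released might look
roughly like the sketch below; example_fault_path() and its folio
argument are hypothetical names:

        /* Hypothetical caller sketch, not part of this patch. */
        static vm_fault_t example_fault_path(struct vm_fault *vmf,
                                             struct folio *folio)
        {
                vm_fault_t ret = __vmf_anon_prepare(vmf);

                if (unlikely(ret & VM_FAULT_RETRY)) {
                        /* Release our own resources first... */
                        folio_unlock(folio);
                        /* ...then drop the per-VMA lock ourselves. */
                        vma_end_read(vmf->vma);
                }
                return ret;
        }

Callers that have nothing of their own to release keep using the
vmf_anon_prepare() wrapper, which drops the lock on VM_FAULT_RETRY
as before.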

This is in preparation for fixing the following bug reported by syzbot:
https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/

Link: https://lkml.kernel.org/r/20240914194243.245-1-vishal.moola@gmail.com
Fixes: 9acad7ba3e25 ("hugetlb: use vmf_anon_prepare() instead of anon_vma_prepare()")
Reported-by: syzbot+2dab93857ee95f2eeb08@syzkaller.appspotmail.com
Closes: https://lore.kernel.org/linux-mm/00000000000067c20b06219fbc26@google.com/
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
mm/internal.h
mm/memory.c

diff --git a/mm/internal.h b/mm/internal.h
index cc2c5e07fad3ba75713b780d216e675c49713ce6..6ef5eecabfc4f9c519befc9eefbd767069956abc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -293,7 +293,16 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
                wake_up(wqh);
 }
 
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf);
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf);
+static inline vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+{
+       vm_fault_t ret = __vmf_anon_prepare(vmf);
+
+       if (unlikely(ret & VM_FAULT_RETRY))
+               vma_end_read(vmf->vma);
+       return ret;
+}
+
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
diff --git a/mm/memory.c b/mm/memory.c
index 7a898b85788dd9a91e89aeabfc99694e820096df..cfc4df9fe995443884e17191a0e09096423bafc2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3226,7 +3226,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
 }
 
 /**
- * vmf_anon_prepare - Prepare to handle an anonymous fault.
+ * __vmf_anon_prepare - Prepare to handle an anonymous fault.
  * @vmf: The vm_fault descriptor passed from the fault handler.
  *
  * When preparing to insert an anonymous page into a VMA from a
@@ -3240,7 +3240,7 @@ static inline vm_fault_t vmf_can_call_fault(const struct vm_fault *vmf)
  * Return: 0 if fault handling can proceed.  Any other value should be
  * returned to the caller.
  */
-vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
+vm_fault_t __vmf_anon_prepare(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        vm_fault_t ret = 0;
@@ -3248,10 +3248,8 @@ vm_fault_t vmf_anon_prepare(struct vm_fault *vmf)
        if (likely(vma->anon_vma))
                return 0;
        if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-               if (!mmap_read_trylock(vma->vm_mm)) {
-                       vma_end_read(vma);
+               if (!mmap_read_trylock(vma->vm_mm))
                        return VM_FAULT_RETRY;
-               }
        }
        if (__anon_vma_prepare(vma))
                ret = VM_FAULT_OOM;