mm/gup: remove (VM_)BUG_ONs
author     David Hildenbrand <david@redhat.com>
           Wed, 4 Jun 2025 14:05:44 +0000 (16:05 +0200)
committer  Andrew Morton <akpm@linux-foundation.org>
           Thu, 10 Jul 2025 05:41:56 +0000 (22:41 -0700)
Especially once we hit one of the assertions in
sanity_check_pinned_pages(), observing follow-up assertions failing in
other code can give good clues about what went wrong, so use
VM_WARN_ON_ONCE instead of BUG_ON.
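
For context, under CONFIG_DEBUG_VM the two families boil down to roughly
the following (simplified from include/linux/mmdebug.h): BUG_ON() oopses
and kills at least the current task, while WARN_ON_ONCE() prints one
backtrace and lets execution continue, so later assertions still fire:

	#define VM_BUG_ON(cond)        BUG_ON(cond)
	#define VM_WARN_ON_ONCE(cond)  (void)WARN_ON_ONCE(cond)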

While at it, let's just convert all VM_BUG_ON to VM_WARN_ON_ONCE as well.
Add one comment for the pfn_valid() check.

We have to introduce VM_WARN_ON_ONCE_VMA() to make that fly.
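
Like the existing VM_WARN_ON_ONCE_MM(), the new macro dumps the
offending VMA on the first failure only; the static flag placed in
.data..once suppresses repeats.  Its use in populate_vma_page_range()
below looks like:

	/* Warn (once) and dump the VMA if the range falls outside it. */
	VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma);
	VM_WARN_ON_ONCE_VMA(end   > vma->vm_end, vma);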

Drop the BUG_ON after mmap_read_lock_killable(): if that ever returns
something > 0 we're in bigger trouble anyway.  Convert the other BUG_ONs
into VM_WARN_ON_ONCE as well; they are in a similar "should never
happen" domain, but it is more reasonable to check for them during early
testing.

[david@redhat.com: use the _FOLIO variant where possible, per Lorenzo]
Link: https://lkml.kernel.org/r/844bd929-a551-48e3-a12e-285cd65ba580@redhat.com
Link: https://lkml.kernel.org/r/20250604140544.688711-1-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmdebug.h
mm/gup.c

index a0a3894900ed475dbf2dd812752301f46b68875f..14a45979cccc9a13d674709c5480b7e8c2977812 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -89,6 +89,17 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
        }                                                               \
        unlikely(__ret_warn_once);                                      \
 })
+#define VM_WARN_ON_ONCE_VMA(cond, vma)         ({                      \
+       static bool __section(".data..once") __warned;                  \
+       int __ret_warn_once = !!(cond);                                 \
+                                                                       \
+       if (unlikely(__ret_warn_once && !__warned)) {                   \
+               dump_vma(vma);                                          \
+               __warned = true;                                        \
+               WARN_ON(1);                                             \
+       }                                                               \
+       unlikely(__ret_warn_once);                                      \
+})
 #define VM_WARN_ON_VMG(cond, vmg)              ({                      \
        int __ret_warn = !!(cond);                                      \
                                                                        \
@@ -115,6 +126,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
 #define VM_WARN_ON_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_FOLIO(cond, folio)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_ONCE_MM(cond, mm)  BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_VMA(cond, vma)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ON_VMG(cond, vmg)  BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
 #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
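
Note: with CONFIG_DEBUG_VM disabled, the new macro joins the stub
definitions above.  BUILD_BUG_ON_INVALID() only type-checks the
condition and emits no code, roughly (from include/linux/build_bug.h):

	#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
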
index 3c39cbbeebef1f789bd7b770325dec6f02042097..7f2644a433a0f7a4f3fc0b53e348f2a598d2d880 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -64,11 +64,11 @@ static inline void sanity_check_pinned_pages(struct page **pages,
                    !folio_test_anon(folio))
                        continue;
                if (!folio_test_large(folio) || folio_test_hugetlb(folio))
-                       VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
+                       VM_WARN_ON_ONCE_FOLIO(!PageAnonExclusive(&folio->page), folio);
                else
                        /* Either a PTE-mapped or a PMD-mapped THP. */
-                       VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
-                                      !PageAnonExclusive(page), page);
+                       VM_WARN_ON_ONCE_PAGE(!PageAnonExclusive(&folio->page) &&
+                                            !PageAnonExclusive(page), page);
        }
 }
 
@@ -760,8 +760,8 @@ static struct page *follow_huge_pmd(struct vm_area_struct *vma,
        if (!pmd_write(pmdval) && gup_must_unshare(vma, flags, page))
                return ERR_PTR(-EMLINK);
 
-       VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-                       !PageAnonExclusive(page), page);
+       VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+                            !PageAnonExclusive(page), page);
 
        ret = try_grab_folio(page_folio(page), 1, flags);
        if (ret)
@@ -899,8 +899,8 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
                goto out;
        }
 
-       VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
-                      !PageAnonExclusive(page), page);
+       VM_WARN_ON_ONCE_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
+                            !PageAnonExclusive(page), page);
 
        /* try_grab_folio() does nothing unless FOLL_GET or FOLL_PIN is set. */
        ret = try_grab_folio(folio, 1, flags);
@@ -1180,7 +1180,7 @@ static int faultin_page(struct vm_area_struct *vma,
        if (unshare) {
                fault_flags |= FAULT_FLAG_UNSHARE;
                /* FAULT_FLAG_WRITE and FAULT_FLAG_UNSHARE are incompatible */
-               VM_BUG_ON(fault_flags & FAULT_FLAG_WRITE);
+               VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_WRITE);
        }
 
        ret = handle_mm_fault(vma, address, fault_flags, NULL);
@@ -1760,10 +1760,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm,
                }
 
                /* VM_FAULT_RETRY or VM_FAULT_COMPLETED cannot return errors */
-               if (!*locked) {
-                       BUG_ON(ret < 0);
-                       BUG_ON(ret >= nr_pages);
-               }
+               VM_WARN_ON_ONCE(!*locked && (ret < 0 || ret >= nr_pages));
 
                if (ret > 0) {
                        nr_pages -= ret;
@@ -1808,7 +1805,6 @@ retry:
 
                ret = mmap_read_lock_killable(mm);
                if (ret) {
-                       BUG_ON(ret > 0);
                        if (!pages_done)
                                pages_done = ret;
                        break;
@@ -1819,11 +1815,11 @@ retry:
                                       pages, locked);
                if (!*locked) {
                        /* Continue to retry until we succeeded */
-                       BUG_ON(ret != 0);
+                       VM_WARN_ON_ONCE(ret != 0);
                        goto retry;
                }
                if (ret != 1) {
-                       BUG_ON(ret > 1);
+                       VM_WARN_ON_ONCE(ret > 1);
                        if (!pages_done)
                                pages_done = ret;
                        break;
@@ -1885,10 +1881,10 @@ long populate_vma_page_range(struct vm_area_struct *vma,
        int gup_flags;
        long ret;
 
-       VM_BUG_ON(!PAGE_ALIGNED(start));
-       VM_BUG_ON(!PAGE_ALIGNED(end));
-       VM_BUG_ON_VMA(start < vma->vm_start, vma);
-       VM_BUG_ON_VMA(end   > vma->vm_end, vma);
+       VM_WARN_ON_ONCE(!PAGE_ALIGNED(start));
+       VM_WARN_ON_ONCE(!PAGE_ALIGNED(end));
+       VM_WARN_ON_ONCE_VMA(start < vma->vm_start, vma);
+       VM_WARN_ON_ONCE_VMA(end   > vma->vm_end, vma);
        mmap_assert_locked(mm);
 
        /*
@@ -1957,8 +1953,8 @@ long faultin_page_range(struct mm_struct *mm, unsigned long start,
        int gup_flags;
        long ret;
 
-       VM_BUG_ON(!PAGE_ALIGNED(start));
-       VM_BUG_ON(!PAGE_ALIGNED(end));
+       VM_WARN_ON_ONCE(!PAGE_ALIGNED(start));
+       VM_WARN_ON_ONCE(!PAGE_ALIGNED(end));
        mmap_assert_locked(mm);
 
        /*
@@ -2914,7 +2910,8 @@ static int gup_fast_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr,
                } else if (pte_special(pte))
                        goto pte_unmap;
 
-               VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+               /* If it's not marked as special it must have a valid memmap. */
+               VM_WARN_ON_ONCE(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
 
                folio = try_grab_folio_fast(page, 1, flags);