mm/vmalloc: leave lazy MMU mode on PTE mapping error
author    Alexander Gordeev <agordeev@linux.ibm.com>
          Mon, 23 Jun 2025 07:57:21 +0000 (09:57 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Thu, 24 Jul 2025 06:51:55 +0000 (08:51 +0200)
commit fea18c686320a53fce7ad62a87a3e1d10ad02f31 upstream.

vmap_pages_pte_range() enters the lazy MMU mode, but fails to leave it in
case an error is encountered.
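
The shape of the bug, and of the fix below, can be reproduced in a small
standalone sketch. This is illustrative only, not kernel code:
enter_lazy(), leave_lazy() and map_one() are hypothetical stand-ins for
arch_enter_lazy_mmu_mode(), arch_leave_lazy_mmu_mode() and the per-PTE
checks in vmap_pages_pte_range().

#include <stdio.h>

static int lazy_depth;                  /* models lazy MMU mode nesting */

static void enter_lazy(void) { lazy_depth++; }
static void leave_lazy(void) { lazy_depth--; }

/* Stand-in for mapping one PTE; fails on the third entry. */
static int map_one(int i)
{
        return i == 2 ? -1 : 0;
}

/* Old shape: an early return skips leave_lazy(). */
static int map_range_buggy(int n)
{
        enter_lazy();
        for (int i = 0; i < n; i++) {
                if (map_one(i))
                        return -1;      /* lazy mode is never left */
        }
        leave_lazy();
        return 0;
}

/* New shape: record the error, break, and leave on the common path. */
static int map_range_fixed(int n)
{
        int err = 0;

        enter_lazy();
        for (int i = 0; i < n; i++) {
                if (map_one(i)) {
                        err = -1;
                        break;
                }
        }
        leave_lazy();
        return err;
}

int main(void)
{
        map_range_buggy(4);
        printf("buggy: lazy_depth=%d (should be 0)\n", lazy_depth);

        lazy_depth = 0;
        map_range_fixed(4);
        printf("fixed: lazy_depth=%d\n", lazy_depth);
        return 0;
}

With the early return, lazy_depth is left at 1 after a failure;
funnelling errors through the single exit keeps enter/leave balanced
while still propagating err to the caller.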

Link: https://lkml.kernel.org/r/20250623075721.2817094-1-agordeev@linux.ibm.com
Fixes: 2ba3e6947aed ("mm/vmalloc: track which page-table levels were modified")
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reported-by: kernel test robot <lkp@intel.com>
Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
Closes: https://lore.kernel.org/r/202506132017.T1l1l6ME-lkp@intel.com/
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 562994159216c4ec1a8a6ea2edd8c89ccc1ae2ca..ebeb6b2e1a15bddfc2c04c397de875ae0ad8f30c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -467,6 +467,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
                unsigned long end, pgprot_t prot, struct page **pages, int *nr,
                pgtbl_mod_mask *mask)
 {
+       int err = 0;
        pte_t *pte;
 
        /*
@@ -480,18 +481,25 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
        do {
                struct page *page = pages[*nr];
 
-               if (WARN_ON(!pte_none(*pte)))
-                       return -EBUSY;
-               if (WARN_ON(!page))
-                       return -ENOMEM;
-               if (WARN_ON(!pfn_valid(page_to_pfn(page))))
-                       return -EINVAL;
+               if (WARN_ON(!pte_none(*pte))) {
+                       err = -EBUSY;
+                       break;
+               }
+               if (WARN_ON(!page)) {
+                       err = -ENOMEM;
+                       break;
+               }
+               if (WARN_ON(!pfn_valid(page_to_pfn(page)))) {
+                       err = -EINVAL;
+                       break;
+               }
 
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        *mask |= PGTBL_PTE_MODIFIED;
-       return 0;
+
+       return err;
 }
 
 static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,