mm/vmalloc: Enter lazy mmu mode while manipulating vmalloc ptes
author    Ryan Roberts <ryan.roberts@arm.com>
          Tue, 22 Apr 2025 08:18:18 +0000 (09:18 +0100)
committer Will Deacon <will@kernel.org>
          Fri, 9 May 2025 12:43:07 +0000 (13:43 +0100)
Wrap vmalloc's pte table manipulation loops with
arch_enter_lazy_mmu_mode() / arch_leave_lazy_mmu_mode(). This provides
the arch code with the opportunity to optimize the pte manipulations.
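
For reference, on architectures that do not provide these hooks, the
generic fallbacks in include/linux/pgtable.h compile to no-ops, so the
extra calls cost nothing there. Roughly (abridged, not verbatim):

  #ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
  #define arch_enter_lazy_mmu_mode()      do {} while (0)
  #define arch_leave_lazy_mmu_mode()      do {} while (0)
  #define arch_flush_lazy_mmu_mode()      do {} while (0)
  #endif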

Note that vmap_pfn() already uses lazy mmu mode since it delegates to
apply_to_page_range() which enters lazy mmu mode for both user and
kernel mappings.
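
The pte-level walk under apply_to_page_range() (apply_to_pte_range()
in mm/memory.c) already has the shape that this patch gives the vmap
and vunmap loops; an abridged sketch:

  arch_enter_lazy_mmu_mode();
  do {
          err = fn(pte++, addr, data);    /* caller's per-pte callback */
          if (err)
                  break;
  } while (addr += PAGE_SIZE, addr != end);
  arch_leave_lazy_mmu_mode();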

These hooks will shortly be used by arm64 to improve vmalloc
performance.
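
For illustration only (a hypothetical sketch, not the arm64 patch
itself), an arch can use the hooks to defer per-pte barriers and emit
one batched synchronization when leaving lazy mode:

  /* Hypothetical: TIF_LAZY_MMU and the barrier choice are illustrative. */
  static inline void arch_enter_lazy_mmu_mode(void)
  {
          set_thread_flag(TIF_LAZY_MMU);  /* pte helpers defer barriers */
  }

  static inline void arch_leave_lazy_mmu_mode(void)
  {
          clear_thread_flag(TIF_LAZY_MMU);
          dsb(ishst);     /* publish all deferred pte updates */
          isb();
  }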

Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Tested-by: Luiz Capitulino <luizcap@redhat.com>
Link: https://lore.kernel.org/r/20250422081822.1836315-11-ryan.roberts@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fe2e2cc8da94240d95c95fbd25cc39b498e09fdd..24430160b37fb9a3a7d25dff2b155e9fed8355f5 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -104,6 +104,9 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        pte = pte_alloc_kernel_track(pmd, addr, mask);
        if (!pte)
                return -ENOMEM;
+
+       arch_enter_lazy_mmu_mode();
+
        do {
                if (unlikely(!pte_none(ptep_get(pte)))) {
                        if (pfn_valid(pfn)) {
@@ -127,6 +130,8 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte += PFN_DOWN(size), addr += size, addr != end);
+
+       arch_leave_lazy_mmu_mode();
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
 }
@@ -354,6 +359,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        unsigned long size = PAGE_SIZE;
 
        pte = pte_offset_kernel(pmd, addr);
+       arch_enter_lazy_mmu_mode();
+
        do {
 #ifdef CONFIG_HUGETLB_PAGE
                size = arch_vmap_pte_range_unmap_size(addr, pte);
@@ -370,6 +377,8 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                        ptent = ptep_get_and_clear(&init_mm, addr, pte);
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
+
+       arch_leave_lazy_mmu_mode();
        *mask |= PGTBL_PTE_MODIFIED;
 }
 
@@ -515,6 +524,9 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
        pte = pte_alloc_kernel_track(pmd, addr, mask);
        if (!pte)
                return -ENOMEM;
+
+       arch_enter_lazy_mmu_mode();
+
        do {
                struct page *page = pages[*nr];
 
@@ -528,6 +540,8 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
                set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
+
+       arch_leave_lazy_mmu_mode();
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
 }