mm: introduce generic lazy_mmu helpers
author     Kevin Brodsky <kevin.brodsky@arm.com>
           Mon, 15 Dec 2025 15:03:16 +0000 (15:03 +0000)
committer  Andrew Morton <akpm@linux-foundation.org>
           Wed, 21 Jan 2026 03:24:33 +0000 (19:24 -0800)
The implementation of the lazy MMU mode is currently entirely
arch-specific; core code directly calls arch helpers:
arch_{enter,leave}_lazy_mmu_mode().

We are about to introduce support for nested lazy MMU sections.  As things
stand we'd have to duplicate that logic in every arch implementing
lazy_mmu - adding to a fair amount of logic already duplicated across
lazy_mmu implementations.

This patch therefore introduces a new generic layer that calls the
existing arch_* helpers. Two pairs of calls are introduced:

* lazy_mmu_mode_enable() ... lazy_mmu_mode_disable()
    This is the standard case where the mode is enabled for a given
    block of code by surrounding it with enable() and disable()
    calls.

* lazy_mmu_mode_pause() ... lazy_mmu_mode_resume()
    This is for situations where the mode is temporarily disabled
    by first calling pause() and then resume() (e.g. to prevent any
    batching from occurring in a critical section); see the sketch
    below.
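
For illustration, here is a minimal usage sketch (not taken from this
patch; example_set_dirty_range() and the PTE update it performs are
made up for the example, only the lazy_mmu_mode_* calls are real):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Purely illustrative; the caller is assumed to hold the PTE lock. */
static void example_set_dirty_range(struct mm_struct *mm, pte_t *ptep,
                                    unsigned long addr, unsigned long end)
{
        lazy_mmu_mode_enable();         /* PTE updates below may be batched */

        for (; addr != end; ptep++, addr += PAGE_SIZE) {
                pte_t pte = ptep_get(ptep);

                if (pte_present(pte))
                        set_pte_at(mm, addr, ptep, pte_mkdirty(pte));
        }

        /*
         * If some step must observe the page tables in their up-to-date
         * state (e.g. it accesses memory through a mapping established
         * above), batching can be temporarily suspended:
         */
        lazy_mmu_mode_pause();
        /* critical section: no batching occurs here */
        lazy_mmu_mode_resume();

        lazy_mmu_mode_disable();        /* any pending updates are flushed */
}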

The documentation in <linux/pgtable.h> will be updated in a subsequent
patch.

No functional change should be introduced at this stage.  The
implementation of enable()/resume() and disable()/pause() is currently
identical, but nesting support will change that.

Most of the call sites have been updated using the following Coccinelle
script:

@@
@@
{
...
- arch_enter_lazy_mmu_mode();
+ lazy_mmu_mode_enable();
...
- arch_leave_lazy_mmu_mode();
+ lazy_mmu_mode_disable();
...
}

@@
@@
{
...
- arch_leave_lazy_mmu_mode();
+ lazy_mmu_mode_pause();
...
- arch_enter_lazy_mmu_mode();
+ lazy_mmu_mode_resume();
...
}

A couple of notes regarding x86:

* Xen is currently the only case where explicit handling is required
  for lazy MMU when context-switching. This is purely an
  implementation detail, and using the generic lazy_mmu_mode_*
  functions there would cause trouble once nesting support is
  introduced, because the generic functions must be called from the
  current task.
  For that reason we still use arch_leave() and arch_enter() there.

* x86 calls arch_flush_lazy_mmu_mode() unconditionally in a few
  places, but only defines it if PARAVIRT_XXL is selected, and we
  are removing the fallback in <linux/pgtable.h>. Add a new fallback
  definition to <asm/pgtable.h> to keep things building.

Link: https://lkml.kernel.org/r/20251215150323.2218608-8-kevin.brodsky@arm.com
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Yeoreum Yun <yeoreum.yun@arm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: David Hildenbrand (Red Hat) <david@kernel.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jann Horn <jannh@google.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
Cc: Ryan Roberts <ryan.roberts@arm.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Venkat Rao Bagalkote <venkat88@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
16 files changed:
arch/arm64/mm/mmu.c
arch/arm64/mm/pageattr.c
arch/powerpc/mm/book3s64/hash_tlb.c
arch/powerpc/mm/book3s64/subpage_prot.c
arch/x86/include/asm/pgtable.h
fs/proc/task_mmu.c
include/linux/pgtable.h
mm/kasan/shadow.c
mm/madvise.c
mm/memory.c
mm/migrate_device.c
mm/mprotect.c
mm/mremap.c
mm/userfaultfd.c
mm/vmalloc.c
mm/vmscan.c

diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 8e1d80a7033e34388c05da4e4902f14d75f4f37c..a6a00accf4f9380ca17fa3be2aee6abf17009e12 100644
@@ -800,7 +800,7 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
                return -EINVAL;
 
        mutex_lock(&pgtable_split_lock);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        /*
         * The split_kernel_leaf_mapping_locked() may sleep, it is not a
@@ -822,7 +822,7 @@ int split_kernel_leaf_mapping(unsigned long start, unsigned long end)
                        ret = split_kernel_leaf_mapping_locked(end);
        }
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        mutex_unlock(&pgtable_split_lock);
        return ret;
 }
@@ -883,10 +883,10 @@ static int range_split_to_ptes(unsigned long start, unsigned long end, gfp_t gfp
 {
        int ret;
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        ret = walk_kernel_page_table_range_lockless(start, end,
                                        &split_to_ptes_ops, NULL, &gfp);
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
 
        return ret;
 }
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 7176ff39cb8796dff135686205263d1e28146286..358d1dc9a576f0d1a032b23ed59640a230f7439a 100644
@@ -110,7 +110,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
        if (WARN_ON_ONCE(ret))
                return ret;
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        /*
         * The caller must ensure that the range we are operating on does not
@@ -119,7 +119,7 @@ static int update_range_prot(unsigned long start, unsigned long size,
         */
        ret = walk_kernel_page_table_range_lockless(start, start + size,
                                                    &pageattr_ops, NULL, &data);
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
 
        return ret;
 }
diff --git a/arch/powerpc/mm/book3s64/hash_tlb.c b/arch/powerpc/mm/book3s64/hash_tlb.c
index 21fcad97ae80dd46fe28b6f195150d82f52979d8..787f7a0e27f0c568b981c199b4c8835de5da0761 100644
@@ -205,7 +205,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end)
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        for (; start < end; start += PAGE_SIZE) {
                pte_t *ptep = find_init_mm_pte(start, &hugepage_shift);
                unsigned long pte;
@@ -217,7 +217,7 @@ void __flush_hash_table_range(unsigned long start, unsigned long end)
                        continue;
                hpte_need_flush(&init_mm, start, ptep, pte, hugepage_shift);
        }
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        local_irq_restore(flags);
 }
 
@@ -237,7 +237,7 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
         * way to do things but is fine for our needs here.
         */
        local_irq_save(flags);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        start_pte = pte_offset_map(pmd, addr);
        if (!start_pte)
                goto out;
@@ -249,6 +249,6 @@ void flush_hash_table_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long
        }
        pte_unmap(start_pte);
 out:
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        local_irq_restore(flags);
 }
diff --git a/arch/powerpc/mm/book3s64/subpage_prot.c b/arch/powerpc/mm/book3s64/subpage_prot.c
index ec98e526167e543a4f4e4b9342020ad1f0a1a826..07c47673bba25126ce3a3a0eb67c066fa719e1e3 100644
@@ -73,13 +73,13 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
        pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return;
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        for (; npages > 0; --npages) {
                pte_update(mm, addr, pte, 0, 0, 0);
                addr += PAGE_SIZE;
                ++pte;
        }
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(pte - 1, ptl);
 }
 
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index e33df3da698043aaa275f3f875bbf97ea8db5703..2842fa1f7a2ce2346c51f76470a0902155d7d00d 100644
@@ -118,6 +118,7 @@ extern pmdval_t early_pmd_flags;
 #define __pte(x)       native_make_pte(x)
 
 #define arch_end_context_switch(prev)  do {} while(0)
+static inline void arch_flush_lazy_mmu_mode(void) {}
 #endif /* CONFIG_PARAVIRT_XXL */
 
 static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 81dfc26bfae8af1e5d5cd579d5f25a19ae0069b8..480db575553ee3215ecb7f4319a06dfef52d779e 100644
@@ -2739,7 +2739,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
                return 0;
        }
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
                /* Fast path for performing exclusive WP */
@@ -2809,7 +2809,7 @@ flush_and_return:
        if (flush_end)
                flush_tlb_range(vma, start, addr);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(start_pte, ptl);
 
        cond_resched();
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index d46d86959bd677ac7c20373a0a2cd0862f7d56a5..116a18b7916c400bc59841bf0322cb3bfc231a3e 100644
@@ -235,10 +235,31 @@ static inline int pmd_dirty(pmd_t pmd)
  *
  * Nesting is not permitted and the mode cannot be used in interrupt context.
  */
-#ifndef CONFIG_ARCH_HAS_LAZY_MMU_MODE
-static inline void arch_enter_lazy_mmu_mode(void) {}
-static inline void arch_leave_lazy_mmu_mode(void) {}
-static inline void arch_flush_lazy_mmu_mode(void) {}
+#ifdef CONFIG_ARCH_HAS_LAZY_MMU_MODE
+static inline void lazy_mmu_mode_enable(void)
+{
+       arch_enter_lazy_mmu_mode();
+}
+
+static inline void lazy_mmu_mode_disable(void)
+{
+       arch_leave_lazy_mmu_mode();
+}
+
+static inline void lazy_mmu_mode_pause(void)
+{
+       arch_leave_lazy_mmu_mode();
+}
+
+static inline void lazy_mmu_mode_resume(void)
+{
+       arch_enter_lazy_mmu_mode();
+}
+#else
+static inline void lazy_mmu_mode_enable(void) {}
+static inline void lazy_mmu_mode_disable(void) {}
+static inline void lazy_mmu_mode_pause(void) {}
+static inline void lazy_mmu_mode_resume(void) {}
 #endif
 
 #ifndef pte_batch_hint
diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c
index 32fbdf759ea2096c97c6efa3c88588d0683e75ae..d286e0a045437076c1ee87d66212603cc83fdaf3 100644
@@ -305,7 +305,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        pte_t pte;
        int index;
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_pause();
 
        index = PFN_DOWN(addr - data->start);
        page = data->pages[index];
@@ -319,7 +319,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        }
        spin_unlock(&init_mm.page_table_lock);
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_resume();
 
        return 0;
 }
@@ -471,7 +471,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        pte_t pte;
        int none;
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_pause();
 
        spin_lock(&init_mm.page_table_lock);
        pte = ptep_get(ptep);
@@ -483,7 +483,7 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
        if (likely(!none))
                __free_page(pfn_to_page(pte_pfn(pte)));
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_resume();
 
        return 0;
 }
diff --git a/mm/madvise.c b/mm/madvise.c
index b617b1be0f535f2601a13c92865c5335da147338..6bf7009fa5cedfc2dd6605a571ebf4c9114b711d 100644
@@ -453,7 +453,7 @@ restart:
        if (!start_pte)
                return 0;
        flush_tlb_batched_pending(mm);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        for (; addr < end; pte += nr, addr += nr * PAGE_SIZE) {
                nr = 1;
                ptent = ptep_get(pte);
@@ -461,7 +461,7 @@ restart:
                if (++batch_count == SWAP_CLUSTER_MAX) {
                        batch_count = 0;
                        if (need_resched()) {
-                               arch_leave_lazy_mmu_mode();
+                               lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                cond_resched();
                                goto restart;
@@ -497,7 +497,7 @@ restart:
                                if (!folio_trylock(folio))
                                        continue;
                                folio_get(folio);
-                               arch_leave_lazy_mmu_mode();
+                               lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                start_pte = NULL;
                                err = split_folio(folio);
@@ -508,7 +508,7 @@ restart:
                                if (!start_pte)
                                        break;
                                flush_tlb_batched_pending(mm);
-                               arch_enter_lazy_mmu_mode();
+                               lazy_mmu_mode_enable();
                                if (!err)
                                        nr = 0;
                                continue;
@@ -556,7 +556,7 @@ restart:
        }
 
        if (start_pte) {
-               arch_leave_lazy_mmu_mode();
+               lazy_mmu_mode_disable();
                pte_unmap_unlock(start_pte, ptl);
        }
        if (pageout)
@@ -675,7 +675,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
        if (!start_pte)
                return 0;
        flush_tlb_batched_pending(mm);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        for (; addr != end; pte += nr, addr += PAGE_SIZE * nr) {
                nr = 1;
                ptent = ptep_get(pte);
@@ -724,7 +724,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                if (!folio_trylock(folio))
                                        continue;
                                folio_get(folio);
-                               arch_leave_lazy_mmu_mode();
+                               lazy_mmu_mode_disable();
                                pte_unmap_unlock(start_pte, ptl);
                                start_pte = NULL;
                                err = split_folio(folio);
@@ -735,7 +735,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
                                if (!start_pte)
                                        break;
                                flush_tlb_batched_pending(mm);
-                               arch_enter_lazy_mmu_mode();
+                               lazy_mmu_mode_enable();
                                if (!err)
                                        nr = 0;
                                continue;
@@ -775,7 +775,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
        if (nr_swap)
                add_mm_counter(mm, MM_SWAPENTS, nr_swap);
        if (start_pte) {
-               arch_leave_lazy_mmu_mode();
+               lazy_mmu_mode_disable();
                pte_unmap_unlock(start_pte, ptl);
        }
        cond_resched();
diff --git a/mm/memory.c b/mm/memory.c
index da360a6eb8a48e29293430d0c577fb4b6ec58099..e0bce673f05305684772bd128c09480ff689cc2f 100644
@@ -1256,7 +1256,7 @@ again:
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
        orig_dst_pte = dst_pte;
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        do {
                nr = 1;
@@ -1325,7 +1325,7 @@ again:
        } while (dst_pte += nr, src_pte += nr, addr += PAGE_SIZE * nr,
                 addr != end);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(orig_src_pte, src_ptl);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
@@ -1846,7 +1846,7 @@ retry:
                return addr;
 
        flush_tlb_batched_pending(mm);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        do {
                bool any_skipped = false;
 
@@ -1878,7 +1878,7 @@ retry:
                direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval);
 
        add_mm_rss_vec(mm, rss);
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
 
        /* Do the actual TLB flush before dropping ptl */
        if (force_flush) {
@@ -2816,7 +2816,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
        mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        do {
                BUG_ON(!pte_none(ptep_get(pte)));
                if (!pfn_modify_allowed(pfn, prot)) {
@@ -2826,7 +2826,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(mapped_pte, ptl);
        return err;
 }
@@ -3177,7 +3177,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        return -EINVAL;
        }
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        if (fn) {
                do {
@@ -3190,7 +3190,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
        }
        *mask |= PGTBL_PTE_MODIFIED;
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
 
        if (mm != &init_mm)
                pte_unmap_unlock(mapped_pte, ptl);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 23379663b1e19233b676b8ecc82c42f7b9616765..0346c2d7819f6f1e6d4d29d5d7990bb4ae577461 100644
@@ -271,7 +271,7 @@ again:
        ptep = pte_offset_map_lock(mm, pmdp, start, &ptl);
        if (!ptep)
                goto again;
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        ptep += (addr - start) / PAGE_SIZE;
 
        for (; addr < end; addr += PAGE_SIZE, ptep++) {
@@ -313,7 +313,7 @@ again:
                        if (folio_test_large(folio)) {
                                int ret;
 
-                               arch_leave_lazy_mmu_mode();
+                               lazy_mmu_mode_disable();
                                pte_unmap_unlock(ptep, ptl);
                                ret = migrate_vma_split_folio(folio,
                                                          migrate->fault_page);
@@ -356,7 +356,7 @@ again:
                        if (folio && folio_test_large(folio)) {
                                int ret;
 
-                               arch_leave_lazy_mmu_mode();
+                               lazy_mmu_mode_disable();
                                pte_unmap_unlock(ptep, ptl);
                                ret = migrate_vma_split_folio(folio,
                                                          migrate->fault_page);
@@ -485,7 +485,7 @@ next:
        if (unmapped)
                flush_tlb_range(walk->vma, start, end);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(ptep - 1, ptl);
 
        return 0;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 283889e4f1cec73983deff11e60c97b6564a4a6d..c0571445bef7fe2f0f4bce5e2483cfe9324c9275 100644
@@ -233,7 +233,7 @@ static long change_pte_range(struct mmu_gather *tlb,
                is_private_single_threaded = vma_is_single_threaded_private(vma);
 
        flush_tlb_batched_pending(vma->vm_mm);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
        do {
                nr_ptes = 1;
                oldpte = ptep_get(pte);
@@ -379,7 +379,7 @@ static long change_pte_range(struct mmu_gather *tlb,
                        }
                }
        } while (pte += nr_ptes, addr += nr_ptes * PAGE_SIZE, addr != end);
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(pte - 1, ptl);
 
        return pages;
diff --git a/mm/mremap.c b/mm/mremap.c
index 672264807db6491bdcfca4b309a1c0dbc1286342..8275b9772ec1df903a77f1e300da507b4ddf1a83 100644
@@ -260,7 +260,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        flush_tlb_batched_pending(vma->vm_mm);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        for (; old_addr < old_end; old_ptep += nr_ptes, old_addr += nr_ptes * PAGE_SIZE,
                new_ptep += nr_ptes, new_addr += nr_ptes * PAGE_SIZE) {
@@ -305,7 +305,7 @@ static int move_ptes(struct pagetable_move_control *pmc,
                }
        }
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index e6dfd5f28acd79d7f08fa4245f5d7a059be2d0e2..b11f81095fa5392fe911b3539588a85944ab1876 100644
@@ -1103,7 +1103,7 @@ static long move_present_ptes(struct mm_struct *mm,
        /* It's safe to drop the reference now as the page-table is holding one. */
        folio_put(*first_src_folio);
        *first_src_folio = NULL;
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        while (true) {
                orig_src_pte = ptep_get_and_clear(mm, src_addr, src_pte);
@@ -1140,7 +1140,7 @@ static long move_present_ptes(struct mm_struct *mm,
                        break;
        }
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        if (src_addr > src_start)
                flush_tlb_range(src_vma, src_start, src_addr);
 
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 429a893b050513de3a62184d743e9b2e9ab2b048..32d6ee92d4ff88cfdb52c5b262e8208a600a358e 100644
@@ -108,7 +108,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        if (!pte)
                return -ENOMEM;
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        do {
                if (unlikely(!pte_none(ptep_get(pte)))) {
@@ -134,7 +134,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                pfn++;
        } while (pte += PFN_DOWN(size), addr += size, addr != end);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        *mask |= PGTBL_PTE_MODIFIED;
        return 0;
 }
@@ -371,7 +371,7 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        unsigned long size = PAGE_SIZE;
 
        pte = pte_offset_kernel(pmd, addr);
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        do {
 #ifdef CONFIG_HUGETLB_PAGE
@@ -390,7 +390,7 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                WARN_ON(!pte_none(ptent) && !pte_present(ptent));
        } while (pte += (size >> PAGE_SHIFT), addr += size, addr != end);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        *mask |= PGTBL_PTE_MODIFIED;
 }
 
@@ -538,7 +538,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
        if (!pte)
                return -ENOMEM;
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        do {
                struct page *page = pages[*nr];
@@ -560,7 +560,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
                (*nr)++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        *mask |= PGTBL_PTE_MODIFIED;
 
        return err;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 614ccf39fe3fae6efedef312ee58d56bfdfdb4ab..6cf5ee94be7accc30ebf3b315d2a344ced6cbf70 100644
@@ -3516,7 +3516,7 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end,
                return false;
        }
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 restart:
        for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) {
                unsigned long pfn;
@@ -3557,7 +3557,7 @@ restart:
        if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end))
                goto restart;
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        pte_unmap_unlock(pte, ptl);
 
        return suitable_to_scan(total, young);
@@ -3598,7 +3598,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area
        if (!spin_trylock(ptl))
                goto done;
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        do {
                unsigned long pfn;
@@ -3645,7 +3645,7 @@ next:
 
        walk_update_folio(walk, last, gen, dirty);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
        spin_unlock(ptl);
 done:
        *first = -1;
@@ -4244,7 +4244,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
                }
        }
 
-       arch_enter_lazy_mmu_mode();
+       lazy_mmu_mode_enable();
 
        pte -= (addr - start) / PAGE_SIZE;
 
@@ -4278,7 +4278,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw)
 
        walk_update_folio(walk, last, gen, dirty);
 
-       arch_leave_lazy_mmu_mode();
+       lazy_mmu_mode_disable();
 
        /* feedback from rmap walkers to page table walkers */
        if (mm_state && suitable_to_scan(i, young))