fs/proc/task_mmu: reduce scope of lazy mmu region
author Ryan Roberts <ryan.roberts@arm.com>
Mon, 3 Mar 2025 14:15:36 +0000 (14:15 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Mon, 17 Mar 2025 07:05:34 +0000 (00:05 -0700)
Update the way arch_[enter|leave]_lazy_mmu_mode() is called in
pagemap_scan_pmd_entry() to follow the normal pattern of holding the ptl
for user space mappings.  As a result the scope is reduced to only the pte
table, but that's where most of the performance win is.
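
For reference, the normal pattern this moves to looks roughly like the
sketch below. This is a hypothetical walker for illustration, not code
from the patch; pte_offset_map_lock()/pte_unmap_unlock() and the
arch_[enter|leave]_lazy_mmu_mode() hooks are the real APIs, everything
else is invented:

        static int scan_pte_table(struct vm_area_struct *vma, pmd_t *pmd,
                                  unsigned long start, unsigned long end)
        {
                pte_t *start_pte, *pte;
                unsigned long addr;
                spinlock_t *ptl;

                start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
                if (!pte)
                        return -EAGAIN; /* pte table is gone; caller retries */

                /* Enter lazy mmu mode only once the ptl is held... */
                arch_enter_lazy_mmu_mode();

                for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
                        /* read/modify ptes; the arch may batch hardware updates */
                }

                /* ...and leave it before the lock is dropped. */
                arch_leave_lazy_mmu_mode();
                pte_unmap_unlock(start_pte, ptl);

                return 0;
        }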

While I believe there wasn't technically a bug here, the original scope
made it easier to accidentally nest lazy mmu regions or, worse, to
accidentally call something like kmap(), which expects an immediate-mode
pte modification but would instead have it deferred.
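
As a hedged illustration of that second hazard (an invented fragment,
not code from this patch; kmap_local_page()/kunmap_local() are the real
APIs): on a HIGHMEM configuration kmap_local_page() installs a kernel
pte and expects it to take effect immediately, but inside a lazy mmu
region the arch may defer that pte write until
arch_leave_lazy_mmu_mode():

        arch_enter_lazy_mmu_mode();
        ...
        /*
         * Mapping a highmem page here is the bug pattern: the kmap pte
         * write may be deferred until the lazy region ends, so the
         * mapping may not yet be live when it is dereferenced.
         */
        kaddr = kmap_local_page(page);
        memcpy(buf, kaddr, PAGE_SIZE);  /* may hit a not-yet-installed pte */
        kunmap_local(kaddr);
        ...
        arch_leave_lazy_mmu_mode();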

Link: https://lkml.kernel.org/r/20250303141542.3371656-3-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Acked-by: David Hildenbrand <david@redhat.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/proc/task_mmu.c

index c17615e21a5d6fa2b967569a11116258b0e63727..b0f189815512c700070fab2db68dbc35e3818d11 100644
@@ -2459,22 +2459,19 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
        spinlock_t *ptl;
        int ret;
 
-       arch_enter_lazy_mmu_mode();
-
        ret = pagemap_scan_thp_entry(pmd, start, end, walk);
-       if (ret != -ENOENT) {
-               arch_leave_lazy_mmu_mode();
+       if (ret != -ENOENT)
                return ret;
-       }
 
        ret = 0;
        start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
        if (!pte) {
-               arch_leave_lazy_mmu_mode();
                walk->action = ACTION_AGAIN;
                return 0;
        }
 
+       arch_enter_lazy_mmu_mode();
+
        if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
                /* Fast path for performing exclusive WP */
                for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
@@ -2543,8 +2540,8 @@ flush_and_return:
        if (flush_end)
                flush_tlb_range(vma, start, addr);
 
-       pte_unmap_unlock(start_pte, ptl);
        arch_leave_lazy_mmu_mode();
+       pte_unmap_unlock(start_pte, ptl);
 
        cond_resched();
        return ret;