git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
LoongArch: Try VMA lock-based page fault handling first
Author: Wentao Guan <guanwentao@uniontech.com>
Thu, 2 Oct 2025 14:39:19 +0000 (22:39 +0800)
Committer: Huacai Chen <chenhuacai@loongson.cn>
Thu, 2 Oct 2025 14:39:19 +0000 (22:39 +0800)
Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

The "ebizzy -mTRp" test on Loongson-3A6000 shows that PER_VMA_LOCK can
improve the benchmark by about 17.9% (97837.7 to 115430.8).

This is the LoongArch variant of "x86/mm: try VMA lock-based page fault
handling first".

Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
arch/loongarch/Kconfig
arch/loongarch/mm/fault.c

index 0f1f53b3c58bf41131ee6361f399e16d02995acb..29e486cc2d049839e4179ca2047ce69a46546964 100644 (file)
@@ -69,6 +69,7 @@ config LOONGARCH
        select ARCH_SUPPORTS_LTO_CLANG_THIN
        select ARCH_SUPPORTS_MSEAL_SYSTEM_MAPPINGS
        select ARCH_SUPPORTS_NUMA_BALANCING
+       select ARCH_SUPPORTS_PER_VMA_LOCK
        select ARCH_SUPPORTS_RT
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
index deefd9617d00857a49095de9dcf1cc3b6913e105..2c93d33356e57b5ced52796d918133f2b696c221 100644 (file)
@@ -215,6 +215,58 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
                flags |= FAULT_FLAG_USER;
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+       if (!(flags & FAULT_FLAG_USER))
+               goto lock_mmap;
+
+       vma = lock_vma_under_rcu(mm, address);
+       if (!vma)
+               goto lock_mmap;
+
+       if (write) {
+               flags |= FAULT_FLAG_WRITE;
+               if (!(vma->vm_flags & VM_WRITE)) {
+                       vma_end_read(vma);
+                       si_code = SEGV_ACCERR;
+                       count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+                       goto bad_area_nosemaphore;
+               }
+       } else {
+               if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs)) {
+                       vma_end_read(vma);
+                       si_code = SEGV_ACCERR;
+                       count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+                       goto bad_area_nosemaphore;
+               }
+               if (!(vma->vm_flags & (VM_READ | VM_WRITE)) && address != exception_era(regs)) {
+                       vma_end_read(vma);
+                       si_code = SEGV_ACCERR;
+                       count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+                       goto bad_area_nosemaphore;
+               }
+       }
+
+       fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+       if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
+               vma_end_read(vma);
+
+       if (!(fault & VM_FAULT_RETRY)) {
+               count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+               goto done;
+       }
+
+       count_vm_vma_lock_event(VMA_LOCK_RETRY);
+       if (fault & VM_FAULT_MAJOR)
+               flags |= FAULT_FLAG_TRIED;
+
+       /* Quick path to respond to signals */
+       if (fault_signal_pending(fault, regs)) {
+               if (!user_mode(regs))
+                       no_context(regs, write, address);
+               return;
+       }
+lock_mmap:
+
 retry:
        vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma))
@@ -276,8 +328,10 @@ good_area:
                 */
                goto retry;
        }
+       mmap_read_unlock(mm);
+
+done:
        if (unlikely(fault & VM_FAULT_ERROR)) {
-               mmap_read_unlock(mm);
                if (fault & VM_FAULT_OOM) {
                        do_out_of_memory(regs, write, address);
                        return;
@@ -290,8 +344,6 @@ good_area:
                }
                BUG();
        }
-
-       mmap_read_unlock(mm);
 }
 
 asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,