git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
mm/fault: convert remaining simple cases to lock_mm_and_find_vma()
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 Jun 2023 17:55:38 +0000 (10:55 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 1 Jul 2023 11:14:45 +0000 (13:14 +0200)
commit a050ba1e7422f2cc60ff8bfde3f96d34d00cb585 upstream.

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper.  They all have the regular fault handling pattern without odd
special cases.

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer.  That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.

Note that this conversion was done manually and with the exception of
alpha without any build testing, because I have a fairly limited cross-
building environment.  The cases are all simple, and I went through the
changes several times, but...

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
18 files changed:
arch/alpha/Kconfig
arch/alpha/mm/fault.c
arch/arc/Kconfig
arch/arc/mm/fault.c
arch/csky/Kconfig
arch/csky/mm/fault.c
arch/hexagon/Kconfig
arch/hexagon/mm/vm_fault.c
arch/loongarch/Kconfig
arch/loongarch/mm/fault.c
arch/nios2/Kconfig
arch/nios2/mm/fault.c
arch/sh/Kconfig
arch/sh/mm/fault.c
arch/sparc/Kconfig
arch/sparc/mm/fault_32.c
arch/xtensa/Kconfig
arch/xtensa/mm/fault.c

index 780d4673c3ca78feed39590e11096090f4ef8262..a40a61583b3355afd82ab712379a2e16ccbccbba 100644 (file)
@@ -29,6 +29,7 @@ config ALPHA
        select GENERIC_SMP_IDLE_THREAD
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_MOD_ARCH_SPECIFIC
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select ODD_RT_SIGACTION
        select OLD_SIGSUSPEND
index 7b01ae4f3bc6c7ab53dfb0c219c6fd75e2ffdf36..8c9850437e674451576c27388e0f6fe7b8780758 100644 (file)
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
                flags |= FAULT_FLAG_USER;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
           we can handle it.  */
- good_area:
        si_code = SEGV_ACCERR;
        if (cause < 0) {
                if (!(vma->vm_flags & VM_EXEC))
@@ -192,6 +184,7 @@ retry:
  bad_area:
        mmap_read_unlock(mm);
 
+ bad_area_nosemaphore:
        if (user_mode(regs))
                goto do_sigsegv;
 
index d9a13ccf89a3aa5d6a4fb5dc8158ba2e4dba4671..cb1074f74c3f1e6417aafcf84aa6300c70126b8a 100644 (file)
@@ -41,6 +41,7 @@ config ARC
        select HAVE_PERF_EVENTS
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index 5ca59a482632a880f0a40d31c44d9b1159581b85..f59e722d147f91972ac8cf42b1f77d1a12cca80b 100644 (file)
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (unlikely(address < vma->vm_start)) {
-               if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-                       goto bad_area;
-       }
+               goto bad_area_nosemaphore;
 
        /*
         * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ retry:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        /*
         * Major/minor page fault accounting
         * (in case of retry we only land here once)
index dba02da6fa344ca5dbeeab2bd106d06fd3332136..225df1674b33aeae44e3f003f91cc00607bb7759 100644 (file)
@@ -96,6 +96,7 @@ config CSKY
        select HAVE_REGS_AND_STACK_ACCESS_API
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
+       select LOCK_MM_AND_FIND_VMA
        select MAY_HAVE_SPARSE_IRQ
        select MODULES_USE_ELF_RELA if MODULES
        select OF
index e15f736cca4b4a43fb14989e099051fbdbf0a082..ae9781b7d92ea5e65554d42d6d9d6ba2ac5838d6 100644 (file)
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
        BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
        /*
         * Something tried to access memory that isn't in our memory map.
         * Fix it, but check if it's kernel or user first.
         */
-       mmap_read_unlock(mm);
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
        if (is_write(regs))
                flags |= FAULT_FLAG_WRITE;
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, addr);
+       vma = lock_mm_and_find_vma(mm, addr, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (likely(vma->vm_start <= addr))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, mm, code, addr);
-               return;
-       }
-       if (unlikely(expand_stack(vma, addr))) {
-               bad_area(regs, mm, code, addr);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
@@ -259,11 +247,11 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it.
         */
-good_area:
        code = SEGV_ACCERR;
 
        if (unlikely(access_error(regs, vma))) {
-               bad_area(regs, mm, code, addr);
+               mmap_read_unlock(mm);
+               bad_area_nosemaphore(regs, mm, code, addr);
                return;
        }
 
index 54eadf26517868f8cd575a1d5b777be700e35354..6726f4941015f353624a81838f6368c85508a0a7 100644 (file)
@@ -28,6 +28,7 @@ config HEXAGON
        select GENERIC_SMP_IDLE_THREAD
        select STACKTRACE_SUPPORT
        select GENERIC_CLOCKEVENTS_BROADCAST
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select GENERIC_CPU_DEVICES
        select ARCH_WANT_LD_ORPHAN_WARN
index 4b578d02fd01a9eb1513d34b708a84f6e5af31b5..7295ea3f8cc8d3c6f7cdec6fdcded75f11facb7a 100644 (file)
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
 
-       if (vma->vm_start <= address)
-               goto good_area;
-
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-
-       if (expand_stack(vma, address))
-               goto bad_area;
-
-good_area:
        /* Address space is OK.  Now check access rights. */
        si_code = SEGV_ACCERR;
 
@@ -143,6 +132,7 @@ good_area:
 bad_area:
        mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
index 3e5d6acbf240968995b09f4da517ecdeba3ce280..085d87d7e91d91976653cc6ca91266fa7e8b61d6 100644 (file)
@@ -125,6 +125,7 @@ config LOONGARCH
        select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
        select IRQ_FORCED_THREADING
        select IRQ_LOONGARCH_CPU
+       select LOCK_MM_AND_FIND_VMA
        select MMU_GATHER_MERGE_VMAS if MMU
        select MODULES_USE_ELF_RELA if MODULES
        select NEED_PER_CPU_EMBED_FIRST_CHUNK
index 449087bd589d339dba1b27f66cc07f170085cff7..da5b6d518cdb1d6c5ec550083f52f588ccf29066 100644 (file)
@@ -169,22 +169,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-       if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (!expand_stack(vma, address))
-               goto good_area;
+       vma = lock_mm_and_find_vma(mm, address, regs);
+       if (unlikely(!vma))
+               goto bad_area_nosemaphore;
+       goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        do_sigsegv(regs, write, address, si_code);
        return;
 
index a582f72104f39229ca524cc2aa5f645f3bc1d5f3..1fb78865a459337b3e62443c06f5f9b1229ff21b 100644 (file)
@@ -16,6 +16,7 @@ config NIOS2
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_KGDB
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select OF
        select OF_EARLY_FLATTREE
index ca64eccea5511d223add396c5f92d486703858b0..e3fa9c15181df23e01085416dec3de5e41b922cf 100644 (file)
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-       if (!mmap_read_trylock(mm)) {
-               if (!user_mode(regs) && !search_exception_tables(regs->ea))
-                       goto bad_area_nosemaphore;
 retry:
-               mmap_read_lock(mm);
-       }
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
-good_area:
        code = SEGV_ACCERR;
 
        switch (cause) {
index 0665ac0add0b4991fd6ed82d4928dfa86063ffe7..101a0d094a667cda11357fb3134165da0f769e92 100644 (file)
@@ -56,6 +56,7 @@ config SUPERH
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select IRQ_FORCED_THREADING
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select NEED_SG_DMA_LENGTH
        select NO_DMA if !MMU && !DMA_COHERENT
index acd2f5e50bfcd08ccb71fc71a8f33a1c9cb84c06..06e6b49529245a58bce87aef5caaeb0e5051357d 100644 (file)
@@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        }
 
 retry:
-       mmap_read_lock(mm);
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (unlikely(!vma)) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (likely(vma->vm_start <= address))
-               goto good_area;
-       if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-               bad_area(regs, error_code, address);
-               return;
-       }
-       if (unlikely(expand_stack(vma, address))) {
-               bad_area(regs, error_code, address);
+               bad_area_nosemaphore(regs, error_code, address);
                return;
        }
 
@@ -461,7 +449,6 @@ retry:
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        if (unlikely(access_error(error_code, vma))) {
                bad_area_access_error(regs, error_code, address);
                return;
index 84437a4c65454ca0677c6d4271b55fa4a7ffbc3f..dbb1760cbe8c977e3647e7ab8aadd46c6689ab14 100644 (file)
@@ -56,6 +56,7 @@ config SPARC32
        select DMA_DIRECT_REMAP
        select GENERIC_ATOMIC64
        select HAVE_UID16
+       select LOCK_MM_AND_FIND_VMA
        select OLD_SIGACTION
        select ZONE_DMA
 
index 179295b14664a55d889844790f0048c8b29ef553..a3ccc0267bc20515fcf42437430f0e410c6c2b0f 100644 (file)
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        if (pagefault_disabled() || !mm)
                goto no_context;
 
+       if (!from_user && address >= PAGE_OFFSET)
+               goto no_context;
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-
-       if (!from_user && address >= PAGE_OFFSET)
-               goto bad_area;
-
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
        /*
         * Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
-good_area:
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -321,17 +312,9 @@ static void force_user_fault(unsigned long address, int write)
 
        code = SEGV_MAPERR;
 
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
-good_area:
+               goto bad_area_nosemaphore;
        code = SEGV_ACCERR;
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
@@ -350,6 +333,7 @@ good_area:
        return;
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        __do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
        return;
 
index bcb0c5d2abc2fe78337eec71fab09d11d212898a..6d3c9257aa133fc0b44da472dd66d45c0d0025b6 100644 (file)
@@ -49,6 +49,7 @@ config XTENSA
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_VIRT_CPU_ACCOUNTING_GEN
        select IRQ_DOMAIN
+       select LOCK_MM_AND_FIND_VMA
        select MODULES_USE_ELF_RELA
        select PERF_USE_VMALLOC
        select TRACE_IRQFLAGS_SUPPORT
index faf7cf35a0ee3d244f60332ad967da9e6d500142..d1eb8d6c5b826702b87e289b5737310cb9695995 100644 (file)
@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-       mmap_read_lock(mm);
-       vma = find_vma(mm, address);
-
+       vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
-               goto bad_area;
-       if (vma->vm_start <= address)
-               goto good_area;
-       if (!(vma->vm_flags & VM_GROWSDOWN))
-               goto bad_area;
-       if (expand_stack(vma, address))
-               goto bad_area;
+               goto bad_area_nosemaphore;
 
        /* Ok, we have a good vm_area for this memory access, so
         * we can handle it..
         */
 
-good_area:
        code = SEGV_ACCERR;
 
        if (is_write) {
@@ -205,6 +196,7 @@ good_area:
         */
 bad_area:
        mmap_read_unlock(mm);
+bad_area_nosemaphore:
        if (user_mode(regs)) {
                force_sig_fault(SIGSEGV, code, (void *) address);
                return;