mm/vma: improve and document __is_vma_write_locked()
author Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Fri, 23 Jan 2026 20:12:18 +0000 (20:12 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 31 Jan 2026 22:22:51 +0000 (14:22 -0800)
We don't actually need __is_vma_write_locked() to provide the mm sequence
number via an output parameter; rather, we can separate that out into
another function - __vma_raw_mm_seqnum() - and have any callers which need
the sequence number invoke that instead.
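
For illustration, a hedged sketch of the new calling pattern (the helper
names match the diff below; the caller itself is hypothetical):

    #include <linux/mm_types.h>
    #include <linux/mmap_lock.h>

    /*
     * Hypothetical caller: the common case asks a simple yes/no question,
     * and a caller that genuinely needs the raw sequence number (such as
     * __vma_start_write() in the diff below) fetches it separately.
     */
    static bool example_vma_needs_write_lock(struct vm_area_struct *vma,
                                             unsigned int *seqp)
    {
            if (__is_vma_write_locked(vma))
                    return false;               /* already write-locked */

            *seqp = __vma_raw_mm_seqnum(vma);   /* asserts mmap write lock */
            return true;
    }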

Access to the raw sequence number requires that we hold the exclusive mmap
lock, so that we know we cannot race vma_end_write_all(); move the assert
into __vma_raw_mm_seqnum() to make this requirement clear.
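
A minimal sketch of why the assert lives there, using the existing
mmap_write_lock()/mmap_write_unlock() API (the function below is
hypothetical):

    /*
     * mm->mm_lock_seq is advanced by vma_end_write_all() when the mmap
     * write lock is dropped, so the raw read is only stable while we hold
     * that lock exclusively.
     */
    static unsigned int example_read_raw_seqnum(struct vm_area_struct *vma)
    {
            struct mm_struct *mm = vma->vm_mm;
            unsigned int seq;

            mmap_write_lock(mm);            /* excludes vma_end_write_all() */
            seq = __vma_raw_mm_seqnum(vma); /* internal assert now passes */
            mmap_write_unlock(mm);          /* seqnum may advance from here */

            return seq;
    }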

Also, while we're here, convert all of the VM_BUG_ON_VMA() invocations to
VM_WARN_ON_ONCE_VMA() in line with the convention that we do not oops when
we can avoid it.
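
To illustrate the convention (hypothetical assert; under CONFIG_DEBUG_VM,
VM_BUG_ON_VMA() oopses on the first failure, whereas VM_WARN_ON_ONCE_VMA()
warns once, dumps the VMA and lets the system keep running):

    #include <linux/mmdebug.h>

    static inline void example_assert_attached(struct vm_area_struct *vma)
    {
            /*
             * Old style, now avoided (fatal oops under CONFIG_DEBUG_VM):
             *
             *      VM_BUG_ON_VMA(!vma_is_attached(vma), vma);
             */

            /* New convention: warn once and keep running. */
            VM_WARN_ON_ONCE_VMA(!vma_is_attached(vma), vma);
    }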

[lorenzo.stoakes@oracle.com: minor tweaks, per Vlastimil]
Link: https://lkml.kernel.org/r/3fa89c13-232d-4eee-86cc-96caa75c2c67@lucifer.local
Link: https://lkml.kernel.org/r/ef6c415c2d2c03f529dca124ccaed66bc2f60edc.1769198904.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Waiman Long <longman@redhat.com>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmap_lock.h
mm/mmap_lock.c

diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 678f90080fa6036508f88ae3945fcd07d6635cc7..1746a172a81c87be052a753089fe8c1ec4e77f4a 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -258,21 +258,31 @@ static inline void vma_end_read(struct vm_area_struct *vma)
        vma_refcount_put(vma);
 }
 
-/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
-static inline bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq)
+static inline unsigned int __vma_raw_mm_seqnum(struct vm_area_struct *vma)
 {
+       const struct mm_struct *mm = vma->vm_mm;
+
+       /* We must hold an exclusive write lock for this access to be valid. */
        mmap_assert_write_locked(vma->vm_mm);
+       return mm->mm_lock_seq.sequence;
+}
 
+/*
+ * Determine whether a VMA is write-locked. Must be invoked ONLY if the mmap
+ * write lock is held.
+ *
+ * Returns true if write-locked, otherwise false.
+ */
+static inline bool __is_vma_write_locked(struct vm_area_struct *vma)
+{
        /*
         * current task is holding mmap_write_lock, both vma->vm_lock_seq and
         * mm->mm_lock_seq can't be concurrently modified.
         */
-       *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence;
-       return (vma->vm_lock_seq == *mm_lock_seq);
+       return vma->vm_lock_seq == __vma_raw_mm_seqnum(vma);
 }
 
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
-               int state);
+int __vma_start_write(struct vm_area_struct *vma, int state);
 
 /*
  * Begin writing to a VMA.
@@ -281,12 +291,10 @@ int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
  */
 static inline void vma_start_write(struct vm_area_struct *vma)
 {
-       unsigned int mm_lock_seq;
-
-       if (__is_vma_write_locked(vma, &mm_lock_seq))
+       if (__is_vma_write_locked(vma))
                return;
 
-       __vma_start_write(vma, mm_lock_seq, TASK_UNINTERRUPTIBLE);
+       __vma_start_write(vma, TASK_UNINTERRUPTIBLE);
 }
 
 /**
@@ -305,30 +313,25 @@ static inline void vma_start_write(struct vm_area_struct *vma)
 static inline __must_check
 int vma_start_write_killable(struct vm_area_struct *vma)
 {
-       unsigned int mm_lock_seq;
-
-       if (__is_vma_write_locked(vma, &mm_lock_seq))
+       if (__is_vma_write_locked(vma))
                return 0;
-       return __vma_start_write(vma, mm_lock_seq, TASK_KILLABLE);
+
+       return __vma_start_write(vma, TASK_KILLABLE);
 }
 
 static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 {
-       unsigned int mm_lock_seq;
-
-       VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+       VM_WARN_ON_ONCE_VMA(!__is_vma_write_locked(vma), vma);
 }
 
 static inline void vma_assert_locked(struct vm_area_struct *vma)
 {
-       unsigned int mm_lock_seq;
-
        /*
         * See the comment describing the vm_area_struct->vm_refcnt field for
         * details of possible refcnt values.
         */
-       VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
-                     !__is_vma_write_locked(vma, &mm_lock_seq), vma);
+       VM_WARN_ON_ONCE_VMA(refcount_read(&vma->vm_refcnt) <= 1 &&
+                           !__is_vma_write_locked(vma), vma);
 }
 
 static inline bool vma_is_attached(struct vm_area_struct *vma)
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 490793ac88edda2575d2a5c399cc13fbd43af257..898c2ef1e9580371501b1bbc1ed5267286af2e15 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -136,14 +136,14 @@ static int __vma_start_exclude_readers(struct vma_exclude_readers_state *ves)
        return 0;
 }
 
-int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
-               int state)
+int __vma_start_write(struct vm_area_struct *vma, int state)
 {
-       int err;
+       const unsigned int mm_lock_seq = __vma_raw_mm_seqnum(vma);
        struct vma_exclude_readers_state ves = {
                .vma = vma,
                .state = state,
        };
+       int err;
 
        err = __vma_start_exclude_readers(&ves);
        if (err) {
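
A hedged usage sketch of the reworked interface (hypothetical caller; the
error value is whatever __vma_start_write() propagates, typically -EINTR
when a fatal signal arrives):

    /* The caller must already hold mmap_write_lock(vma->vm_mm). */
    static int example_modify_vma(struct vm_area_struct *vma)
    {
            int err;

            err = vma_start_write_killable(vma);
            if (err)
                    return err;     /* interrupted by a fatal signal */

            /* ... modify the VMA while readers are excluded ... */

            return 0;
    }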