mm/vma: add+use vma lockdep acquire/release defines
author		Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
		Fri, 23 Jan 2026 20:12:14 +0000 (20:12 +0000)
committer	Andrew Morton <akpm@linux-foundation.org>
		Sat, 31 Jan 2026 22:22:50 +0000 (14:22 -0800)
The code is littered with inscrutable and duplicative lockdep
incantations.  Replace these with defines which explain what is going
on, and add commentary to explain what we're doing.

If lockdep is disabled these become no-ops.  We must use defines, rather
than functions, so _RET_IP_ remains meaningful: a define expands at the
call site, so _RET_IP_ still identifies the locking function's caller
rather than some intermediate helper.
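
To illustrate, a minimal userspace sketch (not kernel code; the
trace_acquire_*() helpers are invented for illustration, while in the
kernel _RET_IP_ is (unsigned long)__builtin_return_address(0)):

	#include <stdio.h>

	/* Stand-in for the kernel's _RET_IP_. */
	#define RET_IP	((unsigned long)__builtin_return_address(0))

	/* Define form: expands inside its user, so RET_IP evaluates there
	 * and reports *that* function's caller. */
	#define trace_acquire_define() \
		printf("define:   called from %#lx\n", RET_IP)

	/* Out-of-line form: RET_IP evaluates here, so it can only ever
	 * report lock_something() itself -- the real call site is lost. */
	static __attribute__((noinline)) void trace_acquire_func(void)
	{
		printf("function: called from %#lx\n", RET_IP);
	}

	static __attribute__((noinline)) void lock_something(void)
	{
		trace_acquire_define();	/* reports lock_something()'s caller */
		trace_acquire_func();	/* reports lock_something() itself */
	}

	int main(void)
	{
		lock_something();
		return 0;
	}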

These are self-documenting and aid readability of the code.

Additionally, instead of using the confusing rwsem_*() form for something
that is emphatically not an rwsem, we explicitly use the
lock_acquire_shared()/lock_acquire_exclusive() and lock_release() lockdep
invocations, since we are doing something rather custom here and these
make more sense to use.
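
For context, the rwsem_*() annotations being replaced are themselves thin
wrappers around these generic lockdep primitives.  Paraphrasing
include/linux/lockdep.h (exact definitions may vary by kernel version):

	#define lock_acquire_exclusive(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, n, i)
	#define lock_acquire_shared(l, s, t, n, i)	lock_acquire(l, s, t, 1, 1, n, i)

	#define rwsem_acquire(l, s, t, i)	lock_acquire_exclusive(l, s, t, NULL, i)
	#define rwsem_acquire_read(l, s, t, i)	lock_acquire_shared(l, s, t, NULL, i)
	#define rwsem_release(l, i)		lock_release(l, i)

The switch from rwsem_acquire_read() to lock_acquire_shared() (and
likewise for the exclusive and release forms) therefore changes only the
spelling, not the resulting lockdep behaviour.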

No functional change intended.

Link: https://lkml.kernel.org/r/fdae72441949ecf3b4a0ed3510da803e881bb153.1769198904.git.lorenzo.stoakes@oracle.com
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
include/linux/mmap_lock.h
mm/mmap_lock.c

diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 294fb282052d1252fdf88f9d956498257a83347c..1887ca55ead7ec591226383e5d886db50ff8dc73 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -78,6 +78,37 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm)
 
 #ifdef CONFIG_PER_VMA_LOCK
 
+/*
+ * VMA locks do not behave like most ordinary locks found in the kernel, so we
+ * cannot quite have full lockdep tracking in the way we would ideally prefer.
+ *
+ * Read locks act as shared locks which exclude an exclusive lock being
+ * taken. We therefore mark these accordingly on read lock acquire/release.
+ *
+ * Write locks are acquired exclusively per-VMA, but released in a shared
+ * fashion; that is, upon vma_end_write_all() we update the mm's seqcount
+ * such that the write lock is released.
+ *
+ * We therefore cannot track write locks per-VMA, nor do we try. Mitigating this
+ * is the fact that, of course, we do lockdep-track the mmap lock rwsem which
+ * must be held when taking a VMA write lock.
+ *
+ * We do, however, want to indicate that, during either acquisition of a VMA
+ * write lock or detachment of a VMA, we require the lock held to be exclusive,
+ * so we utilise lockdep to do so.
+ */
+#define __vma_lockdep_acquire_read(vma) \
+       lock_acquire_shared(&vma->vmlock_dep_map, 0, 1, NULL, _RET_IP_)
+#define __vma_lockdep_release_read(vma) \
+       lock_release(&vma->vmlock_dep_map, _RET_IP_)
+#define __vma_lockdep_acquire_exclusive(vma) \
+       lock_acquire_exclusive(&vma->vmlock_dep_map, 0, 0, NULL, _RET_IP_)
+#define __vma_lockdep_release_exclusive(vma) \
+       lock_release(&vma->vmlock_dep_map, _RET_IP_)
+/* Only meaningful if CONFIG_LOCK_STAT is defined. */
+#define __vma_lockdep_stat_mark_acquired(vma) \
+       lock_acquired(&vma->vmlock_dep_map, _RET_IP_)
+
 static inline void mm_lock_seqcount_init(struct mm_struct *mm)
 {
        seqcount_init(&mm->mm_lock_seq);
@@ -176,9 +207,9 @@ static inline void vma_refcount_put(struct vm_area_struct *vma)
        struct mm_struct *mm = vma->vm_mm;
        int newcnt;
 
-       rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
-
+       __vma_lockdep_release_read(vma);
        newcnt = __vma_refcount_put_return(vma);
+
        /*
         * __vma_enter_locked() may be sleeping waiting for readers to drop
         * their reference count, so wake it up if we were the last reader
@@ -207,7 +238,7 @@ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int
                                                              VM_REFCNT_LIMIT)))
                return false;
 
-       rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+       __vma_lockdep_acquire_read(vma);
        return true;
 }
 
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index 6be1bbcde09ee4cf22fc6bd51b95a78e0d8a75a4..85b2ae1d97208affcfc3ecd66141d4a4d6558d83 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -72,7 +72,7 @@ static inline int __vma_enter_locked(struct vm_area_struct *vma,
        if (!refcount_add_not_zero(VM_REFCNT_EXCLUDE_READERS_FLAG, &vma->vm_refcnt))
                return 0;
 
-       rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
+       __vma_lockdep_acquire_exclusive(vma);
        err = rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
                   refcount_read(&vma->vm_refcnt) == tgt_refcnt,
                   state);
@@ -85,10 +85,10 @@ static inline int __vma_enter_locked(struct vm_area_struct *vma,
                        WARN_ON_ONCE(!detaching);
                        err = 0;
                }
-               rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+               __vma_lockdep_release_exclusive(vma);
                return err;
        }
-       lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
+       __vma_lockdep_stat_mark_acquired(vma);
 
        return 1;
 }
@@ -97,7 +97,7 @@ static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached)
 {
        *detached = refcount_sub_and_test(VM_REFCNT_EXCLUDE_READERS_FLAG,
                                          &vma->vm_refcnt);
-       rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+       __vma_lockdep_release_exclusive(vma);
 }
 
 int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
@@ -204,7 +204,7 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm,
                goto err;
        }
 
-       rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+       __vma_lockdep_acquire_read(vma);
 
        if (unlikely(vma->vm_mm != mm))
                goto err_unstable;
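
As an aside, the asymmetric write-lock semantics described in the new
mmap_lock.h comment (exclusive per-VMA acquire, bulk shared-style
release) stem from the mm-wide sequence count: a VMA counts as
write-locked while its recorded sequence matches the mm's, and
vma_end_write_all() invalidates every such match at once by bumping the
mm's count.  A toy userspace model of that scheme (the toy_* structures
and function names are hypothetical, and real locking/ordering is
elided):

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model: a "vma" is write-locked iff its recorded sequence
	 * matches the current mm-wide sequence. */
	struct toy_mm  { unsigned long lock_seq; };
	struct toy_vma { unsigned long lock_seq; };

	static void toy_vma_start_write(struct toy_mm *mm, struct toy_vma *vma)
	{
		vma->lock_seq = mm->lock_seq;	/* exclusive, per-VMA acquire */
	}

	static bool toy_vma_is_write_locked(struct toy_mm *mm, struct toy_vma *vma)
	{
		return vma->lock_seq == mm->lock_seq;
	}

	/* One bump releases *every* write-locked VMA at once -- which is
	 * why there is no per-VMA lockdep release point for write locks. */
	static void toy_vma_end_write_all(struct toy_mm *mm)
	{
		mm->lock_seq++;
	}

	int main(void)
	{
		struct toy_mm mm = { .lock_seq = 1 };
		struct toy_vma a = { 0 }, b = { 0 };

		toy_vma_start_write(&mm, &a);
		toy_vma_start_write(&mm, &b);
		printf("a locked? %d, b locked? %d\n",
		       toy_vma_is_write_locked(&mm, &a),
		       toy_vma_is_write_locked(&mm, &b));	/* 1, 1 */

		toy_vma_end_write_all(&mm);
		printf("a locked? %d, b locked? %d\n",
		       toy_vma_is_write_locked(&mm, &a),
		       toy_vma_is_write_locked(&mm, &b));	/* 0, 0 */
		return 0;
	}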