#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * VMA locks do not behave like most ordinary locks found in the kernel, so we
+ * cannot quite have full lockdep tracking in the way we would ideally prefer.
+ *
+ * Read locks act as shared locks which exclude an exclusive lock from being
+ * taken. We therefore mark these accordingly on read lock acquire/release.
+ *
+ * Write locks are acquired exclusively per-VMA, but released in a shared
+ * fashion: upon vma_end_write_all() we update the mm's seqcount such that
+ * every outstanding VMA write lock on that mm is released at once.
+ *
+ * We therefore cannot track write locks per-VMA, nor do we try. Mitigating
+ * this is the fact that we do lockdep-track the mmap lock rwsem, which must be
+ * held when taking a VMA write lock.
+ *
+ * We do, however, want to indicate that both acquiring a VMA write lock and
+ * detaching a VMA require the lock to be held exclusively, so we utilise
+ * lockdep to express this.
+ */
+#define __vma_lockdep_acquire_read(vma) \
+	lock_acquire_shared(&(vma)->vmlock_dep_map, 0, 1, NULL, _RET_IP_)
+#define __vma_lockdep_release_read(vma) \
+	lock_release(&(vma)->vmlock_dep_map, _RET_IP_)
+#define __vma_lockdep_acquire_exclusive(vma) \
+	lock_acquire_exclusive(&(vma)->vmlock_dep_map, 0, 0, NULL, _RET_IP_)
+#define __vma_lockdep_release_exclusive(vma) \
+	lock_release(&(vma)->vmlock_dep_map, _RET_IP_)
+/* Only meaningful if CONFIG_LOCK_STAT is defined. */
+#define __vma_lockdep_stat_mark_acquired(vma) \
+	lock_acquired(&(vma)->vmlock_dep_map, _RET_IP_)
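+
+/*
+ * Illustrative pairing only: a reader that successfully takes its per-VMA
+ * reference does
+ *
+ *	__vma_lockdep_acquire_read(vma);
+ *	... read-side critical section ...
+ *	__vma_lockdep_release_read(vma);
+ *
+ * while write-lock and detach paths bracket their exclusive window (entered
+ * via __vma_enter_locked()) with the _exclusive variants.
+ */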
+
static inline void mm_lock_seqcount_init(struct mm_struct *mm)
{
seqcount_init(&mm->mm_lock_seq);
struct mm_struct *mm = vma->vm_mm;
int newcnt;
- rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
-
+ __vma_lockdep_release_read(vma);
newcnt = __vma_refcount_put_return(vma);
+
/*
* __vma_enter_locked() may be sleeping waiting for readers to drop
* their reference count, so wake it up if we were the last reader
VM_REFCNT_LIMIT)))
return false;
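+	/* Reader reference obtained; mark the shared acquisition for lockdep. */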
- rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ __vma_lockdep_acquire_read(vma);
return true;
}
if (!refcount_add_not_zero(VM_REFCNT_EXCLUDE_READERS_FLAG, &vma->vm_refcnt))
return 0;
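+	/* Both writing to and detaching a VMA require exclusive ownership. */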
- rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_);
+ __vma_lockdep_acquire_exclusive(vma);
err = rcuwait_wait_event(&vma->vm_mm->vma_writer_wait,
refcount_read(&vma->vm_refcnt) == tgt_refcnt,
state);
WARN_ON_ONCE(!detaching);
err = 0;
}
- rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+ __vma_lockdep_release_exclusive(vma);
return err;
}
- lock_acquired(&vma->vmlock_dep_map, _RET_IP_);
+ __vma_lockdep_stat_mark_acquired(vma);
return 1;
}
{
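+	/*
+	 * Drop VM_REFCNT_EXCLUDE_READERS_FLAG; if vm_refcnt drops to zero as a
+	 * result, the VMA is now detached.
+	 */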
*detached = refcount_sub_and_test(VM_REFCNT_EXCLUDE_READERS_FLAG,
&vma->vm_refcnt);
- rwsem_release(&vma->vmlock_dep_map, _RET_IP_);
+ __vma_lockdep_release_exclusive(vma);
}
int __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq,
goto err;
}
- rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_);
+ __vma_lockdep_acquire_read(vma);
if (unlikely(vma->vm_mm != mm))
goto err_unstable;