bpf: switch task_vma iterator from mmap_lock to per-VMA locks
author Puranjay Mohan <puranjay@kernel.org>
Wed, 8 Apr 2026 15:45:36 +0000 (08:45 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 10 Apr 2026 19:05:16 +0000 (12:05 -0700)
The open-coded task_vma iterator holds mmap_lock for the entire
duration of the iteration, adding contention on an already heavily
contended lock.

Switch to per-VMA locking. Find the next VMA via an RCU-protected maple
tree walk and lock it with lock_vma_under_rcu(). lock_next_vma() is not
used because its fallback takes mmap_read_lock(), and the iterator must
work in non-sleepable contexts.

lock_vma_under_rcu() is a point lookup (mas_walk): it finds the VMA
containing a given address but cannot iterate across gaps. An
RCU-protected vma_next() walk (mas_find) is therefore performed first
to locate the next VMA's vm_start, which is then passed to
lock_vma_under_rcu().

Between the RCU walk and the lock, the VMA may be removed, shrunk, or
write-locked. On failure, advance past it using the vm_end seen during
the RCU walk. Because the VMA slab is SLAB_TYPESAFE_BY_RCU, that vm_end
may be stale; fall back to advancing by PAGE_SIZE when it would not
make forward progress. Concurrent VMA insertions at addresses the
iterator has already passed are not detected.

CONFIG_PER_VMA_LOCK is required; without it, bpf_iter_task_vma_new()
returns -EOPNOTSUPP.
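
For reference, a minimal sketch of a BPF program consuming this
open-coded iterator, assuming the kfunc declarations are in scope as
in the kernel selftests (e.g. via bpf_experimental.h); the attach
point, program name, and counting logic are illustrative only:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h" /* kfunc declarations, as in selftests */

  char _license[] SEC("license") = "GPL";

  int vma_count;

  SEC("raw_tp/sys_enter")
  int count_vmas(const void *ctx)
  {
          struct task_struct *task = bpf_get_current_task_btf();
          struct bpf_iter_task_vma vma_it;
          struct vm_area_struct *vma;

          if (bpf_iter_task_vma_new(&vma_it, task, 0))
                  return 0;
          /* each returned vma is read-locked via its per-VMA lock
           * until the next _next()/_destroy() call
           */
          while ((vma = bpf_iter_task_vma_next(&vma_it)))
                  vma_count++;
          bpf_iter_task_vma_destroy(&vma_it);
          return 0;
  }

With this change, such a loop holds only the current VMA's lock
between _next() calls instead of mmap_lock across the whole walk.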

Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20260408154539.3832150-3-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/task_iter.c

index c1f5fbe9dc2f3d2865779cc8189784a604cdd103..87e87f18913d9805e81d920cf29d5e5d46e1bc94 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/bpf_mem_alloc.h>
 #include <linux/btf_ids.h>
 #include <linux/mm_types.h>
+#include <linux/mmap_lock.h>
 #include <linux/sched/mm.h>
 #include "mmap_unlock_work.h"
 
@@ -807,8 +808,8 @@ static inline void bpf_iter_mmput_async(struct mm_struct *mm)
 struct bpf_iter_task_vma_kern_data {
        struct task_struct *task;
        struct mm_struct *mm;
-       struct mmap_unlock_irq_work *work;
-       struct vma_iterator vmi;
+       struct vm_area_struct *locked_vma;
+       u64 next_addr;
 };
 
 struct bpf_iter_task_vma {
@@ -829,21 +830,19 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                                      struct task_struct *task, u64 addr)
 {
        struct bpf_iter_task_vma_kern *kit = (void *)it;
-       bool irq_work_busy = false;
        int err;
 
        BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
        BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
 
-       /* bpf_iter_mmput_async() needs mmput_async() which requires CONFIG_MMU */
-       if (!IS_ENABLED(CONFIG_MMU)) {
+       if (!IS_ENABLED(CONFIG_PER_VMA_LOCK)) {
                kit->data = NULL;
                return -EOPNOTSUPP;
        }
 
        /*
         * Reject irqs-disabled contexts including NMI. Operations used
-        * by _next() and _destroy() (mmap_read_unlock, bpf_iter_mmput_async)
+        * by _next() and _destroy() (vma_end_read, bpf_iter_mmput_async)
         * can take spinlocks with IRQs disabled (pi_lock, pool->lock).
         * Running from NMI or from a tracepoint that fires with those
         * locks held could deadlock.
@@ -886,18 +885,10 @@ __bpf_kfunc int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it,
                goto err_cleanup_iter;
        }
 
-       /* kit->data->work == NULL is valid after bpf_mmap_unlock_get_irq_work */
-       irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
-       if (irq_work_busy || !mmap_read_trylock(kit->data->mm)) {
-               err = -EBUSY;
-               goto err_cleanup_mmget;
-       }
-
-       vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
+       kit->data->locked_vma = NULL;
+       kit->data->next_addr = addr;
        return 0;
 
-err_cleanup_mmget:
-       bpf_iter_mmput_async(kit->data->mm);
 err_cleanup_iter:
        put_task_struct(kit->data->task);
        bpf_mem_free(&bpf_global_ma, kit->data);
@@ -906,13 +897,76 @@ err_cleanup_iter:
        return err;
 }
 
+/*
+ * Find and lock the next VMA at or after data->next_addr.
+ *
+ * lock_vma_under_rcu() is a point lookup (mas_walk): it finds the VMA
+ * containing a given address but cannot iterate. An RCU-protected
+ * maple tree walk with vma_next() (mas_find) is needed first to locate
+ * the next VMA's vm_start across any gap.
+ *
+ * Between the RCU walk and the lock, the VMA may be removed, shrunk,
+ * or write-locked. On failure, advance past it using vm_end from the
+ * RCU walk. SLAB_TYPESAFE_BY_RCU can make vm_end stale, so fall back
+ * to PAGE_SIZE advancement to guarantee forward progress.
+ */
+static struct vm_area_struct *
+bpf_iter_task_vma_find_next(struct bpf_iter_task_vma_kern_data *data)
+{
+       struct vm_area_struct *vma;
+       struct vma_iterator vmi;
+       unsigned long start, end;
+
+retry:
+       rcu_read_lock();
+       vma_iter_init(&vmi, data->mm, data->next_addr);
+       vma = vma_next(&vmi);
+       if (!vma) {
+               rcu_read_unlock();
+               return NULL;
+       }
+       start = vma->vm_start;
+       end = vma->vm_end;
+       rcu_read_unlock();
+
+       vma = lock_vma_under_rcu(data->mm, start);
+       if (!vma) {
+               if (end <= data->next_addr)
+                       data->next_addr += PAGE_SIZE;
+               else
+                       data->next_addr = end;
+               goto retry;
+       }
+
+       if (unlikely(vma->vm_end <= data->next_addr)) {
+               data->next_addr += PAGE_SIZE;
+               vma_end_read(vma);
+               goto retry;
+       }
+
+       return vma;
+}
+
 __bpf_kfunc struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it)
 {
        struct bpf_iter_task_vma_kern *kit = (void *)it;
+       struct vm_area_struct *vma;
 
        if (!kit->data) /* bpf_iter_task_vma_new failed */
                return NULL;
-       return vma_next(&kit->data->vmi);
+
+       if (kit->data->locked_vma) {
+               vma_end_read(kit->data->locked_vma);
+               kit->data->locked_vma = NULL;
+       }
+
+       vma = bpf_iter_task_vma_find_next(kit->data);
+       if (!vma)
+               return NULL;
+
+       kit->data->locked_vma = vma;
+       kit->data->next_addr = vma->vm_end;
+       return vma;
 }
 
 __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
@@ -920,7 +974,8 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it)
        struct bpf_iter_task_vma_kern *kit = (void *)it;
 
        if (kit->data) {
-               bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
+               if (kit->data->locked_vma)
+                       vma_end_read(kit->data->locked_vma);
                put_task_struct(kit->data->task);
                bpf_iter_mmput_async(kit->data->mm);
                bpf_mem_free(&bpf_global_ma, kit->data);