#include <linux/bpf_mem_alloc.h>
#include <linux/btf_ids.h>
#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
#include "mmap_unlock_work.h"
static const char * const iter_task_type_names[] = {
.arg5_type = ARG_ANYTHING,
};
+/*
+ * Drop a reference on @mm without sleeping in the common case.
+ *
+ * mmput() may sleep when it releases the last reference (address-space
+ * teardown), which the contexts BPF task iterators run from cannot
+ * tolerate.  mmput_async() defers that teardown instead, but it is only
+ * available with CONFIG_MMU.  The !CONFIG_MMU branch falls back to plain
+ * mmput(); callers gate on IS_ENABLED(CONFIG_MMU) before taking an mm
+ * reference, so that branch exists only to keep !MMU builds compiling.
+ */
+static inline void bpf_iter_mmput_async(struct mm_struct *mm)
+{
+#ifdef CONFIG_MMU
+ mmput_async(mm);
+#else
+ mmput(mm);
+#endif
+}
+
struct bpf_iter_task_vma_kern_data {
struct task_struct *task;
struct mm_struct *mm;
BUILD_BUG_ON(sizeof(struct bpf_iter_task_vma_kern) != sizeof(struct bpf_iter_task_vma));
BUILD_BUG_ON(__alignof__(struct bpf_iter_task_vma_kern) != __alignof__(struct bpf_iter_task_vma));
+ /* bpf_iter_mmput_async() needs mmput_async() which requires CONFIG_MMU */
+ if (!IS_ENABLED(CONFIG_MMU)) {
+ kit->data = NULL;
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * Reject irqs-disabled contexts including NMI. Operations used
+ * by _next() and _destroy() (mmap_read_unlock, bpf_iter_mmput_async)
+ * can take spinlocks with IRQs disabled (pi_lock, pool->lock).
+ * Running from NMI or from a tracepoint that fires with those
+ * locks held could deadlock.
+ */
+ if (irqs_disabled()) {
+ kit->data = NULL;
+ return -EBUSY;
+ }
+
/* is_iter_reg_valid_uninit guarantees that kit hasn't been initialized
* before, so non-NULL kit->data doesn't point to previously
* bpf_mem_alloc'd bpf_iter_task_vma_kern_data
return -ENOMEM;
kit->data->task = get_task_struct(task);
+ /*
+ * Safely read task->mm and acquire an mm reference.
+ *
+ * Cannot use get_task_mm() because its task_lock() acquires
+ * alloc_lock unconditionally and would spin forever (deadlock)
+ * if the target task already holds alloc_lock on this CPU
+ * (e.g. a softirq BPF program interrupting a task while it
+ * holds its own alloc_lock).
+ */
+ if (!spin_trylock(&task->alloc_lock)) {
+ err = -EBUSY;
+ goto err_cleanup_iter;
+ }
kit->data->mm = task->mm;
+ if (kit->data->mm && !(task->flags & PF_KTHREAD))
+ mmget(kit->data->mm);
+ else
+ kit->data->mm = NULL;
+ spin_unlock(&task->alloc_lock);
if (!kit->data->mm) {
err = -ENOENT;
goto err_cleanup_iter;
irq_work_busy = bpf_mmap_unlock_get_irq_work(&kit->data->work);
if (irq_work_busy || !mmap_read_trylock(kit->data->mm)) {
err = -EBUSY;
- goto err_cleanup_iter;
+ goto err_cleanup_mmget;
}
vma_iter_init(&kit->data->vmi, kit->data->mm, addr);
return 0;
+err_cleanup_mmget:
+ bpf_iter_mmput_async(kit->data->mm);
err_cleanup_iter:
- if (kit->data->task)
- put_task_struct(kit->data->task);
+ put_task_struct(kit->data->task);
bpf_mem_free(&bpf_global_ma, kit->data);
/* NULL kit->data signals failed bpf_iter_task_vma initialization */
kit->data = NULL;
if (kit->data) {
bpf_mmap_unlock_mm(kit->data->work, kit->data->mm);
put_task_struct(kit->data->task);
+ bpf_iter_mmput_async(kit->data->mm);
bpf_mem_free(&bpf_global_ma, kit->data);
}
}