From 81e026ca47b386e4213c1beff069038a3ba8bb76 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 12 Aug 2025 12:39:00 +0200
Subject: [PATCH] perf: Split out mlock limit handling

To prepare for splitting the buffer allocation out into separate
functions for the ring buffer and the AUX buffer, split out mlock limit
handling into a helper function, which can be called from both.

No functional change intended.

Signed-off-by: Thomas Gleixner
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Lorenzo Stoakes
Link: https://lore.kernel.org/r/20250812104018.541975109@infradead.org
---
 kernel/events/core.c | 75 ++++++++++++++++++++++----------------------
 1 file changed, 38 insertions(+), 37 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index eea3a7d6c61d7..f6299012ed734 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
 	return err;
 }
 
+static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
+{
+	unsigned long user_locked, user_lock_limit, locked, lock_limit;
+	struct user_struct *user = current_user();
+
+	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
+	/* Increase the limit linearly with more CPUs */
+	user_lock_limit *= num_online_cpus();
+
+	user_locked = atomic_long_read(&user->locked_vm);
+
+	/*
+	 * sysctl_perf_event_mlock may have changed, so that
+	 * user->locked_vm > user_lock_limit
+	 */
+	if (user_locked > user_lock_limit)
+		user_locked = user_lock_limit;
+	user_locked += *user_extra;
+
+	if (user_locked > user_lock_limit) {
+		/*
+		 * charge locked_vm until it hits user_lock_limit;
+		 * charge the rest from pinned_vm
+		 */
+		*extra = user_locked - user_lock_limit;
+		*user_extra -= *extra;
+	}
+
+	lock_limit = rlimit(RLIMIT_MEMLOCK);
+	lock_limit >>= PAGE_SHIFT;
+	locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
+
+	return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct perf_event *event = file->private_data;
-	unsigned long user_locked, user_lock_limit;
 	struct user_struct *user = current_user();
+	unsigned long vma_size, nr_pages;
+	long user_extra = 0, extra = 0;
 	struct mutex *aux_mutex = NULL;
 	struct perf_buffer *rb = NULL;
-	unsigned long locked, lock_limit;
-	unsigned long vma_size;
-	unsigned long nr_pages;
-	long user_extra = 0, extra = 0;
 	int ret, flags = 0;
 	mapped_f mapped;
 
@@ -7063,38 +7095,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 		}
 	}
 
-	user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
-
-	/*
-	 * Increase the limit linearly with more CPUs:
-	 */
-	user_lock_limit *= num_online_cpus();
-
-	user_locked = atomic_long_read(&user->locked_vm);
-
-	/*
-	 * sysctl_perf_event_mlock may have changed, so that
-	 * user->locked_vm > user_lock_limit
-	 */
-	if (user_locked > user_lock_limit)
-		user_locked = user_lock_limit;
-	user_locked += user_extra;
-
-	if (user_locked > user_lock_limit) {
-		/*
-		 * charge locked_vm until it hits user_lock_limit;
-		 * charge the rest from pinned_vm
-		 */
-		extra = user_locked - user_lock_limit;
-		user_extra -= extra;
-	}
-
-	lock_limit = rlimit(RLIMIT_MEMLOCK);
-	lock_limit >>= PAGE_SHIFT;
-	locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
-
-	if ((locked > lock_limit) && perf_is_paranoid() &&
-	    !capable(CAP_IPC_LOCK)) {
+	if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
 		ret = -EPERM;
 		goto unlock;
 	}
-- 
2.47.3
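
For readers following the series: below is a minimal sketch of how the new
helper could be consumed once the ring-buffer and AUX allocation paths are
actually split, as the changelog describes. perf_mmap_rb() and perf_mmap_aux()
are hypothetical placeholder names for that follow-up split and are not
introduced by this patch; only perf_mmap_calc_limits() is.

/*
 * Sketch only, assuming a later split of perf_mmap(); the two callers
 * below are hypothetical. Each allocation path runs the shared mlock /
 * pinned_vm limit check via the new helper before allocating.
 */
static int perf_mmap_rb(struct vm_area_struct *vma, unsigned long nr_pages)
{
	/* Ring buffer: data pages plus the user control page */
	long user_extra = nr_pages + 1, extra = 0;

	if (!perf_mmap_calc_limits(vma, &user_extra, &extra))
		return -EPERM;

	/* ... allocate the ring buffer, then charge user_extra/extra ... */
	return 0;
}

static int perf_mmap_aux(struct vm_area_struct *vma, unsigned long nr_pages)
{
	/* AUX area: no extra control page */
	long user_extra = nr_pages, extra = 0;

	if (!perf_mmap_calc_limits(vma, &user_extra, &extra))
		return -EPERM;

	/* ... allocate the AUX buffer, then charge user_extra/extra ... */
	return 0;
}

Either way the accounting stays as in today's perf_mmap(): user_extra is
charged against user->locked_vm up to the sysctl limit, and the remainder
(extra) against mm->pinned_vm, subject to RLIMIT_MEMLOCK.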