]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
perf: Split out mlock limit handling
Author: Thomas Gleixner <tglx@linutronix.de>
Tue, 12 Aug 2025 10:39:00 +0000 (12:39 +0200)
Committer: Peter Zijlstra <peterz@infradead.org>
Fri, 15 Aug 2025 11:12:58 +0000 (13:12 +0200)
To prepare for splitting the buffer allocation out into separate functions
for the ring buffer and the AUX buffer, split out mlock limit handling into
a helper function, which can be called from both.

No functional change intended.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://lore.kernel.org/r/20250812104018.541975109@infradead.org
kernel/events/core.c

index eea3a7d6c61d7048dbd97ff4892569602323e30f..f6299012ed734fd29ef51a851c340b28d9256c1b 100644 (file)
@@ -6927,17 +6927,49 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
        return err;
 }
 
+/*
+ * perf_mmap_calc_limits - split an mmap request between locked_vm and pinned_vm
+ * @vma:        the VMA being mmap'ed; its mm's pinned_vm count is consulted
+ * @user_extra: in: pages requested; out: pages to charge to user->locked_vm
+ * @extra:      out: overflow pages to charge to mm->pinned_vm instead
+ *
+ * Charges up to the per-user perf mlock limit (sysctl_perf_event_mlock,
+ * scaled by the number of online CPUs) against user->locked_vm; anything
+ * beyond that is redirected into @extra for mm->pinned_vm accounting.
+ *
+ * Returns: true if the pinned_vm charge fits under RLIMIT_MEMLOCK, or the
+ * caller is privileged (CAP_IPC_LOCK) or perf is not in paranoid mode;
+ * false means the mmap must be refused with -EPERM.
+ *
+ * NOTE(review): this only computes the split and checks limits — the
+ * actual accounting update appears to be done by the caller; confirm
+ * against perf_mmap().
+ */
+static bool perf_mmap_calc_limits(struct vm_area_struct *vma, long *user_extra, long *extra)
+{
+       unsigned long user_locked, user_lock_limit, locked, lock_limit;
+       struct user_struct *user = current_user();
+
+       /* sysctl value is in KiB; convert to pages */
+       user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
+       /* Increase the limit linearly with more CPUs */
+       user_lock_limit *= num_online_cpus();
+
+       user_locked = atomic_long_read(&user->locked_vm);
+
+       /*
+        * sysctl_perf_event_mlock may have changed, so that
+        *     user->locked_vm > user_lock_limit
+        */
+       if (user_locked > user_lock_limit)
+               user_locked = user_lock_limit;
+       user_locked += *user_extra;
+
+       if (user_locked > user_lock_limit) {
+               /*
+                * charge locked_vm until it hits user_lock_limit;
+                * charge the rest from pinned_vm
+                */
+               *extra = user_locked - user_lock_limit;
+               *user_extra -= *extra;
+       }
+
+       /* RLIMIT_MEMLOCK is in bytes; convert to pages */
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
+       lock_limit >>= PAGE_SHIFT;
+       locked = atomic64_read(&vma->vm_mm->pinned_vm) + *extra;
+
+       /*
+        * Over-limit pinning is still permitted for privileged tasks or
+        * when perf paranoia is disabled.
+        */
+       return locked <= lock_limit || !perf_is_paranoid() || capable(CAP_IPC_LOCK);
+}
+
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct perf_event *event = file->private_data;
-       unsigned long user_locked, user_lock_limit;
        struct user_struct *user = current_user();
+       unsigned long vma_size, nr_pages;
+       long user_extra = 0, extra = 0;
        struct mutex *aux_mutex = NULL;
        struct perf_buffer *rb = NULL;
-       unsigned long locked, lock_limit;
-       unsigned long vma_size;
-       unsigned long nr_pages;
-       long user_extra = 0, extra = 0;
        int ret, flags = 0;
        mapped_f mapped;
 
@@ -7063,38 +7095,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                }
        }
 
-       user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
-
-       /*
-        * Increase the limit linearly with more CPUs:
-        */
-       user_lock_limit *= num_online_cpus();
-
-       user_locked = atomic_long_read(&user->locked_vm);
-
-       /*
-        * sysctl_perf_event_mlock may have changed, so that
-        *     user->locked_vm > user_lock_limit
-        */
-       if (user_locked > user_lock_limit)
-               user_locked = user_lock_limit;
-       user_locked += user_extra;
-
-       if (user_locked > user_lock_limit) {
-               /*
-                * charge locked_vm until it hits user_lock_limit;
-                * charge the rest from pinned_vm
-                */
-               extra = user_locked - user_lock_limit;
-               user_extra -= extra;
-       }
-
-       lock_limit = rlimit(RLIMIT_MEMLOCK);
-       lock_limit >>= PAGE_SHIFT;
-       locked = atomic64_read(&vma->vm_mm->pinned_vm) + extra;
-
-       if ((locked > lock_limit) && perf_is_paranoid() &&
-               !capable(CAP_IPC_LOCK)) {
+       if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
                ret = -EPERM;
                goto unlock;
        }