perf: Move common code into both rb and aux branches
author    Peter Zijlstra <peterz@infradead.org>
          Tue, 12 Aug 2025 10:39:04 +0000 (12:39 +0200)
committer Peter Zijlstra <peterz@infradead.org>
          Fri, 15 Aug 2025 11:12:59 +0000 (13:12 +0200)
Move the common tail into both branches of the rb/aux split; that is, transform:

  if (cond) {
    A;
  } else {
    B;
  }
  C;

into

  if (cond) {
    A;
    C;
  } else {
    B;
    C;
  }

Notably, C contains a success case, and after the transformation both A and B
have two places where they succeed. For A (the rb case), duplicate the success
code, because later patches will make the two copies diverge. For B (the aux
case), share it using a goto (cleaned up later); see the sketch after the tags
below.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Link: https://lore.kernel.org/r/20250812104019.016252852@infradead.org
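
To make the shape concrete, here is a minimal standalone sketch of the
resulting control flow. It is not the perf_mmap() code itself: handle_rb(),
handle_aux() and account_success() are hypothetical stand-ins for the rb
branch (A), the aux branch (B) and the common success code (C).

  /*
   * Minimal sketch of the post-patch shape, NOT the actual perf_mmap() code.
   * handle_rb(), handle_aux() and account_success() are hypothetical
   * stand-ins for A (rb branch), B (aux branch) and the success code C.
   */
  #include <stdbool.h>
  #include <stdio.h>

  static void account_success(void)        /* C: runs only on success */
  {
          printf("perf_mmap_account() + mmap_count++\n");
  }

  static int handle_rb(bool fast_path)     /* A: C duplicated at both success sites */
  {
          if (fast_path) {
                  account_success();       /* first copy of C */
                  return 0;
          }

          /* ... allocate the ring buffer ... */
          account_success();               /* second copy; later patches make the copies diverge */
          return 0;
  }

  static int handle_aux(bool fast_path)    /* B: C shared between both success sites via goto */
  {
          int ret = 0;

          if (fast_path)
                  goto success;

          /* ... allocate the AUX buffer, which may fail ... */
          if (ret)
                  return ret;
  success:
          account_success();               /* single shared copy of C */
          return 0;
  }

  int main(void)
  {
          bool cond = true;

          /* the if (cond) { A; C; } else { B; C; } shape from the changelog */
          return cond ? handle_rb(true) : handle_aux(false);
  }

The duplication in handle_rb() mirrors why the patch copies C into the rb
branch twice rather than sharing it: those two sites are about to stop being
identical.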
kernel/events/core.c

index 085f36f611375481cf41a5c0b63d2018114a4ff8..dfe09b0332739d3348065f77a1939f8979ceea70 100644
@@ -7043,6 +7043,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                                ret = 0;
                                /* We need the rb to map pages. */
                                rb = event->rb;
+                               perf_mmap_account(vma, user_extra, extra);
+                               atomic_inc(&event->mmap_count);
                                goto unlock;
                        }
 
@@ -7083,6 +7085,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                perf_event_init_userpage(event);
                perf_event_update_userpage(event);
                ret = 0;
+
+               perf_mmap_account(vma, user_extra, extra);
+               atomic_inc(&event->mmap_count);
        } else {
                /*
                 * AUX area mapping: if rb->aux_nr_pages != 0, it's already
@@ -7127,11 +7132,12 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                if (rb_has_aux(rb)) {
                        atomic_inc(&rb->aux_mmap_count);
                        ret = 0;
-                       goto unlock;
+                       goto aux_success;
                }
 
                if (!perf_mmap_calc_limits(vma, &user_extra, &extra)) {
                        ret = -EPERM;
+                       atomic_dec(&rb->mmap_count);
                        goto unlock;
                }
 
@@ -7142,20 +7148,19 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
                ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
                                   event->attr.aux_watermark, flags);
-               if (!ret) {
-                       atomic_set(&rb->aux_mmap_count, 1);
-                       rb->aux_mmap_locked = extra;
+               if (ret) {
+                       atomic_dec(&rb->mmap_count);
+                       goto unlock;
                }
-       }
 
-unlock:
-       if (!ret) {
+               atomic_set(&rb->aux_mmap_count, 1);
+               rb->aux_mmap_locked = extra;
+aux_success:
                perf_mmap_account(vma, user_extra, extra);
                atomic_inc(&event->mmap_count);
-       } else if (rb) {
-               /* AUX allocation failed */
-               atomic_dec(&rb->mmap_count);
        }
+
+unlock:
 aux_unlock:
        if (aux_mutex)
                mutex_unlock(aux_mutex);