--- /dev/null
+From 1a76209abd468a04ff183c4ba9282a7c37650544 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 2 Aug 2025 12:39:39 +0200
+Subject: perf/core: Don't leak AUX buffer refcount on allocation failure
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 5468c0fbccbb9d156522c50832244a8b722374fb upstream.
+
+Failure of the AUX buffer allocation leaks the reference count.
+
+Set the reference count to 1 only when the allocation succeeds.
+
+Fixes: 45bfb2e50471 ("perf/core: Add AUX area to ring buffer for raw data streams")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5877,9 +5877,7 @@ static int perf_mmap(struct file *file,
+ goto unlock;
+ }
+
+- atomic_set(&rb->aux_mmap_count, 1);
+ user_extra = nr_pages;
+-
+ goto accounting;
+ }
+
+@@ -5986,8 +5984,10 @@ accounting:
+ } else {
+ ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+ event->attr.aux_watermark, flags);
+- if (!ret)
++ if (!ret) {
++ atomic_set(&rb->aux_mmap_count, 1);
+ rb->aux_mmap_locked = extra;
++ }
+ }
+
+ unlock:
+@@ -5997,6 +5997,7 @@ unlock:
+
+ atomic_inc(&event->mmap_count);
+ } else if (rb) {
++ /* AUX allocation failed */
+ atomic_dec(&rb->mmap_count);
+ }
+ aux_unlock:
--- /dev/null
+From 8dfa884e9021cb0a9c85c2ed7b712432f1a10ec3 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 2 Aug 2025 12:49:48 +0200
+Subject: perf/core: Exit early on perf_mmap() fail
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 07091aade394f690e7b655578140ef84d0e8d7b0 upstream.
+
+When perf_mmap() fails to allocate a buffer, it still invokes the
+event_mapped() callback of the related event. On X86 this might increase
+the perf_rdpmc_allowed reference counter. But nothing undoes this as
+perf_mmap_close() is never called in this case, which causes another
+reference count leak.
+
+Return early on failure to prevent that.
+
+Fixes: 1e0fb9ec679c ("perf/core: Add pmu callbacks to track event mapping and unmapping")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6003,6 +6003,9 @@ unlock:
+ aux_unlock:
+ mutex_unlock(&event->mmap_mutex);
+
++ if (ret)
++ return ret;
++
+ /*
+ * Since pinned accounting is per vm we cannot allow fork() to copy our
+ * vma.
--- /dev/null
+From a7254c9bf0bf1210bbe7923f64dd314db1e9476f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 30 Jul 2025 23:01:21 +0200
+Subject: perf/core: Prevent VMA split of buffer mappings
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b024d7b56c77191cde544f838debb7f8451cd0d6 upstream.
+
+The perf mmap code is careful about mmap()'ing the user page with the
+ringbuffer and additionally the auxiliary buffer, when the event supports
+it. Once the first mapping is established, subsequent mapping have to use
+the same offset and the same size in both cases. The reference counting for
+the ringbuffer and the auxiliary buffer depends on this being correct.
+
+Though perf does not prevent that a related mapping is split via mmap(2),
+munmap(2) or mremap(2). A split of a VMA results in perf_mmap_open() calls,
+which take reference counts, but then the subsequent perf_mmap_close()
+calls are no longer fulfilling the offset and size checks. This leads to
+reference count leaks.
+
+As perf already has the requirement for subsequent mappings to match the
+initial mapping, the obvious consequence is that VMA splits, caused by
+resizing of a mapping or partial unmapping, have to be prevented.
+
+Implement the vm_operations_struct::may_split() callback and return
+unconditionally -EINVAL.
+
+That ensures that the mapping offsets and sizes cannot be changed after the
+fact. Remapping to a different fixed address with the same size is still
+possible as it takes the references for the new mapping and drops those of
+the old mapping.
+
+Fixes: 45bfb2e50471 ("perf/core: Add AUX area to ring buffer for raw data streams")
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-27504
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -5789,11 +5789,21 @@ out_put:
+ ring_buffer_put(rb); /* could be last */
+ }
+
++static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++ /*
++ * Forbid splitting perf mappings to prevent refcount leaks due to
++ * the resulting non-matching offsets and sizes. See open()/close().
++ */
++ return -EINVAL;
++}
++
+ static const struct vm_operations_struct perf_mmap_vmops = {
+ .open = perf_mmap_open,
+ .close = perf_mmap_close, /* non mergeable */
+ .fault = perf_mmap_fault,
+ .page_mkwrite = perf_mmap_fault,
++ .split = perf_mmap_may_split,
+ };
+
+ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
benet-fix-bug-when-creating-vfs.patch
smb-client-let-recv_done-cleanup-before-notifying-th.patch
pptp-fix-pptp_xmit-error-path.patch
+perf-core-don-t-leak-aux-buffer-refcount-on-allocation-failure.patch
+perf-core-exit-early-on-perf_mmap-fail.patch
+perf-core-prevent-vma-split-of-buffer-mappings.patch