--- /dev/null
+From c261393bc5cb02bfc6808c64736170ab98c119e8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 2 Aug 2025 12:39:39 +0200
+Subject: perf/core: Don't leak AUX buffer refcount on allocation failure
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 5468c0fbccbb9d156522c50832244a8b722374fb upstream.
+
+Failure of the AUX buffer allocation leaks the reference count.
+
+Set the reference count to 1 only when the allocation succeeds.
+
+Fixes: 45bfb2e50471 ("perf/core: Add AUX area to ring buffer for raw data streams")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6988,8 +6988,6 @@ static int perf_mmap(struct file *file,
+ ret = 0;
+ goto unlock;
+ }
+-
+- atomic_set(&rb->aux_mmap_count, 1);
+ }
+
+ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
+@@ -7056,8 +7054,10 @@ static int perf_mmap(struct file *file,
+ } else {
+ ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+ event->attr.aux_watermark, flags);
+- if (!ret)
++ if (!ret) {
++ atomic_set(&rb->aux_mmap_count, 1);
+ rb->aux_mmap_locked = extra;
++ }
+ }
+
+ unlock:
+@@ -7067,6 +7067,7 @@ unlock:
+
+ atomic_inc(&event->mmap_count);
+ } else if (rb) {
++ /* AUX allocation failed */
+ atomic_dec(&rb->mmap_count);
+ }
+ aux_unlock:
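
The pattern this fix establishes is that a reference count is only taken
once the allocation it guards has succeeded, so the failure path has
nothing to undo. A minimal userspace sketch of that shape; aux_alloc() is
a hypothetical stand-in for rb_alloc_aux(), not the kernel API:

#include <stdio.h>

struct aux_buf { int refcnt; };

/* Hypothetical stand-in for rb_alloc_aux(); simulates failure. */
static int aux_alloc(struct aux_buf *buf) { (void)buf; return -1; }

static int aux_map(struct aux_buf *buf)
{
        if (aux_alloc(buf))
                return -1;      /* failure: refcnt stays 0, nothing leaks */
        buf->refcnt = 1;        /* take the reference only on success */
        return 0;
}

int main(void)
{
        struct aux_buf buf = { 0 };

        if (aux_map(&buf))
                printf("alloc failed, refcnt=%d, no leak\n", buf.refcnt);
        return 0;
}
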
--- /dev/null
+From 52ce83d4d19240e63ac0de182eaca39650b483a8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 2 Aug 2025 12:49:48 +0200
+Subject: perf/core: Exit early on perf_mmap() fail
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 07091aade394f690e7b655578140ef84d0e8d7b0 upstream.
+
+When perf_mmap() fails to allocate a buffer, it still invokes the
+event_mapped() callback of the related event. On x86 this might increase
+the perf_rdpmc_allowed reference counter, but nothing undoes this, as
+perf_mmap_close() is never called in this case. The result is another
+reference count leak.
+
+Return early on failure to prevent that.
+
+Fixes: 1e0fb9ec679c ("perf/core: Add pmu callbacks to track event mapping and unmapping")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7075,6 +7075,9 @@ aux_unlock:
+ mutex_unlock(aux_mutex);
+ mutex_unlock(&event->mmap_mutex);
+
++ if (ret)
++ return ret;
++
+ /*
+ * Since pinned accounting is per vm we cannot allow fork() to copy our
+ * vma.
+@@ -7082,8 +7085,7 @@ aux_unlock:
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &perf_mmap_vmops;
+
+- if (!ret)
+- ret = map_range(rb, vma);
++ ret = map_range(rb, vma);
+
+ if (!ret && event->pmu->event_mapped)
+ event->pmu->event_mapped(event, vma->vm_mm);
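
The invariant restored here is that event_mapped() must not fire on a path
where perf_mmap_close(), and thus event_unmapped(), can never run. A
minimal sketch of the early-exit shape; map_event()/unmap_event() are
hypothetical stand-ins for the pmu callbacks, not the kernel API:

#include <stdio.h>

static int refcnt;

/* Hypothetical stand-ins for event_mapped()/event_unmapped(). */
static void map_event(void)   { refcnt++; }
static void unmap_event(void) { refcnt--; }

static int do_mmap(int alloc_ok)
{
        if (!alloc_ok)
                return -1;      /* exit early: map_event() never fires */
        map_event();            /* balanced by unmap_event() on close */
        return 0;
}

int main(void)
{
        if (do_mmap(0))
                printf("failed early, refcnt=%d, balanced\n", refcnt);

        if (!do_mmap(1)) {
                unmap_event();  /* the close path runs only for successful maps */
                printf("after close, refcnt=%d\n", refcnt);
        }
        return 0;
}
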
--- /dev/null
+From 2d3b7603cfdcae1c41f4e14f5dfe6ede9ce213bb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 2 Aug 2025 12:48:55 +0200
+Subject: perf/core: Handle buffer mapping fail correctly in perf_mmap()
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit f74b9f4ba63ffdf597aaaa6cad7e284cb8e04820 upstream.
+
+After successful allocation of a buffer or a successful attachment to an
+existing buffer, perf_mmap() tries to map the buffer read-only into the
+page table. If that fails, the already set up page table entries are
+zapped, but the other perf-specific side effects of that failure are not
+handled. The
+calling code just cleans up the VMA and does not invoke perf_mmap_close().
+
+This leaks reference counts, corrupts user->vm accounting and also results
+in an unbalanced invocation of event::event_mapped().
+
+Cure this by moving the event::event_mapped() invocation before the
+map_range() call so that on map_range() failure perf_mmap_close() can be
+invoked without causing an unbalanced event::event_unmapped() call.
+
+perf_mmap_close() undoes the reference counts and eventually frees buffers.
+
+Fixes: b709eb872e19 ("perf/core: map pages in advance")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7085,11 +7085,19 @@ aux_unlock:
+ vm_flags_set(vma, VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP);
+ vma->vm_ops = &perf_mmap_vmops;
+
+- ret = map_range(rb, vma);
+-
+- if (!ret && event->pmu->event_mapped)
++ if (event->pmu->event_mapped)
+ event->pmu->event_mapped(event, vma->vm_mm);
+
++ /*
++ * Try to map it into the page table. On fail, invoke
++ * perf_mmap_close() to undo the above, as the callsite expects
++ * full cleanup in this case and therefore does not invoke
++ * vmops::close().
++ */
++ ret = map_range(rb, vma);
++ if (ret)
++ perf_mmap_close(vma);
++
+ return ret;
+ }
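
The key design point is ordering: once the fallible step comes after the
callback, a single cleanup function can undo every side effect taken so
far. A sketch of that shape under simplified names; mapped_cb(),
close_path() and map_range_stub() are hypothetical stand-ins, not the
kernel API:

#include <stdio.h>

static int mapped;

static void mapped_cb(void)  { mapped = 1; }   /* like event_mapped() */
static void close_path(void) { mapped = 0; }   /* undoes everything, like perf_mmap_close() */
static int map_range_stub(void) { return -1; } /* simulate map_range() failure */

static int finish_mmap(void)
{
        mapped_cb();                    /* fire the callback first ... */

        if (map_range_stub()) {         /* ... so a late failure can be */
                close_path();           /* undone via the close path */
                return -1;
        }
        return 0;
}

int main(void)
{
        finish_mmap();
        printf("mapped=%d after failed map, fully undone\n", mapped);
        return 0;
}
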
+
--- /dev/null
+From 54473e0ef849f44e5ee43e6d6746c27030c3825b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 4 Aug 2025 22:22:09 +0200
+Subject: perf/core: Preserve AUX buffer allocation failure result
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 54473e0ef849f44e5ee43e6d6746c27030c3825b upstream.
+
+A recent overhaul sets the return value to 0 unconditionally after the
+allocations, which causes reference count leaks and corrupts the user->vm
+accounting.
+
+Preserve the AUX buffer allocation failure return value, so that the
+subsequent code works correctly.
+
+Fixes: 0983593f32c4 ("perf/core: Lift event->mmap_mutex in perf_mmap()")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7052,6 +7052,7 @@ static int perf_mmap(struct file *file,
+ perf_event_update_time(event);
+ perf_event_init_userpage(event);
+ perf_event_update_userpage(event);
++ ret = 0;
+ } else {
+ ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages,
+ event->attr.aux_watermark, flags);
+@@ -7059,8 +7060,6 @@ static int perf_mmap(struct file *file,
+ rb->aux_mmap_locked = extra;
+ }
+
+- ret = 0;
+-
+ unlock:
+ if (!ret) {
+ atomic_long_add(user_extra, &user->locked_vm);
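
The bug class is a status variable reset unconditionally after a
conditional branch that may have set an error. A minimal sketch
contrasting the broken and fixed shapes; alloc_aux() is a hypothetical
stand-in for rb_alloc_aux():

#include <stdio.h>

static int alloc_aux(void) { return -12; }      /* simulates -ENOMEM */

static int broken(int use_aux)
{
        int ret = 0;

        if (use_aux)
                ret = alloc_aux();

        ret = 0;        /* unconditional reset clobbers the error */
        return ret;
}

static int fixed(int use_aux)
{
        int ret = 0;

        if (use_aux)
                ret = alloc_aux();      /* failure result is preserved */

        return ret;
}

int main(void)
{
        printf("broken=%d fixed=%d\n", broken(1), fixed(1));
        return 0;
}
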
--- /dev/null
+From d92aa5f655a580ab0be765e29c03fbcfef0c1522 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 30 Jul 2025 23:01:21 +0200
+Subject: perf/core: Prevent VMA split of buffer mappings
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit b024d7b56c77191cde544f838debb7f8451cd0d6 upstream.
+
+The perf mmap code is careful about mmap()'ing the user page with the
+ringbuffer and additionally the auxiliary buffer, when the event supports
+it. Once the first mapping is established, subsequent mappings have to use
+the same offset and the same size in both cases. The reference counting for
+the ringbuffer and the auxiliary buffer depends on this being correct.
+
+However, perf does not prevent a related mapping from being split via
+mmap(2), munmap(2) or mremap(2). A split of a VMA results in
+perf_mmap_open() calls, which take reference counts, but the subsequent
+perf_mmap_close() calls no longer fulfill the offset and size checks. This
+leads to reference count leaks.
+
+As perf already has the requirement for subsequent mappings to match the
+initial mapping, the obvious consequence is that VMA splits, caused by
+resizing of a mapping or partial unmapping, have to be prevented.
+
+Implement the vm_operations_struct::may_split() callback and return
+unconditionally -EINVAL.
+
+That ensures that the mapping offsets and sizes cannot be changed after the
+fact. Remapping to a different fixed address with the same size is still
+possible as it takes the references for the new mapping and drops those of
+the old mapping.
+
+Fixes: 45bfb2e50471 ("perf/core: Add AUX area to ring buffer for raw data streams")
+Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-27504
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/events/core.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -6790,10 +6790,20 @@ static vm_fault_t perf_mmap_pfn_mkwrite(
+ return vmf->pgoff == 0 ? 0 : VM_FAULT_SIGBUS;
+ }
+
++static int perf_mmap_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++ /*
++ * Forbid splitting perf mappings to prevent refcount leaks due to
++ * the resulting non-matching offsets and sizes. See open()/close().
++ */
++ return -EINVAL;
++}
++
+ static const struct vm_operations_struct perf_mmap_vmops = {
+ .open = perf_mmap_open,
+ .close = perf_mmap_close, /* non mergeable */
+ .pfn_mkwrite = perf_mmap_pfn_mkwrite,
++ .may_split = perf_mmap_may_split,
+ };
+
+ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
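
From userspace the new behaviour is directly observable: any partial
munmap() or resizing mremap() of a perf mapping now fails with EINVAL. A
sketch, assuming the software dummy event is openable under the current
perf_event_paranoid setting:

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

int main(void)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_SOFTWARE,
                .size = sizeof(attr),
                .config = PERF_COUNT_SW_DUMMY,
                .disabled = 1,
                .exclude_kernel = 1,
        };
        size_t len = 3 * 4096;          /* user page + 2^1 data pages */
        void *p;
        int fd;

        fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0)
                return 1;               /* no permission for perf_event_open() */

        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
                return 1;

        /* A partial unmap would split the VMA and must now fail. */
        if (munmap(p, 4096))
                printf("split rejected: %s\n", strerror(errno));
        return 0;
}
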
--- /dev/null
+From 0697d73a4b2276b6d1206c297d1d9ea8c751cb40 Mon Sep 17 00:00:00 2001
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Date: Sat, 2 Aug 2025 22:55:35 +0200
+Subject: selftests/perf_events: Add a mmap() correctness test
+
+From: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+
+commit 084d2ac4030c5919e85bba1f4af26e33491469cb upstream.
+
+Exercise various mmap(), munmap() and mremap() invocations, which might
+cause a perf buffer mapping to be split or truncated.
+
+To avoid hard-coding the perf event and having dependencies on
+architectures and configuration options, scan through the event types in
+sysfs and try to open them. On success, try to mmap(), and if that
+succeeds, try to mmap() the AUX buffer.
+
+If no event supporting an AUX buffer is found, only test the base buffer
+mapping. If no mappable event is found or permissions are insufficient,
+skip the tests.
+
+Reserve a PROT_NONE region for both the rb and aux tests so that the case
+where mremap() unmaps beyond the end of a mapped VMA can be tested without
+clobbering unrelated mappings.
+
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Co-developed-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/perf_events/.gitignore | 1
+ tools/testing/selftests/perf_events/Makefile | 2
+ tools/testing/selftests/perf_events/mmap.c | 236 +++++++++++++++++++++++++
+ 3 files changed, 238 insertions(+), 1 deletion(-)
+ create mode 100644 tools/testing/selftests/perf_events/mmap.c
+
+--- a/tools/testing/selftests/perf_events/.gitignore
++++ b/tools/testing/selftests/perf_events/.gitignore
+@@ -2,3 +2,4 @@
+ sigtrap_threads
+ remove_on_exec
+ watermark_signal
++mmap
+--- a/tools/testing/selftests/perf_events/Makefile
++++ b/tools/testing/selftests/perf_events/Makefile
+@@ -2,5 +2,5 @@
+ CFLAGS += -Wl,-no-as-needed -Wall $(KHDR_INCLUDES)
+ LDFLAGS += -lpthread
+
+-TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal
++TEST_GEN_PROGS := sigtrap_threads remove_on_exec watermark_signal mmap
+ include ../lib.mk
+--- /dev/null
++++ b/tools/testing/selftests/perf_events/mmap.c
+@@ -0,0 +1,236 @@
++// SPDX-License-Identifier: GPL-2.0-only
++#define _GNU_SOURCE
++
++#include <dirent.h>
++#include <sched.h>
++#include <stdbool.h>
++#include <stdio.h>
++#include <unistd.h>
++
++#include <sys/ioctl.h>
++#include <sys/mman.h>
++#include <sys/syscall.h>
++#include <sys/types.h>
++
++#include <linux/perf_event.h>
++
++#include "../kselftest_harness.h"
++
++#define RB_SIZE 0x3000
++#define AUX_SIZE 0x10000
++#define AUX_OFFS 0x4000
++
++#define HOLE_SIZE 0x1000
++
++/* Reserve space for rb, aux with space for shrink-beyond-vma testing. */
++#define REGION_SIZE (2 * RB_SIZE + 2 * AUX_SIZE)
++#define REGION_AUX_OFFS (2 * RB_SIZE)
++
++#define MAP_BASE 1
++#define MAP_AUX 2
++
++#define EVENT_SRC_DIR "/sys/bus/event_source/devices"
++
++FIXTURE(perf_mmap)
++{
++ int fd;
++ void *ptr;
++ void *region;
++};
++
++FIXTURE_VARIANT(perf_mmap)
++{
++ bool aux;
++ unsigned long ptr_size;
++};
++
++FIXTURE_VARIANT_ADD(perf_mmap, rb)
++{
++ .aux = false,
++ .ptr_size = RB_SIZE,
++};
++
++FIXTURE_VARIANT_ADD(perf_mmap, aux)
++{
++ .aux = true,
++ .ptr_size = AUX_SIZE,
++};
++
++static bool read_event_type(struct dirent *dent, __u32 *type)
++{
++ char typefn[512];
++ FILE *fp;
++ int res;
++
++ snprintf(typefn, sizeof(typefn), "%s/%s/type", EVENT_SRC_DIR, dent->d_name);
++ fp = fopen(typefn, "r");
++ if (!fp)
++ return false;
++
++ res = fscanf(fp, "%u", type);
++ fclose(fp);
++ return res > 0;
++}
++
++FIXTURE_SETUP(perf_mmap)
++{
++ struct perf_event_attr attr = {
++ .size = sizeof(attr),
++ .disabled = 1,
++ .exclude_kernel = 1,
++ .exclude_hv = 1,
++ };
++ struct perf_event_attr attr_ok = {};
++ unsigned int eacces = 0, map = 0;
++ struct perf_event_mmap_page *rb;
++ struct dirent *dent;
++ void *aux, *region;
++ DIR *dir;
++
++ self->ptr = NULL;
++
++ dir = opendir(EVENT_SRC_DIR);
++ if (!dir)
++ SKIP(return, "perf not available.");
++
++ region = mmap(NULL, REGION_SIZE, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0);
++ ASSERT_NE(region, MAP_FAILED);
++ self->region = region;
++
++ // Try to find a suitable event on this system
++ while ((dent = readdir(dir))) {
++ int fd;
++
++ if (!read_event_type(dent, &attr.type))
++ continue;
++
++ fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
++ if (fd < 0) {
++ if (errno == EACCES)
++ eacces++;
++ continue;
++ }
++
++ // Check whether the event supports mmap()
++ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
++ if (rb == MAP_FAILED) {
++ close(fd);
++ continue;
++ }
++
++ if (!map) {
++ // Save the event in case no AUX-capable event is found
++ attr_ok = attr;
++ map = MAP_BASE;
++ }
++
++ if (!variant->aux)
++ continue;
++
++ rb->aux_offset = AUX_OFFS;
++ rb->aux_size = AUX_SIZE;
++
++ // Check whether it supports an AUX buffer
++ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_FIXED, fd, AUX_OFFS);
++ if (aux == MAP_FAILED) {
++ munmap(rb, RB_SIZE);
++ close(fd);
++ continue;
++ }
++
++ attr_ok = attr;
++ map = MAP_AUX;
++ munmap(aux, AUX_SIZE);
++ munmap(rb, RB_SIZE);
++ close(fd);
++ break;
++ }
++ closedir(dir);
++
++ if (!map) {
++ if (!eacces)
++ SKIP(return, "No mappable perf event found.");
++ else
++ SKIP(return, "No permissions for perf_event_open()");
++ }
++
++ self->fd = syscall(SYS_perf_event_open, &attr_ok, 0, -1, -1, 0);
++ ASSERT_NE(self->fd, -1);
++
++ rb = mmap(region, RB_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, self->fd, 0);
++ ASSERT_NE(rb, MAP_FAILED);
++
++ if (!variant->aux) {
++ self->ptr = rb;
++ return;
++ }
++
++ if (map != MAP_AUX)
++ SKIP(return, "No AUX event found.");
++
++ rb->aux_offset = AUX_OFFS;
++ rb->aux_size = AUX_SIZE;
++ aux = mmap(region + REGION_AUX_OFFS, AUX_SIZE, PROT_READ | PROT_WRITE,
++ MAP_SHARED | MAP_FIXED, self->fd, AUX_OFFS);
++ ASSERT_NE(aux, MAP_FAILED);
++ self->ptr = aux;
++}
++
++FIXTURE_TEARDOWN(perf_mmap)
++{
++ ASSERT_EQ(munmap(self->region, REGION_SIZE), 0);
++ if (self->fd != -1)
++ ASSERT_EQ(close(self->fd), 0);
++}
++
++TEST_F(perf_mmap, remap)
++{
++ void *tmp, *ptr = self->ptr;
++ unsigned long size = variant->ptr_size;
++
++ // Test the invalid remaps
++ ASSERT_EQ(mremap(ptr, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++ ASSERT_EQ(mremap(ptr + size - HOLE_SIZE, HOLE_SIZE, size, MREMAP_MAYMOVE), MAP_FAILED);
++ // Shrink the end of the mapping such that we only unmap past the end of
++ // the VMA, which should succeed and poke a hole into the PROT_NONE region
++ ASSERT_NE(mremap(ptr + size - HOLE_SIZE, size, HOLE_SIZE, MREMAP_MAYMOVE), MAP_FAILED);
++
++ // Remap the whole buffer to a new address
++ tmp = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
++ ASSERT_NE(tmp, MAP_FAILED);
++
++ // Try remapping from one hole size into the VMA; the implied split should fail
++ ASSERT_EQ(mremap(ptr + HOLE_SIZE, size - HOLE_SIZE, size - HOLE_SIZE,
++ MREMAP_MAYMOVE | MREMAP_FIXED, tmp), MAP_FAILED);
++ // Remapping the whole thing should succeed fine
++ ptr = mremap(ptr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, tmp);
++ ASSERT_EQ(ptr, tmp);
++ ASSERT_EQ(munmap(tmp, size), 0);
++}
++
++TEST_F(perf_mmap, unmap)
++{
++ unsigned long size = variant->ptr_size;
++
++ // Try to poke holes into the mappings
++ ASSERT_NE(munmap(self->ptr, HOLE_SIZE), 0);
++ ASSERT_NE(munmap(self->ptr + HOLE_SIZE, HOLE_SIZE), 0);
++ ASSERT_NE(munmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE), 0);
++}
++
++TEST_F(perf_mmap, map)
++{
++ unsigned long size = variant->ptr_size;
++
++ // Try to poke holes into the mappings by mapping anonymous memory over it
++ ASSERT_EQ(mmap(self->ptr, HOLE_SIZE, PROT_READ | PROT_WRITE,
++ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++ ASSERT_EQ(mmap(self->ptr + HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
++ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++ ASSERT_EQ(mmap(self->ptr + size - HOLE_SIZE, HOLE_SIZE, PROT_READ | PROT_WRITE,
++ MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0), MAP_FAILED);
++}
++
++TEST_HARNESS_MAIN
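
Once applied, the test is picked up by the kselftest build and can be run
with the usual target, e.g. make -C tools/testing/selftests
TARGETS=perf_events run_tests. On kernels without the VMA-split fix, the
unmap and map test cases are expected to fail.
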
nfsd-don-t-set-the-ctime-on-delegated-atime-updates.patch
nfsd-avoid-ref-leak-in-nfsd_open_local_fh.patch
sunrpc-fix-handling-of-server-side-tls-alerts.patch
+perf-core-preserve-aux-buffer-allocation-failure-result.patch
+perf-core-don-t-leak-aux-buffer-refcount-on-allocation-failure.patch
+perf-core-exit-early-on-perf_mmap-fail.patch
+perf-core-handle-buffer-mapping-fail-correctly-in-perf_mmap.patch
+perf-core-prevent-vma-split-of-buffer-mappings.patch
+selftests-perf_events-add-a-mmap-correctness-test.patch