--- /dev/null
+From 5973a62efa34c80c9a4e5eac1fca6f6209b902af Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Fri, 19 Sep 2025 14:27:51 -0700
+Subject: arm64: map [_text, _stext) virtual address range non-executable+read-only
+
+From: Omar Sandoval <osandov@fb.com>
+
+commit 5973a62efa34c80c9a4e5eac1fca6f6209b902af upstream.
+
+Since the referenced fixes commit, the kernel's .text section is only
+mapped starting from _stext; the region [_text, _stext) is omitted. As a
+result, other vmalloc/vmap allocations may use the virtual addresses
+nominally in the range [_text, _stext). This address reuse confuses
+multiple things:
+
+1. crash_prepare_elf64_headers() sets up a segment in /proc/vmcore
+ mapping the entire range [_text, _end) to
+ [__pa_symbol(_text), __pa_symbol(_end)). Reading an address in
+ [_text, _stext) from /proc/vmcore therefore gives the incorrect
+ result.
+2. Tools doing symbolization (either by reading /proc/kallsyms or based
+ on the vmlinux ELF file) will incorrectly identify vmalloc/vmap
+ allocations in [_text, _stext) as kernel symbols.
+
+In practice, both of these issues affect the drgn debugger.
+Specifically, there were cases where the vmap IRQ stacks for some CPUs
+were allocated in [_text, _stext). As a result, drgn could not get the
+stack trace for a crash in an IRQ handler because the core dump
+contained invalid data for the IRQ stack address. The stack addresses
+were also symbolized as being in the _text symbol.
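+
+For illustration, a minimal user-space sketch of the symbolization
+problem (not drgn's actual code; the symbol addresses below are made
+up): a kallsyms-style lookup picks the greatest symbol address <= addr,
+so an IRQ stack vmapped into [_text, _stext) is reported as _text.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  struct sym { uint64_t addr; const char *name; };
+
+  /* Hypothetical, sorted kallsyms excerpt. */
+  static const struct sym syms[] = {
+  	{ 0xffff800080000000ULL, "_text" },
+  	{ 0xffff800080010000ULL, "_stext" },
+  	{ 0xffff800080010100ULL, "gic_handle_irq" },
+  };
+
+  static const char *symbolize(uint64_t addr)
+  {
+  	const struct sym *best = NULL;
+
+  	for (size_t i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
+  		if (syms[i].addr <= addr)
+  			best = &syms[i];
+  	return best ? best->name : "?";
+  }
+
+  int main(void)
+  {
+  	/* A vmap'd IRQ stack that happens to land in [_text, _stext). */
+  	uint64_t irq_stack = 0xffff800080004000ULL;
+
+  	printf("%#llx -> %s\n", (unsigned long long)irq_stack,
+  	       symbolize(irq_stack));	/* prints "_text" */
+  	return 0;
+  }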
+
+Fix this by bringing back the mapping of [_text, _stext), but now make
+it non-executable and read-only. This prevents other allocations from
+using it while still achieving the original goal of not mapping
+unpredictable data as executable. Other than the changed protection,
+this is effectively a revert of the fixes commit.
+
+Fixes: e2a073dde921 ("arm64: omit [_text, _stext) from permanent kernel mapping")
+Cc: stable@vger.kernel.org
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/pi/map_kernel.c | 6 ++++++
+ arch/arm64/kernel/setup.c | 4 ++--
+ arch/arm64/mm/init.c | 2 +-
+ arch/arm64/mm/mmu.c | 14 +++++++++-----
+ 4 files changed, 18 insertions(+), 8 deletions(-)
+
+--- a/arch/arm64/kernel/pi/map_kernel.c
++++ b/arch/arm64/kernel/pi/map_kernel.c
+@@ -78,6 +78,12 @@ static void __init map_kernel(u64 kaslr_
+ twopass |= enable_scs;
+ prot = twopass ? data_prot : text_prot;
+
++ /*
++	 * [_text, _stext) isn't executed after boot and contains some
++ * non-executable, unpredictable data, so map it non-executable.
++ */
++ map_segment(init_pg_dir, &pgdp, va_offset, _text, _stext, data_prot,
++ false, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, _stext, _etext, prot,
+ !twopass, root_level);
+ map_segment(init_pg_dir, &pgdp, va_offset, __start_rodata,
+--- a/arch/arm64/kernel/setup.c
++++ b/arch/arm64/kernel/setup.c
+@@ -213,7 +213,7 @@ static void __init request_standard_reso
+ unsigned long i = 0;
+ size_t res_size;
+
+- kernel_code.start = __pa_symbol(_stext);
++ kernel_code.start = __pa_symbol(_text);
+ kernel_code.end = __pa_symbol(__init_begin - 1);
+ kernel_data.start = __pa_symbol(_sdata);
+ kernel_data.end = __pa_symbol(_end - 1);
+@@ -281,7 +281,7 @@ u64 cpu_logical_map(unsigned int cpu)
+
+ void __init __no_sanitize_address setup_arch(char **cmdline_p)
+ {
+- setup_initial_init_mm(_stext, _etext, _edata, _end);
++ setup_initial_init_mm(_text, _etext, _edata, _end);
+
+ *cmdline_p = boot_command_line;
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -300,7 +300,7 @@ void __init arm64_memblock_init(void)
+ * Register the kernel text, kernel data, initrd, and initial
+ * pagetables with memblock.
+ */
+- memblock_reserve(__pa_symbol(_stext), _end - _stext);
++ memblock_reserve(__pa_symbol(_text), _end - _text);
+ if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
+ /* the generic initrd code expects virtual addresses */
+ initrd_start = __phys_to_virt(phys_initrd_start);
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -561,8 +561,8 @@ void __init mark_linear_text_alias_ro(vo
+ /*
+ * Remove the write permissions from the linear alias of .text/.rodata
+ */
+- update_mapping_prot(__pa_symbol(_stext), (unsigned long)lm_alias(_stext),
+- (unsigned long)__init_begin - (unsigned long)_stext,
++ update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
++ (unsigned long)__init_begin - (unsigned long)_text,
+ PAGE_KERNEL_RO);
+ }
+
+@@ -623,7 +623,7 @@ static inline void arm64_kfence_map_pool
+ static void __init map_mem(pgd_t *pgdp)
+ {
+ static const u64 direct_map_end = _PAGE_END(VA_BITS_MIN);
+- phys_addr_t kernel_start = __pa_symbol(_stext);
++ phys_addr_t kernel_start = __pa_symbol(_text);
+ phys_addr_t kernel_end = __pa_symbol(__init_begin);
+ phys_addr_t start, end;
+ phys_addr_t early_kfence_pool;
+@@ -670,7 +670,7 @@ static void __init map_mem(pgd_t *pgdp)
+ }
+
+ /*
+- * Map the linear alias of the [_stext, __init_begin) interval
++ * Map the linear alias of the [_text, __init_begin) interval
+ * as non-executable now, and remove the write permission in
+ * mark_linear_text_alias_ro() below (which will be called after
+ * alternative patching has completed). This makes the contents
+@@ -697,6 +697,10 @@ void mark_rodata_ro(void)
+ WRITE_ONCE(rodata_is_rw, false);
+ update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
+ section_size, PAGE_KERNEL_RO);
++ /* mark the range between _text and _stext as read only. */
++ update_mapping_prot(__pa_symbol(_text), (unsigned long)_text,
++ (unsigned long)_stext - (unsigned long)_text,
++ PAGE_KERNEL_RO);
+ }
+
+ static void __init declare_vma(struct vm_struct *vma,
+@@ -767,7 +771,7 @@ static void __init declare_kernel_vmas(v
+ {
+ static struct vm_struct vmlinux_seg[KERNEL_SEGMENT_COUNT];
+
+- declare_vma(&vmlinux_seg[0], _stext, _etext, VM_NO_GUARD);
++ declare_vma(&vmlinux_seg[0], _text, _etext, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[1], __start_rodata, __inittext_begin, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[2], __inittext_begin, __inittext_end, VM_NO_GUARD);
+ declare_vma(&vmlinux_seg[3], __initdata_begin, __initdata_end, VM_NO_GUARD);
--- /dev/null
+From 2e454fb8056df6da4bba7d89a57bf60e217463c0 Mon Sep 17 00:00:00 2001
+From: Dave Jiang <dave.jiang@intel.com>
+Date: Fri, 29 Aug 2025 15:29:06 -0700
+Subject: cxl, acpi/hmat: Update CXL access coordinates directly instead of through HMAT
+
+From: Dave Jiang <dave.jiang@intel.com>
+
+commit 2e454fb8056df6da4bba7d89a57bf60e217463c0 upstream.
+
+The current implementation of the CXL memory hotplug notifier gets called
+before the HMAT memory hotplug notifier. The CXL driver calculates the
+access coordinates (bandwidth and latency values) for the CXL end-to-end
+path (i.e. CPU to endpoint). When the CXL region is onlined, the CXL
+memory hotplug notifier writes the access coordinates to the HMAT target
+structs. Then the HMAT memory hotplug notifier is called and creates
+the access coordinates for the node sysfs attributes.
+
+During testing on an Intel platform, it was found that although the
+newly calculated coordinates were pushed to sysfs, the sysfs attributes for
+the access coordinates showed up with the wrong initiator. The system has
+4 nodes (0, 1, 2, 3) where node 0 and 1 are CPU nodes and node 2 and 3 are
+CXL nodes. The expectation is that node 2 would show up as a target to node
+0:
+/sys/devices/system/node/node2/access0/initiators/node0
+
+However it was observed that node 2 showed up as a target under node 1:
+/sys/devices/system/node/node2/access0/initiators/node1
+
+The original intent of the 'ext_updated' flag in the HMAT handling code
+was to stop the HMAT memory hotplug callback from clobbering the access
+coordinates after CXL had injected its calculated coordinates and
+replaced the generic target access coordinates provided by the HMAT
+table in the HMAT target structs. However, the flag is hacky at best and
+blocks updates from other CXL regions that are onlined in the same node
+later on. Remove the 'ext_updated' flag usage and just update the access
+coordinates for the nodes directly without touching HMAT target data.
+
+The hotplug memory callback ordering is changed. Instead of changing
+CXL, raise HMAT above CXL so there is room between the levels rather
+than having CXL share the same level as SLAB_CALLBACK_PRI. The change
+results in the CXL callback being executed after the HMAT callback.
+
+With the change, the CXL hotplug memory notifier runs after the HMAT
+callback. The HMAT callback will create the node sysfs attributes for
+access coordinates. The CXL callback will write the access coordinates to
+the now created node sysfs attributes directly and will not pollute the
+HMAT target values.
+
+A nodemask is introduced to track whether a node has already been updated
+and to prevent further updates.
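+
+A user-space analog of the resulting one-shot guard (illustrative only;
+the kernel code uses nodemask_t and node_test_and_set(), the names below
+are made up):
+
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  static unsigned long long nodes_seen;	/* analog of nodemask_region_seen */
+
+  static bool node_seen_test_and_set(int nid)
+  {
+  	bool was_set = nodes_seen & (1ULL << nid);
+
+  	nodes_seen |= 1ULL << nid;
+  	return was_set;
+  }
+
+  static void notifier(int nid)
+  {
+  	/* Only the first region onlined in a node updates its attributes. */
+  	if (node_seen_test_and_set(nid)) {
+  		printf("node %d: already updated, skipping\n", nid);
+  		return;
+  	}
+  	printf("node %d: writing access coordinates\n", nid);
+  }
+
+  int main(void)
+  {
+  	notifier(2);	/* first CXL region onlined in node 2: updates */
+  	notifier(2);	/* a later region in the same node: skipped */
+  	return 0;
+  }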
+
+Fixes: 067353a46d8c ("cxl/region: Add memory hotplug notifier for cxl region")
+Cc: stable@vger.kernel.org
+Tested-by: Marc Herbert <marc.herbert@linux.intel.com>
+Reviewed-by: Dan Williams <dan.j.williams@intel.com>
+Reviewed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
+Link: https://patch.msgid.link/20250829222907.1290912-4-dave.jiang@intel.com
+Signed-off-by: Dave Jiang <dave.jiang@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/acpi/numa/hmat.c | 6 ------
+ drivers/cxl/core/cdat.c | 5 -----
+ drivers/cxl/core/core.h | 1 -
+ drivers/cxl/core/region.c | 20 ++++++++++++--------
+ include/linux/memory.h | 2 +-
+ 5 files changed, 13 insertions(+), 21 deletions(-)
+
+--- a/drivers/acpi/numa/hmat.c
++++ b/drivers/acpi/numa/hmat.c
+@@ -74,7 +74,6 @@ struct memory_target {
+ struct node_cache_attrs cache_attrs;
+ u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
+ bool registered;
+- bool ext_updated; /* externally updated */
+ };
+
+ struct memory_initiator {
+@@ -352,7 +351,6 @@ int hmat_update_target_coordinates(int n
+ coord->read_bandwidth, access);
+ hmat_update_target_access(target, ACPI_HMAT_WRITE_BANDWIDTH,
+ coord->write_bandwidth, access);
+- target->ext_updated = true;
+
+ return 0;
+ }
+@@ -729,10 +727,6 @@ static void hmat_update_target_attrs(str
+ u32 best = 0;
+ int i;
+
+- /* Don't update if an external agent has changed the data. */
+- if (target->ext_updated)
+- return;
+-
+ /* Don't update for generic port if there's no device handle */
+ if ((access == NODE_ACCESS_CLASS_GENPORT_SINK_LOCAL ||
+ access == NODE_ACCESS_CLASS_GENPORT_SINK_CPU) &&
+--- a/drivers/cxl/core/cdat.c
++++ b/drivers/cxl/core/cdat.c
+@@ -1075,8 +1075,3 @@ int cxl_update_hmat_access_coordinates(i
+ {
+ return hmat_update_target_coordinates(nid, &cxlr->coord[access], access);
+ }
+-
+-bool cxl_need_node_perf_attrs_update(int nid)
+-{
+- return !acpi_node_backed_by_real_pxm(nid);
+-}
+--- a/drivers/cxl/core/core.h
++++ b/drivers/cxl/core/core.h
+@@ -106,7 +106,6 @@ long cxl_pci_get_latency(struct pci_dev
+ int cxl_pci_get_bandwidth(struct pci_dev *pdev, struct access_coordinate *c);
+ int cxl_update_hmat_access_coordinates(int nid, struct cxl_region *cxlr,
+ enum access_coordinate_class access);
+-bool cxl_need_node_perf_attrs_update(int nid);
+ int cxl_port_get_switch_dport_bandwidth(struct cxl_port *port,
+ struct access_coordinate *c);
+
+--- a/drivers/cxl/core/region.c
++++ b/drivers/cxl/core/region.c
+@@ -30,6 +30,12 @@
+ * 3. Decoder targets
+ */
+
++/*
++ * nodemask that sets per node when the access_coordinates for the node has
++ * been updated by the CXL memory hotplug notifier.
++ */
++static nodemask_t nodemask_region_seen = NODE_MASK_NONE;
++
+ static struct cxl_region *to_cxl_region(struct device *dev);
+
+ #define __ACCESS_ATTR_RO(_level, _name) { \
+@@ -2385,14 +2391,8 @@ static bool cxl_region_update_coordinate
+
+ for (int i = 0; i < ACCESS_COORDINATE_MAX; i++) {
+ if (cxlr->coord[i].read_bandwidth) {
+- rc = 0;
+- if (cxl_need_node_perf_attrs_update(nid))
+- node_set_perf_attrs(nid, &cxlr->coord[i], i);
+- else
+- rc = cxl_update_hmat_access_coordinates(nid, cxlr, i);
+-
+- if (rc == 0)
+- cset++;
++ node_update_perf_attrs(nid, &cxlr->coord[i], i);
++ cset++;
+ }
+ }
+
+@@ -2430,6 +2430,10 @@ static int cxl_region_perf_attrs_callbac
+ if (nid != region_nid)
+ return NOTIFY_DONE;
+
++ /* No action needed if node bit already set */
++ if (node_test_and_set(nid, nodemask_region_seen))
++ return NOTIFY_DONE;
++
+ if (!cxl_region_update_coordinates(cxlr, nid))
+ return NOTIFY_DONE;
+
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -122,8 +122,8 @@ struct mem_section;
+ */
+ #define DEFAULT_CALLBACK_PRI 0
+ #define SLAB_CALLBACK_PRI 1
+-#define HMAT_CALLBACK_PRI 2
+ #define CXL_CALLBACK_PRI 5
++#define HMAT_CALLBACK_PRI 6
+ #define MM_COMPUTE_BATCH_PRI 10
+ #define CPUSET_CALLBACK_PRI 10
+ #define MEMTIER_HOTPLUG_PRI 100
--- /dev/null
+From 708c04a5c2b78e22f56e2350de41feba74dfccd9 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= <thomas.weissschuh@linutronix.de>
+Date: Tue, 5 Aug 2025 14:38:08 +0200
+Subject: fs: always return zero on success from replace_fd()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+
+commit 708c04a5c2b78e22f56e2350de41feba74dfccd9 upstream.
+
+replace_fd() returns the number of the new file descriptor through the
+return value of do_dup2(). However its callers never care about the
+specific returned number. In fact the caller in receive_fd_replace() treats
+any non-zero return value as an error and therefore never calls
+__receive_sock() for most file descriptors, which is a bug.
+
+To fix the bug in receive_fd_replace() and to avoid the same issue
+happening in future callers, signal success through a plain zero.
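+
+A self-contained sketch of the caller pattern that misfires (names are
+illustrative, not the kernel's): with the old return convention, every
+fd other than 0 looks like an error to a caller that checks "if (err)".
+
+  #include <stdio.h>
+
+  /* Old behaviour: return the installed fd number on success. */
+  static int replace_fd_old(int new_fd)
+  {
+  	return new_fd;
+  }
+
+  static int install_and_configure(int new_fd)
+  {
+  	int err = replace_fd_old(new_fd);
+
+  	if (err)	/* true for every fd except 0 */
+  		return err;
+  	printf("post-install work runs for fd %d\n", new_fd);
+  	return 0;
+  }
+
+  int main(void)
+  {
+  	install_and_configure(0);	/* post-install work runs */
+  	install_and_configure(5);	/* post-install work silently skipped */
+  	return 0;
+  }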
+
+Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
+Link: https://lore.kernel.org/lkml/20250801220215.GS222315@ZenIV/
+Fixes: 173817151b15 ("fs: Expand __receive_fd() to accept existing fd")
+Fixes: 42eb0d54c08a ("fs: split receive_fd_replace from __receive_fd")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Link: https://lore.kernel.org/20250805-fix-receive_fd_replace-v3-1-b72ba8b34bac@linutronix.de
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/file.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/file.c
++++ b/fs/file.c
+@@ -1262,7 +1262,10 @@ int replace_fd(unsigned fd, struct file
+ err = expand_files(files, fd);
+ if (unlikely(err < 0))
+ goto out_unlock;
+- return do_dup2(files, file, fd, flags);
++ err = do_dup2(files, file, fd, flags);
++ if (err < 0)
++ return err;
++ return 0;
+
+ out_unlock:
+ spin_unlock(&files->file_lock);
--- /dev/null
+From 72d271a7baa7062cb27e774ac37c5459c6d20e22 Mon Sep 17 00:00:00 2001
+From: Aleksa Sarai <cyphar@cyphar.com>
+Date: Thu, 7 Aug 2025 03:55:23 +1000
+Subject: fscontext: do not consume log entries when returning -EMSGSIZE
+
+From: Aleksa Sarai <cyphar@cyphar.com>
+
+commit 72d271a7baa7062cb27e774ac37c5459c6d20e22 upstream.
+
+Userspace generally expects APIs that return -EMSGSIZE to allow for them
+to adjust their buffer size and retry the operation. However, the
+fscontext log would previously clear the message even in the -EMSGSIZE
+case.
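+
+The retry pattern userspace typically uses looks roughly like this
+(sketch only; fd is assumed to be an fscontext fd from fsopen(2), error
+handling trimmed):
+
+  #include <errno.h>
+  #include <stdlib.h>
+  #include <unistd.h>
+
+  static char *read_log_message(int fd)
+  {
+  	size_t size = 128;
+  	char *buf = malloc(size);
+
+  	for (;;) {
+  		ssize_t n = read(fd, buf, size - 1);
+
+  		if (n >= 0) {
+  			buf[n] = '\0';	/* one message per read() */
+  			return buf;
+  		}
+  		if (errno != EMSGSIZE)	/* e.g. ENODATA: no more messages */
+  			break;
+  		/*
+  		 * Message too large: grow the buffer and retry. Before this
+  		 * fix the kernel had already dropped the message here, so
+  		 * the retry read something else (or nothing).
+  		 */
+  		size *= 2;
+  		buf = realloc(buf, size);
+  	}
+  	free(buf);
+  	return NULL;
+  }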
+
+Given that it is very cheap for us to check whether the buffer is too
+small before we remove the message from the ring buffer, let's just do
+that instead. While we're at it, refactor part of fscontext_read() into a
+separate helper to make the ring buffer logic a bit easier to read.
+
+Fixes: 007ec26cdc9f ("vfs: Implement logging through fs_context")
+Cc: David Howells <dhowells@redhat.com>
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Aleksa Sarai <cyphar@cyphar.com>
+Link: https://lore.kernel.org/20250807-fscontext-log-cleanups-v3-1-8d91d6242dc3@cyphar.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fsopen.c | 70 ++++++++++++++++++++++++++++++++----------------------------
+ 1 file changed, 38 insertions(+), 32 deletions(-)
+
+--- a/fs/fsopen.c
++++ b/fs/fsopen.c
+@@ -18,50 +18,56 @@
+ #include "internal.h"
+ #include "mount.h"
+
++static inline const char *fetch_message_locked(struct fc_log *log, size_t len,
++ bool *need_free)
++{
++ const char *p;
++ int index;
++
++ if (unlikely(log->head == log->tail))
++ return ERR_PTR(-ENODATA);
++
++ index = log->tail & (ARRAY_SIZE(log->buffer) - 1);
++ p = log->buffer[index];
++ if (unlikely(strlen(p) > len))
++ return ERR_PTR(-EMSGSIZE);
++
++ log->buffer[index] = NULL;
++ *need_free = log->need_free & (1 << index);
++ log->need_free &= ~(1 << index);
++ log->tail++;
++
++ return p;
++}
++
+ /*
+ * Allow the user to read back any error, warning or informational messages.
++ * Only one message is returned for each read(2) call.
+ */
+ static ssize_t fscontext_read(struct file *file,
+ char __user *_buf, size_t len, loff_t *pos)
+ {
+ struct fs_context *fc = file->private_data;
+- struct fc_log *log = fc->log.log;
+- unsigned int logsize = ARRAY_SIZE(log->buffer);
+- ssize_t ret;
+- char *p;
++ ssize_t err;
++ const char *p __free(kfree) = NULL, *message;
+ bool need_free;
+- int index, n;
+-
+- ret = mutex_lock_interruptible(&fc->uapi_mutex);
+- if (ret < 0)
+- return ret;
+-
+- if (log->head == log->tail) {
+- mutex_unlock(&fc->uapi_mutex);
+- return -ENODATA;
+- }
++ int n;
+
+- index = log->tail & (logsize - 1);
+- p = log->buffer[index];
+- need_free = log->need_free & (1 << index);
+- log->buffer[index] = NULL;
+- log->need_free &= ~(1 << index);
+- log->tail++;
++ err = mutex_lock_interruptible(&fc->uapi_mutex);
++ if (err < 0)
++ return err;
++ message = fetch_message_locked(fc->log.log, len, &need_free);
+ mutex_unlock(&fc->uapi_mutex);
++ if (IS_ERR(message))
++ return PTR_ERR(message);
+
+- ret = -EMSGSIZE;
+- n = strlen(p);
+- if (n > len)
+- goto err_free;
+- ret = -EFAULT;
+- if (copy_to_user(_buf, p, n) != 0)
+- goto err_free;
+- ret = n;
+-
+-err_free:
+ if (need_free)
+- kfree(p);
+- return ret;
++ p = message;
++
++ n = strlen(message);
++ if (copy_to_user(_buf, message, n))
++ return -EFAULT;
++ return n;
+ }
+
+ static int fscontext_release(struct inode *inode, struct file *file)
--- /dev/null
+From 6eb350a2233100a283f882c023e5ad426d0ed63b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Aug 2025 17:02:30 +0200
+Subject: rseq: Protect event mask against membarrier IPI
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+commit 6eb350a2233100a283f882c023e5ad426d0ed63b upstream.
+
+rseq_need_restart() reads and clears task::rseq_event_mask with preemption
+disabled to guard against the scheduler.
+
+But membarrier() uses an IPI and sets the PREEMPT bit in the event mask
+from the IPI, which leaves that RMW operation unprotected.
+
+Use guard(irq) if CONFIG_MEMBARRIER is enabled to fix that.
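+
+A user-space analog of the lost update (a synchronous signal stands in
+for the membarrier IPI; names are illustrative, not the kernel's):
+
+  #include <signal.h>
+  #include <stdio.h>
+
+  static volatile sig_atomic_t event_mask;
+
+  static void ipi_handler(int sig)
+  {
+  	(void)sig;
+  	event_mask |= 1;	/* the IPI records an event */
+  }
+
+  int main(void)
+  {
+  	signal(SIGALRM, ipi_handler);
+
+  	/*
+  	 * Non-atomic load-and-clear, "protected" only against preemption.
+  	 * If the IPI fires between the load and the store, the bit it set
+  	 * is wiped out and the event is lost.
+  	 */
+  	int mask = event_mask;
+  	raise(SIGALRM);		/* simulate the IPI hitting right here */
+  	event_mask = 0;
+
+  	printf("observed 0x%x, pending 0x%x -> event lost\n",
+  	       mask, (unsigned int)event_mask);
+  	return 0;
+  }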
+
+Fixes: 2a36ab717e8f ("rseq/membarrier: Add MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
+Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rseq.h | 11 ++++++++---
+ kernel/rseq.c | 10 +++++-----
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/include/linux/rseq.h
++++ b/include/linux/rseq.h
+@@ -7,6 +7,12 @@
+ #include <linux/preempt.h>
+ #include <linux/sched.h>
+
++#ifdef CONFIG_MEMBARRIER
++# define RSEQ_EVENT_GUARD irq
++#else
++# define RSEQ_EVENT_GUARD preempt
++#endif
++
+ /*
+ * Map the event mask on the user-space ABI enum rseq_cs_flags
+ * for direct mask checks.
+@@ -41,9 +47,8 @@ static inline void rseq_handle_notify_re
+ static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
+ {
+- preempt_disable();
+-	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+- preempt_enable();
++ scoped_guard(RSEQ_EVENT_GUARD)
++		__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
+ rseq_handle_notify_resume(ksig, regs);
+ }
+
+--- a/kernel/rseq.c
++++ b/kernel/rseq.c
+@@ -255,12 +255,12 @@ static int rseq_need_restart(struct task
+
+ /*
+ * Load and clear event mask atomically with respect to
+- * scheduler preemption.
++ * scheduler preemption and membarrier IPIs.
+ */
+- preempt_disable();
+- event_mask = t->rseq_event_mask;
+- t->rseq_event_mask = 0;
+- preempt_enable();
++ scoped_guard(RSEQ_EVENT_GUARD) {
++ event_mask = t->rseq_event_mask;
++ t->rseq_event_mask = 0;
++ }
+
+ return !!event_mask;
+ }
--- /dev/null
+fs-always-return-zero-on-success-from-replace_fd.patch
+fscontext-do-not-consume-log-entries-when-returning-emsgsize.patch
+arm64-map-_text-_stext-virtual-address-range-non-executable-read-only.patch
+cxl-acpi-hmat-update-cxl-access-coordinates-directly-instead-of-through-hmat.patch
+rseq-protect-event-mask-against-membarrier-ipi.patch