--- /dev/null
+From 030e8a40fff65ca6ac1c04a4d3c08afe72438922 Mon Sep 17 00:00:00 2001
+From: Kevin Brodsky <kevin.brodsky@arm.com>
+Date: Mon, 27 Apr 2026 13:03:33 +0100
+Subject: arm64: signal: Preserve POR_EL0 if poe_context is missing
+
+From: Kevin Brodsky <kevin.brodsky@arm.com>
+
+commit 030e8a40fff65ca6ac1c04a4d3c08afe72438922 upstream.
+
+Commit 2e8a1acea859 ("arm64: signal: Improve POR_EL0 handling to
+avoid uaccess failures") delayed the write to POR_EL0 in
+rt_sigreturn to avoid spurious uaccess failures. This change, however,
+relies on the poe_context frame record being present: on a system
+supporting POE, calling sigreturn without a poe_context record now
+results in writing arbitrary data from the kernel stack into POR_EL0.
+
+Fix this by adding a __valid_fields member to struct
+user_access_state, and zeroing the struct on allocation.
+restore_poe_context() then indicates that the por_el0 field is valid
+by setting the corresponding bit in __valid_fields, and
+restore_user_access_state() only touches POR_EL0 if there is a valid
+value to set it to. This is in line with how POR_EL0 was originally
+handled; all frame records are currently optional, except
+fpsimd_context.
+
+To ensure that __valid_fields is kept in sync, fields (currently
+just por_el0) are now accessed via accessors and prefixed with __ to
+discourage direct access.
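+
+In short, the rt_sigreturn flow after this patch looks as follows
+(condensed from the diff below):
+
+    struct user_access_state ua_state = {};
+
+    /* sigframe restore: restore_poe_context() runs only if a
+     * poe_context record is present, and only then marks por_el0
+     * as valid via set_ua_state_por_el0() */
+
+    u64 por_el0;
+
+    if (get_ua_state_por_el0(&ua_state, &por_el0) == 0)
+        write_sysreg_s(por_el0, SYS_POR_EL0);
+    /* no poe_context record: POR_EL0 is left untouched */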
+
+Fixes: 2e8a1acea859 ("arm64: signal: Improve POR_EL0 handling to avoid uaccess failures")
+Cc: <stable@vger.kernel.org>
+Reported-by: Will Deacon <will@kernel.org>
+Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/signal.c | 54 +++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 43 insertions(+), 11 deletions(-)
+
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -67,6 +67,9 @@ struct rt_sigframe_user_layout {
+ unsigned long end_offset;
+ };
+
++#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
++#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
++
+ /*
+ * Holds any EL0-controlled state that influences unprivileged memory accesses.
+ * This includes both accesses done in userspace and uaccess done in the kernel.
+@@ -74,13 +77,35 @@ struct rt_sigframe_user_layout {
+ * This state needs to be carefully managed to ensure that it doesn't cause
+ * uaccess to fail when setting up the signal frame, and the signal handler
+ * itself also expects a well-defined state when entered.
++ *
++ * The struct should be zero-initialised. Its members should only be accessed
++ * via the accessors below. __valid_fields tracks which of the fields are valid
++ * (have been set to some value).
+ */
+ struct user_access_state {
+- u64 por_el0;
++ unsigned int __valid_fields;
++ u64 __por_el0;
+ };
+
+-#define TERMINATOR_SIZE round_up(sizeof(struct _aarch64_ctx), 16)
+-#define EXTRA_CONTEXT_SIZE round_up(sizeof(struct extra_context), 16)
++#define UA_STATE_HAS_POR_EL0 BIT(0)
++
++static void set_ua_state_por_el0(struct user_access_state *ua_state,
++ u64 por_el0)
++{
++ ua_state->__por_el0 = por_el0;
++ ua_state->__valid_fields |= UA_STATE_HAS_POR_EL0;
++}
++
++static int get_ua_state_por_el0(const struct user_access_state *ua_state,
++ u64 *por_el0)
++{
++ if (ua_state->__valid_fields & UA_STATE_HAS_POR_EL0) {
++ *por_el0 = ua_state->__por_el0;
++ return 0;
++ }
++
++ return -ENOENT;
++}
+
+ /*
+ * Save the user access state into ua_state and reset it to disable any
+@@ -94,7 +119,7 @@ static void save_reset_user_access_state
+ for (int pkey = 0; pkey < arch_max_pkey(); pkey++)
+ por_enable_all |= POR_ELx_PERM_PREP(pkey, POE_RWX);
+
+- ua_state->por_el0 = read_sysreg_s(SYS_POR_EL0);
++ set_ua_state_por_el0(ua_state, read_sysreg_s(SYS_POR_EL0));
+ write_sysreg_s(por_enable_all, SYS_POR_EL0);
+ /*
+ * No ISB required as we can tolerate spurious Overlay faults -
+@@ -122,8 +147,10 @@ static void set_handler_user_access_stat
+ */
+ static void restore_user_access_state(const struct user_access_state *ua_state)
+ {
+- if (system_supports_poe())
+- write_sysreg_s(ua_state->por_el0, SYS_POR_EL0);
++ u64 por_el0;
++
++ if (get_ua_state_por_el0(ua_state, &por_el0) == 0)
++ write_sysreg_s(por_el0, SYS_POR_EL0);
+ }
+
+ static void init_user_layout(struct rt_sigframe_user_layout *user)
+@@ -333,11 +360,16 @@ static int restore_fpmr_context(struct u
+ static int preserve_poe_context(struct poe_context __user *ctx,
+ const struct user_access_state *ua_state)
+ {
+- int err = 0;
++ int err;
++ u64 por_el0;
++
++ err = get_ua_state_por_el0(ua_state, &por_el0);
++ if (WARN_ON_ONCE(err))
++ return err;
+
+ __put_user_error(POE_MAGIC, &ctx->head.magic, err);
+ __put_user_error(sizeof(*ctx), &ctx->head.size, err);
+- __put_user_error(ua_state->por_el0, &ctx->por_el0, err);
++ __put_user_error(por_el0, &ctx->por_el0, err);
+
+ return err;
+ }
+@@ -353,7 +385,7 @@ static int restore_poe_context(struct us
+
+ __get_user_error(por_el0, &(user->poe->por_el0), err);
+ if (!err)
+- ua_state->por_el0 = por_el0;
++ set_ua_state_por_el0(ua_state, por_el0);
+
+ return err;
+ }
+@@ -1095,7 +1127,7 @@ SYSCALL_DEFINE0(rt_sigreturn)
+ {
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+- struct user_access_state ua_state;
++ struct user_access_state ua_state = {};
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+@@ -1507,7 +1539,7 @@ static int setup_rt_frame(int usig, stru
+ {
+ struct rt_sigframe_user_layout user;
+ struct rt_sigframe __user *frame;
+- struct user_access_state ua_state;
++ struct user_access_state ua_state = {};
+ int err = 0;
+
+ fpsimd_save_and_flush_current_state();
--- /dev/null
+From 7746e3bd4cc19b5092e00d32d676e329bfcb6900 Mon Sep 17 00:00:00 2001
+From: Miklos Szeredi <mszeredi@redhat.com>
+Date: Fri, 10 Apr 2026 16:49:47 +0200
+Subject: fanotify: fix false positive on permission events
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+commit 7746e3bd4cc19b5092e00d32d676e329bfcb6900 upstream.
+
+fsnotify_get_mark_safe() may return false for a mark on an unrelated group,
+which results in bypassing the permission check.
+
+Fix by skipping over detached marks that are not in the current group.
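+
+Condensed from the fs/notify/mark.c hunk below, the preparation loop in
+fsnotify_prepare_user_wait() becomes:
+
+    while (mark && !fsnotify_get_mark_safe(mark)) {
+        if (mark->group == iter_info->current_group)
+            goto fail;  /* our own group's mark is going away: bail out */
+        /* detached mark of an unrelated group: skip it instead */
+        mark = fsnotify_next_mark(mark);
+        iter_info->marks[type] = mark;
+    }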
+
+CC: stable@vger.kernel.org
+Fixes: abc77577a669 ("fsnotify: Provide framework for dropping SRCU lock in ->handle_event")
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Link: https://patch.msgid.link/20260410144950.156160-1-mszeredi@redhat.com
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/notify/fsnotify.c | 2 +-
+ fs/notify/mark.c | 18 +++++++++++-------
+ include/linux/fsnotify_backend.h | 1 +
+ 3 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/fs/notify/fsnotify.c
++++ b/fs/notify/fsnotify.c
+@@ -444,7 +444,7 @@ static struct fsnotify_mark *fsnotify_fi
+ return hlist_entry_safe(node, struct fsnotify_mark, obj_list);
+ }
+
+-static struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
++struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark)
+ {
+ struct hlist_node *node = NULL;
+
+--- a/fs/notify/mark.c
++++ b/fs/notify/mark.c
+@@ -453,9 +453,6 @@ EXPORT_SYMBOL_GPL(fsnotify_put_mark);
+ */
+ static bool fsnotify_get_mark_safe(struct fsnotify_mark *mark)
+ {
+- if (!mark)
+- return true;
+-
+ if (refcount_inc_not_zero(&mark->refcnt)) {
+ spin_lock(&mark->lock);
+ if (mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) {
+@@ -496,15 +493,22 @@ bool fsnotify_prepare_user_wait(struct f
+ int type;
+
+ fsnotify_foreach_iter_type(type) {
++ struct fsnotify_mark *mark = iter_info->marks[type];
++
+ /* This can fail if mark is being removed */
+- if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
+- __release(&fsnotify_mark_srcu);
+- goto fail;
++ while (mark && !fsnotify_get_mark_safe(mark)) {
++ if (mark->group == iter_info->current_group) {
++ __release(&fsnotify_mark_srcu);
++ goto fail;
++ }
++ /* This is a mark in an unrelated group, skip */
++ mark = fsnotify_next_mark(mark);
++ iter_info->marks[type] = mark;
+ }
+ }
+
+ /*
+- * Now that both marks are pinned by refcount in the inode / vfsmount
++ * Now that all marks are pinned by refcount in the inode / vfsmount / etc
+ * lists, we can drop SRCU lock, and safely resume the list iteration
+ * once userspace returns.
+ */
+--- a/include/linux/fsnotify_backend.h
++++ b/include/linux/fsnotify_backend.h
+@@ -912,6 +912,7 @@ extern void fsnotify_clear_marks_by_grou
+ unsigned int obj_type);
+ extern void fsnotify_get_mark(struct fsnotify_mark *mark);
+ extern void fsnotify_put_mark(struct fsnotify_mark *mark);
++struct fsnotify_mark *fsnotify_next_mark(struct fsnotify_mark *mark);
+ extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
+ extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
+
--- /dev/null
+From d89fdda7dd8a488f922e1175e6782f781ba8a23b Mon Sep 17 00:00:00 2001
+From: Fuad Tabba <tabba@google.com>
+Date: Fri, 24 Apr 2026 09:49:06 +0100
+Subject: KVM: arm64: Fix kvm_vcpu_initialized() macro parameter
+
+From: Fuad Tabba <tabba@google.com>
+
+commit d89fdda7dd8a488f922e1175e6782f781ba8a23b upstream.
+
+The macro is defined with parameter 'v' but the body references the
+literal token 'vcpu' instead, causing it to silently operate on whatever
+'vcpu' resolves to in the caller's scope rather than the value passed by
+the caller. All current call sites happen to use a variable named 'vcpu',
+so the bug is latent.
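+
+The bug class is easy to reproduce in isolation; a minimal userspace
+sketch (hypothetical macro, not kernel code):
+
+    #include <stdio.h>
+
+    /* BUG: the parameter is 'v', but the body spells the token 'val' */
+    #define DOUBLE(v) ((val) * 2)
+
+    int main(void)
+    {
+        int val = 10, other = 3;
+
+        /* Expands to ((val) * 2): prints 20, not the expected 6. This
+         * compiles only because a variable named 'val' is in scope --
+         * the same reason the kvm_vcpu_initialized() bug stayed latent. */
+        printf("%d\n", DOUBLE(other));
+        return 0;
+    }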
+
+Fixes: e016333745c7 ("KVM: arm64: Only reset vCPU-scoped feature ID regs once")
+Signed-off-by: Fuad Tabba <tabba@google.com>
+Link: https://patch.msgid.link/20260424084908.370776-5-tabba@google.com
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/include/asm/kvm_host.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -1476,7 +1476,7 @@ static inline bool __vcpu_has_feature(co
+ #define kvm_vcpu_has_feature(k, f) __vcpu_has_feature(&(k)->arch, (f))
+ #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
+
+-#define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
++#define kvm_vcpu_initialized(v) vcpu_get_flag(v, VCPU_INITIALIZED)
+
+ int kvm_trng_call(struct kvm_vcpu *vcpu);
+ #ifdef CONFIG_KVM
--- /dev/null
+From 98b8aebb14fdc0133939fd8fe07d0d98333dc976 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Mon, 4 May 2026 09:00:01 +0800
+Subject: LoongArch: Fix SYM_SIGFUNC_START definition for 32BIT
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 98b8aebb14fdc0133939fd8fe07d0d98333dc976 upstream.
+
+The SYM_SIGFUNC_START definition should match struct sigcontext, where
+the GPR slots are 8 bytes long for both 32BIT and 64BIT. So replace
+SZREG with 8 to fix it.
+
+Cc: stable@vger.kernel.org
+Fixes: e4878c37f6679fde ("LoongArch: vDSO: Emit GNU_EH_FRAME correctly")
+Suggested-by: Xi Ruoyao <xry111@xry111.site>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/linkage.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/loongarch/include/asm/linkage.h
++++ b/arch/loongarch/include/asm/linkage.h
+@@ -69,7 +69,7 @@
+ 9, 10, 11, 12, 13, 14, 15, 16, \
+ 17, 18, 19, 20, 21, 22, 23, 24, \
+ 25, 26, 27, 28, 29, 30, 31; \
+- .cfi_offset \num, SC_REGS + \num * SZREG; \
++ .cfi_offset \num, SC_REGS + \num * 8; \
+ .endr; \
+ \
+ nop; \
--- /dev/null
+From 5203012fa6045aac4b69d4e7c212e16dcf38ef10 Mon Sep 17 00:00:00 2001
+From: Xianglai Li <lixianglai@loongson.cn>
+Date: Mon, 4 May 2026 09:00:37 +0800
+Subject: LoongArch: KVM: Compile switch.S directly into the kernel
+
+From: Xianglai Li <lixianglai@loongson.cn>
+
+commit 5203012fa6045aac4b69d4e7c212e16dcf38ef10 upstream.
+
+If switch.S is compiled directly into the kernel, the address of the
+kvm_exc_entry function is guaranteed to lie within the DMW memory area,
+so there is no longer any need to copy kvm_exc_entry into a separately
+allocated buffer (the "copy relocation").
+
+So this patch compiles switch.S directly into the kernel and then
+removes the copy-relocation logic for the kvm_exc_entry function.
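+
+With that, kvm_loongarch_env_init() reduces to plain function-pointer
+assignments (condensed from the main.c hunk below). Note the Kbuild
+change uses $(subst m,y,$(CONFIG_KVM)) so the kvm/ directory is still
+entered when KVM is modular, letting the new obj-y rule build switch.o
+into vmlinux:
+
+    kvm_loongarch_ops->exc_entry = (void *)kvm_exc_entry;
+    kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;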
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Xianglai Li <lixianglai@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/Kbuild | 2 -
+ arch/loongarch/include/asm/asm-prototypes.h | 20 ++++++++++++++++
+ arch/loongarch/include/asm/kvm_host.h | 3 --
+ arch/loongarch/kvm/Makefile | 3 +-
+ arch/loongarch/kvm/main.c | 35 ++--------------------------
+ arch/loongarch/kvm/switch.S | 20 ++++++++++++----
+ 6 files changed, 41 insertions(+), 42 deletions(-)
+
+--- a/arch/loongarch/Kbuild
++++ b/arch/loongarch/Kbuild
+@@ -3,7 +3,7 @@ obj-y += mm/
+ obj-y += net/
+ obj-y += vdso/
+
+-obj-$(CONFIG_KVM) += kvm/
++obj-$(subst m,y,$(CONFIG_KVM)) += kvm/
+
+ # for cleaning
+ subdir- += boot
+--- a/arch/loongarch/include/asm/asm-prototypes.h
++++ b/arch/loongarch/include/asm/asm-prototypes.h
+@@ -20,3 +20,23 @@ asmlinkage void noinstr __no_stack_prote
+ struct pt_regs *regs,
+ int (*fn)(void *),
+ void *fn_arg);
++
++struct kvm_run;
++struct kvm_vcpu;
++struct loongarch_fpu;
++
++void kvm_exc_entry(void);
++int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
++
++void kvm_save_fpu(struct loongarch_fpu *fpu);
++void kvm_restore_fpu(struct loongarch_fpu *fpu);
++
++#ifdef CONFIG_CPU_HAS_LSX
++void kvm_save_lsx(struct loongarch_fpu *fpu);
++void kvm_restore_lsx(struct loongarch_fpu *fpu);
++#endif
++
++#ifdef CONFIG_CPU_HAS_LASX
++void kvm_save_lasx(struct loongarch_fpu *fpu);
++void kvm_restore_lasx(struct loongarch_fpu *fpu);
++#endif
+--- a/arch/loongarch/include/asm/kvm_host.h
++++ b/arch/loongarch/include/asm/kvm_host.h
+@@ -85,7 +85,6 @@ struct kvm_context {
+ struct kvm_world_switch {
+ int (*exc_entry)(void);
+ int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+- unsigned long page_order;
+ };
+
+ #define MAX_PGTABLE_LEVELS 4
+@@ -339,8 +338,6 @@ void kvm_exc_entry(void);
+ int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+ extern unsigned long vpid_mask;
+-extern const unsigned long kvm_exception_size;
+-extern const unsigned long kvm_enter_guest_size;
+ extern struct kvm_world_switch *kvm_loongarch_ops;
+
+ #define SW_GCSR (1 << 0)
+--- a/arch/loongarch/kvm/Makefile
++++ b/arch/loongarch/kvm/Makefile
+@@ -7,11 +7,12 @@ include $(srctree)/virt/kvm/Makefile.kvm
+
+ obj-$(CONFIG_KVM) += kvm.o
+
++obj-y += switch.o
++
+ kvm-y += exit.o
+ kvm-y += interrupt.o
+ kvm-y += main.o
+ kvm-y += mmu.o
+-kvm-y += switch.o
+ kvm-y += timer.o
+ kvm-y += tlb.o
+ kvm-y += vcpu.o
+--- a/arch/loongarch/kvm/main.c
++++ b/arch/loongarch/kvm/main.c
+@@ -340,8 +340,7 @@ void kvm_arch_disable_virtualization_cpu
+
+ static int kvm_loongarch_env_init(void)
+ {
+- int cpu, order, ret;
+- void *addr;
++ int cpu, ret;
+ struct kvm_context *context;
+
+ vmcs = alloc_percpu(struct kvm_context);
+@@ -357,30 +356,8 @@ static int kvm_loongarch_env_init(void)
+ return -ENOMEM;
+ }
+
+- /*
+- * PGD register is shared between root kernel and kvm hypervisor.
+- * So world switch entry should be in DMW area rather than TLB area
+- * to avoid page fault reenter.
+- *
+- * In future if hardware pagetable walking is supported, we won't
+- * need to copy world switch code to DMW area.
+- */
+- order = get_order(kvm_exception_size + kvm_enter_guest_size);
+- addr = (void *)__get_free_pages(GFP_KERNEL, order);
+- if (!addr) {
+- free_percpu(vmcs);
+- vmcs = NULL;
+- kfree(kvm_loongarch_ops);
+- kvm_loongarch_ops = NULL;
+- return -ENOMEM;
+- }
+-
+- memcpy(addr, kvm_exc_entry, kvm_exception_size);
+- memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size);
+- flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size);
+- kvm_loongarch_ops->exc_entry = addr;
+- kvm_loongarch_ops->enter_guest = addr + kvm_exception_size;
+- kvm_loongarch_ops->page_order = order;
++ kvm_loongarch_ops->exc_entry = (void *)kvm_exc_entry;
++ kvm_loongarch_ops->enter_guest = (void *)kvm_enter_guest;
+
+ vpid_mask = read_csr_gstat();
+ vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT;
+@@ -414,16 +391,10 @@ static int kvm_loongarch_env_init(void)
+
+ static void kvm_loongarch_env_exit(void)
+ {
+- unsigned long addr;
+-
+ if (vmcs)
+ free_percpu(vmcs);
+
+ if (kvm_loongarch_ops) {
+- if (kvm_loongarch_ops->exc_entry) {
+- addr = (unsigned long)kvm_loongarch_ops->exc_entry;
+- free_pages(addr, kvm_loongarch_ops->page_order);
+- }
+ kfree(kvm_loongarch_ops);
+ }
+
+--- a/arch/loongarch/kvm/switch.S
++++ b/arch/loongarch/kvm/switch.S
+@@ -4,9 +4,11 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/kvm_types.h>
+ #include <asm/asm.h>
+ #include <asm/asmmacro.h>
+ #include <asm/loongarch.h>
++#include <asm/page.h>
+ #include <asm/regdef.h>
+ #include <asm/unwind_hints.h>
+
+@@ -100,8 +102,13 @@
+ * - is still in guest mode, such as pgd table/vmid registers etc,
+ * - will fix with hw page walk enabled in future
+ * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
++ *
++ * PGD register is shared between root kernel and kvm hypervisor.
++ * So world switch entry should be in DMW area rather than TLB area
++ * to avoid page fault re-enter.
+ */
+ .text
++ .p2align PAGE_SHIFT
+ .cfi_sections .debug_frame
+ SYM_CODE_START(kvm_exc_entry)
+ UNWIND_HINT_UNDEFINED
+@@ -190,8 +197,8 @@ ret_to_host:
+ kvm_restore_host_gpr a2
+ jr ra
+
+-SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
+ SYM_CODE_END(kvm_exc_entry)
++EXPORT_SYMBOL_FOR_KVM(kvm_exc_entry)
+
+ /*
+ * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
+@@ -215,8 +222,8 @@ SYM_FUNC_START(kvm_enter_guest)
+ /* Save kvm_vcpu to kscratch */
+ csrwr a1, KVM_VCPU_KS
+ kvm_switch_to_guest
+-SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
+ SYM_FUNC_END(kvm_enter_guest)
++EXPORT_SYMBOL_FOR_KVM(kvm_enter_guest)
+
+ SYM_FUNC_START(kvm_save_fpu)
+ fpu_save_csr a0 t1
+@@ -224,6 +231,7 @@ SYM_FUNC_START(kvm_save_fpu)
+ fpu_save_cc a0 t1 t2
+ jr ra
+ SYM_FUNC_END(kvm_save_fpu)
++EXPORT_SYMBOL_FOR_KVM(kvm_save_fpu)
+
+ SYM_FUNC_START(kvm_restore_fpu)
+ fpu_restore_double a0 t1
+@@ -231,6 +239,7 @@ SYM_FUNC_START(kvm_restore_fpu)
+ fpu_restore_cc a0 t1 t2
+ jr ra
+ SYM_FUNC_END(kvm_restore_fpu)
++EXPORT_SYMBOL_FOR_KVM(kvm_restore_fpu)
+
+ #ifdef CONFIG_CPU_HAS_LSX
+ SYM_FUNC_START(kvm_save_lsx)
+@@ -239,6 +248,7 @@ SYM_FUNC_START(kvm_save_lsx)
+ lsx_save_data a0 t1
+ jr ra
+ SYM_FUNC_END(kvm_save_lsx)
++EXPORT_SYMBOL_FOR_KVM(kvm_save_lsx)
+
+ SYM_FUNC_START(kvm_restore_lsx)
+ lsx_restore_data a0 t1
+@@ -246,6 +256,7 @@ SYM_FUNC_START(kvm_restore_lsx)
+ fpu_restore_csr a0 t1 t2
+ jr ra
+ SYM_FUNC_END(kvm_restore_lsx)
++EXPORT_SYMBOL_FOR_KVM(kvm_restore_lsx)
+ #endif
+
+ #ifdef CONFIG_CPU_HAS_LASX
+@@ -255,6 +266,7 @@ SYM_FUNC_START(kvm_save_lasx)
+ lasx_save_data a0 t1
+ jr ra
+ SYM_FUNC_END(kvm_save_lasx)
++EXPORT_SYMBOL_FOR_KVM(kvm_save_lasx)
+
+ SYM_FUNC_START(kvm_restore_lasx)
+ lasx_restore_data a0 t1
+@@ -262,10 +274,8 @@ SYM_FUNC_START(kvm_restore_lasx)
+ fpu_restore_csr a0 t1 t2
+ jr ra
+ SYM_FUNC_END(kvm_restore_lasx)
++EXPORT_SYMBOL_FOR_KVM(kvm_restore_lasx)
+ #endif
+- .section ".rodata"
+-SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
+-SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
+
+ #ifdef CONFIG_CPU_HAS_LBT
+ STACK_FRAME_NON_STANDARD kvm_restore_fpu
--- /dev/null
+From 8f5ce56b76303c55b78a87af996e2e0f8535f979 Mon Sep 17 00:00:00 2001
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+Date: Wed, 22 Apr 2026 23:33:53 +0900
+Subject: mm/hugetlb_cma: round up per_node before logging it
+
+From: Sang-Heon Jeon <ekffu200098@gmail.com>
+
+commit 8f5ce56b76303c55b78a87af996e2e0f8535f979 upstream.
+
+When the user requests a total hugetlb CMA size without per-node
+specification, hugetlb_cma_reserve() computes per_node from
+hugetlb_cma_size and the number of nodes that have memory
+
+ per_node = DIV_ROUND_UP(hugetlb_cma_size,
+ nodes_weight(hugetlb_bootmem_nodes));
+
+The reservation loop later computes
+
+ size = round_up(min(per_node, hugetlb_cma_size - reserved),
+ PAGE_SIZE << order);
+
+So the per-node size actually reserved is a multiple of (PAGE_SIZE <<
+order), but the logged per_node is not rounded up, so it may be smaller
+than the actual reserved size.
+
+For example, as the existing comment describes, if a 3 GB area is
+requested on a machine with 4 NUMA nodes that have memory, 1 GB is
+allocated on the first three nodes, but the printed log is
+
+ hugetlb_cma: reserve 3072 MiB, up to 768 MiB per node
+
+Round per_node up to (PAGE_SIZE << order) before logging so that the
+printed log always matches the actual reserved size. No functional change
+to the actual reservation size, as the following case analysis shows
+
+1. remaining (hugetlb_cma_size - reserved) >= rounded per_node
+ - AS-IS: min() picks unrounded per_node;
+ round_up() returns rounded per_node
+ - TO-BE: min() picks rounded per_node;
+ round_up() returns rounded per_node (no-op)
+2. remaining < unrounded per_node
+ - AS-IS: min() picks remaining;
+ round_up() returns round_up(remaining)
+ - TO-BE: min() picks remaining;
+ round_up() returns round_up(remaining)
+3. unrounded per_node <= remaining < rounded per_node
+ - AS-IS: min() picks unrounded per_node;
+ round_up() returns rounded per_node
+ - TO-BE: min() picks remaining;
+      round_up() returns round_up(remaining), which equals rounded per_node
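+
+A quick userspace check of the example above (assumptions: x86-64,
+where a gigantic page, i.e. PAGE_SIZE << order, is 1 GiB; macros
+simplified from the kernel ones):
+
+    #include <stdio.h>
+
+    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+    #define round_up(x, y)     ((((x) - 1) | ((y) - 1)) + 1)
+
+    int main(void)
+    {
+        unsigned long gp = 1UL << 30;       /* PAGE_SIZE << order */
+        unsigned long per_node = DIV_ROUND_UP(3 * gp, 4);
+
+        printf("logged: %lu MiB\n", per_node >> 20);              /* 768 */
+        printf("actual: %lu MiB\n", round_up(per_node, gp) >> 20); /* 1024 */
+        return 0;
+    }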
+
+Link: https://lore.kernel.org/20260422143353.852257-1-ekffu200098@gmail.com
+Fixes: cf11e85fc08c ("mm: hugetlb: optionally allocate gigantic hugepages using cma") # 5.7
+Signed-off-by: Sang-Heon Jeon <ekffu200098@gmail.com>
+Reviewed-by: Muchun Song <muchun.song@linux.dev>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb_cma.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/hugetlb_cma.c
++++ b/mm/hugetlb_cma.c
+@@ -193,6 +193,7 @@ void __init hugetlb_cma_reserve(int orde
+ */
+ per_node = DIV_ROUND_UP(hugetlb_cma_size,
+ nodes_weight(hugetlb_bootmem_nodes));
++ per_node = round_up(per_node, PAGE_SIZE << order);
+ pr_info("hugetlb_cma: reserve %lu MiB, up to %lu MiB per node\n",
+ hugetlb_cma_size / SZ_1M, per_node / SZ_1M);
+ }
--- /dev/null
+From c6d395e2de1306b5fef0344a3c3835fbbfaa18be Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue, 5 May 2026 17:00:55 +0200
+Subject: mptcp: pm: ADD_ADDR rtx: skip inactive subflows
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit c6d395e2de1306b5fef0344a3c3835fbbfaa18be upstream.
+
+When looking at the maximum RTO amongst the subflows, inactive subflows
+were taken into account: that includes stale ones, and the initial one
+if it has already been closed.
+
+Unusable subflows are now simply skipped. Stale ones are used as an
+alternative: if there are only stale ones, their maximum RTO is taken,
+to avoid eventually falling back to net.mptcp.add_addr_timeout, which
+is set to 2 minutes by default.
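+
+Condensed from the diff below, the selection logic becomes:
+
+    if (max)            /* at least one active, non-stale subflow */
+        return min(max, rto);
+    if (max_stale)      /* only stale subflows are available */
+        return min(max_stale, rto);
+    return rto;         /* no usable subflow: keep the sysctl value */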
+
+Fixes: 30549eebc4d8 ("mptcp: make ADD_ADDR retransmission timeout adaptive")
+Cc: stable@vger.kernel.org
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20260505-net-mptcp-pm-fixes-7-1-rc3-v1-7-fca8091060a4@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -305,18 +305,28 @@ static unsigned int mptcp_adjust_add_add
+ const struct net *net = sock_net((struct sock *)msk);
+ unsigned int rto = mptcp_get_add_addr_timeout(net);
+ struct mptcp_subflow_context *subflow;
+- unsigned int max = 0;
++ unsigned int max = 0, max_stale = 0;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+ struct inet_connection_sock *icsk = inet_csk(ssk);
+
+- if (icsk->icsk_rto > max)
++ if (!__mptcp_subflow_active(subflow))
++ continue;
++
++ if (unlikely(subflow->stale)) {
++ if (icsk->icsk_rto > max_stale)
++ max_stale = icsk->icsk_rto;
++ } else if (icsk->icsk_rto > max) {
+ max = icsk->icsk_rto;
++ }
+ }
+
+- if (max && max < rto)
+- rto = max;
++ if (max)
++ return min(max, rto);
++
++ if (max_stale)
++ return min(max_stale, rto);
+
+ return rto;
+ }
--- /dev/null
+From e47029b977e747cb3a9174308fd55762cce70147 Mon Sep 17 00:00:00 2001
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+Date: Fri, 17 Apr 2026 15:24:39 +0000
+Subject: mtd: spi-nor: debugfs: fix out-of-bounds read in spi_nor_params_show()
+
+From: Tudor Ambarus <tudor.ambarus@linaro.org>
+
+commit e47029b977e747cb3a9174308fd55762cce70147 upstream.
+
+Sashiko noticed an out-of-bounds read [1].
+
+In spi_nor_params_show(), the snor_f_names array is passed to
+spi_nor_print_flags() using sizeof(snor_f_names).
+
+Since snor_f_names is an array of pointers, sizeof() returns the total
+number of bytes occupied by the pointers
+ (element_count * sizeof(void *))
+rather than the element count itself. On 64-bit systems, this makes the
+passed length 8x larger than intended.
+
+Inside spi_nor_print_flags(), the 'names_len' argument is used to
+bounds-check the 'names' array access. An out-of-bounds read occurs
+if a flag bit is set that exceeds the array's actual element count
+but is within the inflated byte-size count.
+
+Correct this by using ARRAY_SIZE() to pass the actual number of
+string pointers in the array.
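+
+A standalone illustration of the pitfall (userspace sketch, not kernel
+code; ARRAY_SIZE() is simplified, the kernel version also type-checks):
+
+    #include <stdio.h>
+
+    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+    static const char * const names[] = { "a", "b", "c" };
+
+    int main(void)
+    {
+        /* On LP64: sizeof(names) == 24, ARRAY_SIZE(names) == 3 */
+        printf("%zu vs %zu\n", sizeof(names), ARRAY_SIZE(names));
+        return 0;
+    }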
+
+Cc: stable@vger.kernel.org
+Fixes: 0257be79fc4a ("mtd: spi-nor: expose internal parameters via debugfs")
+Closes: https://sashiko.dev/#/patchset/20260417-die-erase-fix-v2-1-73bb7004ebad%40infineon.com [1]
+Signed-off-by: Tudor Ambarus <tudor.ambarus@linaro.org>
+Reviewed-by: Takahiro Kuwano <takahiro.kuwano@infineon.com>
+Reviewed-by: Michael Walle <mwalle@kernel.org>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/debugfs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/mtd/spi-nor/debugfs.c
++++ b/drivers/mtd/spi-nor/debugfs.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+
++#include <linux/array_size.h>
+ #include <linux/debugfs.h>
+ #include <linux/mtd/spi-nor.h>
+ #include <linux/spi/spi.h>
+@@ -92,7 +93,8 @@ static int spi_nor_params_show(struct se
+ seq_printf(s, "address nbytes\t%u\n", nor->addr_nbytes);
+
+ seq_puts(s, "flags\t\t");
+- spi_nor_print_flags(s, nor->flags, snor_f_names, sizeof(snor_f_names));
++ spi_nor_print_flags(s, nor->flags, snor_f_names,
++ ARRAY_SIZE(snor_f_names));
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\nopcodes\n");
--- /dev/null
+From 4b9e327991815e128ad3af75c3a04630a63ce3e0 Mon Sep 17 00:00:00 2001
+From: Kai Zen <kai.aizen.dev@gmail.com>
+Date: Thu, 30 Apr 2026 18:26:48 +0300
+Subject: net: rtnetlink: zero ifla_vf_broadcast to avoid stack infoleak in rtnl_fill_vfinfo
+
+From: Kai Zen <kai.aizen.dev@gmail.com>
+
+commit 4b9e327991815e128ad3af75c3a04630a63ce3e0 upstream.
+
+rtnl_fill_vfinfo() declares struct ifla_vf_broadcast on the stack
+without initialisation:
+
+ struct ifla_vf_broadcast vf_broadcast;
+
+The struct contains a single fixed 32-byte field:
+
+ /* include/uapi/linux/if_link.h */
+ struct ifla_vf_broadcast {
+ __u8 broadcast[32];
+ };
+
+The function then copies dev->broadcast into it using dev->addr_len
+as the length:
+
+ memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
+
+On Ethernet devices (the overwhelming majority of SR-IOV NICs)
+dev->addr_len is 6, so only the first 6 bytes of broadcast[] are
+written. The remaining 26 bytes retain whatever was previously on
+the kernel stack. The full struct is then handed to userspace via:
+
+ nla_put(skb, IFLA_VF_BROADCAST,
+ sizeof(vf_broadcast), &vf_broadcast)
+
+leaking up to 26 bytes of uninitialised kernel stack per VF per
+RTM_GETLINK request, repeatable.
+
+The other vf_* structs in the same function are explicitly zeroed
+for exactly this reason - see the memset() calls for ivi,
+vf_vlan_info, node_guid and port_guid a few lines above.
+vf_broadcast was simply missed when it was added.
+
+Reachability: any unprivileged local process can open AF_NETLINK /
+NETLINK_ROUTE without capabilities and send RTM_GETLINK with an
+IFLA_EXT_MASK attribute carrying RTEXT_FILTER_VF. The kernel walks
+each VF and emits IFLA_VF_BROADCAST, leaking 26 bytes of stack per
+VF per request. Stack residue at this call site can include return
+addresses and transient sensitive data; KASAN with stack
+instrumentation, or KMSAN, will flag the nla_put() when reproduced.
+
+Zero the on-stack struct before the partial memcpy, matching the
+existing pattern used for the other vf_* structs in the same
+function.
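+
+The fixed pattern in isolation (userspace sketch, not the kernel code;
+names are illustrative):
+
+    #include <stdio.h>
+    #include <string.h>
+
+    struct bcast { unsigned char b[32]; };
+
+    int main(void)
+    {
+        unsigned char hwaddr[6] = { 1, 2, 3, 4, 5, 6 };
+        struct bcast out;                /* uninitialised stack */
+
+        memset(&out, 0, sizeof(out));    /* the fix: without this,
+                                          * b[6..31] keep stack residue */
+        memcpy(out.b, hwaddr, sizeof(hwaddr)); /* partial: 6 of 32 bytes */
+
+        for (size_t i = 0; i < sizeof(out.b); i++)
+            printf("%02x", out.b[i]);    /* the whole struct escapes */
+        printf("\n");
+        return 0;
+    }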
+
+Fixes: 75345f888f70 ("ipoib: show VF broadcast address")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kai Zen <kai.aizen.dev@gmail.com>
+Link: https://patch.msgid.link/3c506e8f936e52b57620269b55c348af05d413a2.1777557228.git.kai.aizen.dev@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1566,6 +1566,7 @@ static noinline_for_stack int rtnl_fill_
+ port_guid.vf = ivi.vf;
+
+ memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
++ memset(&vf_broadcast, 0, sizeof(vf_broadcast));
+ memcpy(vf_broadcast.broadcast, dev->broadcast, dev->addr_len);
+ vf_vlan.vlan = ivi.vlan;
+ vf_vlan.qos = ivi.qos;
--- /dev/null
+From 5ad732a56be46aabf158c16aa0c095291727aaef Mon Sep 17 00:00:00 2001
+From: Dapeng Mi <dapeng1.mi@linux.intel.com>
+Date: Thu, 30 Apr 2026 08:25:54 +0800
+Subject: perf/x86/intel: Improve validation and configuration of ACR masks
+
+From: Dapeng Mi <dapeng1.mi@linux.intel.com>
+
+commit 5ad732a56be46aabf158c16aa0c095291727aaef upstream.
+
+Currently there are several issues with the user space ACR mask
+validation and configuration.
+- The validation of the user space ACR mask (attr.config2) is
+  incomplete, e.g., the mask could include an index that belongs to
+  another ACR event group, but this is not validated.
+- An early return on an invalid ACR mask caused all subsequent ACR
+  groups to be skipped.
+- The stale hardware ACR mask (hw.config1) is not cleared before the
+  new hardware ACR mask is set.
+
+The following changes address all of the above issues.
+- Determine the event index range [i, j) occupied by an ACR group. Any
+  bits in the user-space mask that fall outside this range are now
+  dropped (see the condensed sketch after this list).
+- Instead of an early return on invalid bits, drop only the invalid
+ portions and continue iterating through all ACR events to ensure full
+ configuration.
+- Explicitly clear the stale hardware ACR mask for each event prior to
+ writing the new configuration.
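+
+Condensed from the diff below: for an ACR group occupying
+cpuc->event_list[i..j), user-space bit 'bit' names the bit-th group
+member and is translated to that member's assigned hardware counter:
+
+    event->hw.config1 = 0;                   /* clear the stale mask */
+    for_each_set_bit(bit, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
+        idx = i + bit;
+        if (idx >= j || !is_acr_event_group(cpuc->event_list[idx]))
+            continue;                        /* drop invalid bits */
+        __set_bit(cpuc->assign[idx], (unsigned long *)&event->hw.config1);
+    }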
+
+Besides, a non-leader event member of an ACR group could in theory be
+disabled. This could cause bit-shifting errors in the acr_mask of the
+remaining group members. But since ACR sampling requires all events to
+be active, this should not be a big concern in real use cases. Add a
+"FIXME" comment to note this risk.
+
+Fixes: ec980e4facef ("perf/x86/intel: Support auto counter reload")
+Signed-off-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260430002558.712334-2-dapeng1.mi@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 32 +++++++++++++++++++++++++-------
+ 1 file changed, 25 insertions(+), 7 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -2980,23 +2980,41 @@ static void intel_pmu_enable_event(struc
+ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
+ {
+ struct perf_event *event, *leader;
+- int i, j, idx;
++ int i, j, k, bit, idx;
+
++ /*
++ * FIXME: ACR mask parsing relies on cpuc->event_list[] (active events only).
++ * Disabling an ACR event causes bit-shifting errors in the acr_mask of
++ * remaining group members. As ACR sampling requires all events to be active,
++ * this limitation is acceptable for now. Revisit if independent event toggling
++ * is required.
++ */
+ for (i = 0; i < cpuc->n_events; i++) {
+ leader = cpuc->event_list[i];
+ if (!is_acr_event_group(leader))
+ continue;
+
+- /* The ACR events must be contiguous. */
++ /* Find the last event of the ACR group. */
+ for (j = i; j < cpuc->n_events; j++) {
+ event = cpuc->event_list[j];
+ if (event->group_leader != leader->group_leader)
+ break;
+- for_each_set_bit(idx, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
+- if (i + idx >= cpuc->n_events ||
+- !is_acr_event_group(cpuc->event_list[i + idx]))
+- return;
+- __set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
++ }
++
++ /*
++ * Translate the user-space ACR mask (attr.config2) into the physical
++ * counter bitmask (hw.config1) for each ACR event in the group.
++ * NOTE: ACR event contiguity is guaranteed by intel_pmu_hw_config().
++ */
++ for (k = i; k < j; k++) {
++ event = cpuc->event_list[k];
++ event->hw.config1 = 0;
++ for_each_set_bit(bit, (unsigned long *)&event->attr.config2, X86_PMC_IDX_MAX) {
++ idx = i + bit;
++ /* Event index of ACR group must locate in [i, j). */
++ if (idx >= j || !is_acr_event_group(cpuc->event_list[idx]))
++ continue;
++ __set_bit(cpuc->assign[idx], (unsigned long *)&event->hw.config1);
+ }
+ }
+ i = j - 1;
spi-zynqmp-gqspi-fix-controller-deregistration.patch
spi-s3c64xx-fix-null-deref-on-driver-unbind.patch
staging-vme_user-fix-root-device-leak-on-init-failure.patch
+fanotify-fix-false-positive-on-permission-events.patch
+kvm-arm64-fix-kvm_vcpu_initialized-macro-parameter.patch
+mtd-spi-nor-debugfs-fix-out-of-bounds-read-in-spi_nor_params_show.patch
+arm64-signal-preserve-por_el0-if-poe_context-is-missing.patch
+mm-hugetlb_cma-round-up-per_node-before-logging-it.patch
+loongarch-fix-sym_sigfunc_start-definition-for-32bit.patch
+loongarch-kvm-compile-switch.s-directly-into-the-kernel.patch
+net-rtnetlink-zero-ifla_vf_broadcast-to-avoid-stack-infoleak-in-rtnl_fill_vfinfo.patch
+mptcp-pm-add_addr-rtx-skip-inactive-subflows.patch
+perf-x86-intel-improve-validation-and-configuration-of-acr-masks.patch