]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
7.0-stable patches master
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 1 May 2026 13:29:47 +0000 (15:29 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 1 May 2026 13:29:47 +0000 (15:29 +0200)
added patches:
apparmor-fix-string-overrun-due-to-missing-termination.patch
arm64-dts-marvell-udpu-add-ethernet-aliases.patch
drm-arcpgu-fix-device-node-leak.patch
extract-cert-wrap-key_pass-with-ifdef-use_pkcs11_engine.patch
fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
fs-prepare-for-adding-lsm-blob-to-backing_file.patch
hwmon-isl28022-fix-integer-overflow-in-power-calculation-on-32-bit.patch
hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
ipv4-icmp-validate-reply-type-before-using-icmp_pointers.patch
libceph-prevent-potential-null-ptr-deref-in-ceph_handle_auth_reply.patch
loongarch-make-arch_irq_work_has_interrupt-true-only-if-ipi-hw-exist.patch
loongarch-show-cpu-vulnerabilites-correctly.patch
lsm-add-backing_file-lsm-hooks.patch
media-rzv2h-ivc-fix-axirx_vblank-register-write.patch
media-rzv2h-ivc-revise-default-vblank-formula.patch
net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch
net-ks8851-avoid-excess-softirq-scheduling.patch
net-ks8851-reinstate-disabling-of-bhs-around-irq-handler.patch
net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch
net-qrtr-ns-free-the-node-during-ctrl_cmd_bye.patch
net-qrtr-ns-limit-the-maximum-number-of-lookups.patch
net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch
net-qrtr-ns-limit-the-total-number-of-nodes.patch
net-rds-fix-mr-cleanup-on-copy-error.patch
net-smc-avoid-early-lgr-access-in-smc_clc_wait_msg.patch
net-txgbe-fix-firmware-version-check.patch
netconsole-avoid-out-of-bounds-access-on-empty-string-in-trim_newline.patch
power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch
printf-compile-the-kunit-test-with-disable_branch_profiling-disable_branch_profiling.patch
rdma-rxe-validate-pad-and-icrc-before-payload_size-in-rxe_rcv.patch
selinux-fix-overlayfs-mmap-and-mprotect-access-checks.patch
slub-fix-data-loss-and-overflow-in-krealloc.patch
spi-fix-resource-leaks-on-device-setup-failure.patch
tpm-avoid-wunused-but-set-variable.patch
tracing-fprobe-reject-registration-of-a-registered-fprobe-before-init.patch

36 files changed:
queue-7.0/apparmor-fix-string-overrun-due-to-missing-termination.patch [new file with mode: 0644]
queue-7.0/arm64-dts-marvell-udpu-add-ethernet-aliases.patch [new file with mode: 0644]
queue-7.0/drm-arcpgu-fix-device-node-leak.patch [new file with mode: 0644]
queue-7.0/extract-cert-wrap-key_pass-with-ifdef-use_pkcs11_engine.patch [new file with mode: 0644]
queue-7.0/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch [new file with mode: 0644]
queue-7.0/fs-prepare-for-adding-lsm-blob-to-backing_file.patch [new file with mode: 0644]
queue-7.0/hwmon-isl28022-fix-integer-overflow-in-power-calculation-on-32-bit.patch [new file with mode: 0644]
queue-7.0/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch [new file with mode: 0644]
queue-7.0/ipv4-icmp-validate-reply-type-before-using-icmp_pointers.patch [new file with mode: 0644]
queue-7.0/libceph-prevent-potential-null-ptr-deref-in-ceph_handle_auth_reply.patch [new file with mode: 0644]
queue-7.0/loongarch-make-arch_irq_work_has_interrupt-true-only-if-ipi-hw-exist.patch [new file with mode: 0644]
queue-7.0/loongarch-show-cpu-vulnerabilites-correctly.patch [new file with mode: 0644]
queue-7.0/lsm-add-backing_file-lsm-hooks.patch [new file with mode: 0644]
queue-7.0/media-rzv2h-ivc-fix-axirx_vblank-register-write.patch [new file with mode: 0644]
queue-7.0/media-rzv2h-ivc-revise-default-vblank-formula.patch [new file with mode: 0644]
queue-7.0/net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch [new file with mode: 0644]
queue-7.0/net-ks8851-avoid-excess-softirq-scheduling.patch [new file with mode: 0644]
queue-7.0/net-ks8851-reinstate-disabling-of-bhs-around-irq-handler.patch [new file with mode: 0644]
queue-7.0/net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch [new file with mode: 0644]
queue-7.0/net-qrtr-ns-free-the-node-during-ctrl_cmd_bye.patch [new file with mode: 0644]
queue-7.0/net-qrtr-ns-limit-the-maximum-number-of-lookups.patch [new file with mode: 0644]
queue-7.0/net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch [new file with mode: 0644]
queue-7.0/net-qrtr-ns-limit-the-total-number-of-nodes.patch [new file with mode: 0644]
queue-7.0/net-rds-fix-mr-cleanup-on-copy-error.patch [new file with mode: 0644]
queue-7.0/net-smc-avoid-early-lgr-access-in-smc_clc_wait_msg.patch [new file with mode: 0644]
queue-7.0/net-txgbe-fix-firmware-version-check.patch [new file with mode: 0644]
queue-7.0/netconsole-avoid-out-of-bounds-access-on-empty-string-in-trim_newline.patch [new file with mode: 0644]
queue-7.0/power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch [new file with mode: 0644]
queue-7.0/printf-compile-the-kunit-test-with-disable_branch_profiling-disable_branch_profiling.patch [new file with mode: 0644]
queue-7.0/rdma-rxe-validate-pad-and-icrc-before-payload_size-in-rxe_rcv.patch [new file with mode: 0644]
queue-7.0/selinux-fix-overlayfs-mmap-and-mprotect-access-checks.patch [new file with mode: 0644]
queue-7.0/series
queue-7.0/slub-fix-data-loss-and-overflow-in-krealloc.patch [new file with mode: 0644]
queue-7.0/spi-fix-resource-leaks-on-device-setup-failure.patch [new file with mode: 0644]
queue-7.0/tpm-avoid-wunused-but-set-variable.patch [new file with mode: 0644]
queue-7.0/tracing-fprobe-reject-registration-of-a-registered-fprobe-before-init.patch [new file with mode: 0644]

diff --git a/queue-7.0/apparmor-fix-string-overrun-due-to-missing-termination.patch b/queue-7.0/apparmor-fix-string-overrun-due-to-missing-termination.patch
new file mode 100644 (file)
index 0000000..35600ae
--- /dev/null
@@ -0,0 +1,123 @@
+From 828bf7929bedcb79b560b5b4e44f22abee07d31b Mon Sep 17 00:00:00 2001
+From: Daniel J Blueman <daniel@quora.org>
+Date: Fri, 27 Mar 2026 19:58:32 +0800
+Subject: apparmor: Fix string overrun due to missing termination
+
+From: Daniel J Blueman <daniel@quora.org>
+
+commit 828bf7929bedcb79b560b5b4e44f22abee07d31b upstream.
+
+When booting Ubuntu 26.04 with Linux 7.0-rc4 on an ARM64 Qualcomm
+Snapdragon X1 we see a string buffer overrun:
+
+BUG: KASAN: slab-out-of-bounds in aa_dfa_match (security/apparmor/match.c:535)
+Read of size 1 at addr ffff0008901cc000 by task snap-update-ns/2120
+
+CPU: 5 UID: 60578 PID: 2120 Comm: snap-update-ns Not tainted 7.0.0-rc4+ #22 PREEMPTLAZY
+Hardware name: LENOVO 83ED/LNVNB161216, BIOS NHCN60WW 09/11/2025
+Call trace:
+show_stack (arch/arm64/kernel/stacktrace.c:501) (C)
+dump_stack_lvl (lib/dump_stack.c:122)
+print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+kasan_report (mm/kasan/report.c:597)
+__asan_report_load1_noabort (mm/kasan/report_generic.c:378)
+aa_dfa_match (security/apparmor/match.c:535)
+match_mnt_path_str (security/apparmor/mount.c:244 security/apparmor/mount.c:336)
+match_mnt (security/apparmor/mount.c:371)
+aa_bind_mount (security/apparmor/mount.c:447 (discriminator 4))
+apparmor_sb_mount (security/apparmor/lsm.c:719 (discriminator 1))
+security_sb_mount (security/security.c:1062 (discriminator 31))
+path_mount (fs/namespace.c:4101)
+__arm64_sys_mount (fs/namespace.c:4172 fs/namespace.c:4361 fs/namespace.c:4338 fs/namespace.c:4338)
+invoke_syscall.constprop.0 (arch/arm64/kernel/syscall.c:35 arch/arm64/kernel/syscall.c:49)
+el0_svc_common.constprop.0 (./include/linux/thread_info.h:142 (discriminator 2) arch/arm64/kernel/syscall.c:140 (discriminator 2))
+do_el0_svc (arch/arm64/kernel/syscall.c:152)
+el0_svc (arch/arm64/kernel/entry-common.c:80 arch/arm64/kernel/entry-common.c:725)
+el0t_64_sync_handler (arch/arm64/kernel/entry-common.c:744)
+el0t_64_sync (arch/arm64/kernel/entry.S:596)
+
+Allocated by task 2120:
+kasan_save_stack (mm/kasan/common.c:58)
+kasan_save_track (./arch/arm64/include/asm/current.h:19 mm/kasan/common.c:70 mm/kasan/common.c:79)
+kasan_save_alloc_info (mm/kasan/generic.c:571)
+__kasan_kmalloc (mm/kasan/common.c:419)
+__kmalloc_noprof (./include/linux/kasan.h:263 mm/slub.c:5260 mm/slub.c:5272)
+aa_get_buffer (security/apparmor/lsm.c:2201)
+aa_bind_mount (security/apparmor/mount.c:442)
+apparmor_sb_mount (security/apparmor/lsm.c:719 (discriminator 1))
+security_sb_mount (security/security.c:1062 (discriminator 31))
+path_mount (fs/namespace.c:4101)
+__arm64_sys_mount (fs/namespace.c:4172 fs/namespace.c:4361 fs/namespace.c:4338 fs/namespace.c:4338)
+invoke_syscall.constprop.0 (arch/arm64/kernel/syscall.c:35 arch/arm64/kernel/syscall.c:49)
+el0_svc_common.constprop.0 (./include/linux/thread_info.h:142 (discriminator 2) arch/arm64/kernel/syscall.c:140 (discriminator 2))
+do_el0_svc (arch/arm64/kernel/syscall.c:152)
+el0_svc (arch/arm64/kernel/entry-common.c:80 arch/arm64/kernel/entry-common.c:725)
+el0t_64_sync_handler (arch/arm64/kernel/entry-common.c:744)
+el0t_64_sync (arch/arm64/kernel/entry.S:596)
+
+The buggy address belongs to the object at ffff0008901ca000
+which belongs to the cache kmalloc-rnd-06-8k of size 8192
+The buggy address is located 0 bytes to the right of
+allocated 8192-byte region [ffff0008901ca000, ffff0008901cc000)
+
+The buggy address belongs to the physical page:
+page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x9101c8
+head: order:3 mapcount:0 entire_mapcount:0 nr_pages_mapped:-1 pincount:0
+flags: 0x8000000000000040(head|zone=2)
+page_type: f5(slab)
+raw: 8000000000000040 ffff000800016c40 fffffdffe2d14e10 ffff000800015c70
+raw: 0000000000000000 0000000800010001 00000000f5000000 0000000000000000
+head: 8000000000000040 ffff000800016c40 fffffdffe2d14e10 ffff000800015c70
+head: 0000000000000000 0000000800010001 00000000f5000000 0000000000000000
+head: 8000000000000003 fffffdffe2407201 fffffdffffffffff 00000000ffffffff
+head: ffffffffffffffff 0000000000000000 00000000ffffffff 0000000000000008
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ffff0008901cbf00: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ffff0008901cbf80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+>ffff0008901cc000: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+^
+ffff0008901cc080: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+ffff0008901cc100: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
+
+This was introduced by previous incorrect conversion from strcpy(). Fix it
+by adding the missing terminator.
+
+Cc: stable@vger.kernel.org
+Reviewed-by: Georgia Garcia <georgia.garcia@canonical.com>
+Signed-off-by: Daniel J Blueman <daniel@quora.org>
+Fixes: 93d4dbdc8da0 ("apparmor: Replace deprecated strcpy in d_namespace_path")
+Signed-off-by: John Johansen <john.johansen@canonical.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/apparmor/path.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/security/apparmor/path.c b/security/apparmor/path.c
+index 65a0ca5cc1bd..2494e8101538 100644
+--- a/security/apparmor/path.c
++++ b/security/apparmor/path.c
+@@ -164,14 +164,16 @@ static int d_namespace_path(const struct path *path, char *buf, char **name,
+       }
+ out:
+-      /* Append "/" to directory paths, except for root "/" which
+-       * already ends in a slash.
++      /* Append "/" to directory paths and reterminate string, except for
++       * root "/" which already ends in a slash.
+        */
+       if (!error && isdir) {
+               bool is_root = (*name)[0] == '/' && (*name)[1] == '\0';
+-              if (!is_root)
++              if (!is_root) {
+                       buf[aa_g_path_max - 2] = '/';
++                      buf[aa_g_path_max - 1] = '\0';
++              }
+       }
+       return error;
+-- 
+2.54.0
+
diff --git a/queue-7.0/arm64-dts-marvell-udpu-add-ethernet-aliases.patch b/queue-7.0/arm64-dts-marvell-udpu-add-ethernet-aliases.patch
new file mode 100644 (file)
index 0000000..e2bd95e
--- /dev/null
@@ -0,0 +1,39 @@
+From 38f09c97340cd23f976242e6cb1e7aa4c8ed28d0 Mon Sep 17 00:00:00 2001
+From: Robert Marko <robert.marko@sartura.hr>
+Date: Tue, 27 Jan 2026 13:32:15 +0100
+Subject: arm64: dts: marvell: uDPU: add ethernet aliases
+
+From: Robert Marko <robert.marko@sartura.hr>
+
+commit 38f09c97340cd23f976242e6cb1e7aa4c8ed28d0 upstream.
+
+On eDPU plus, which is an updated revision of eDPU which uses an external
+MV88E6361 switch we are relying on U-Boot to detect the board, and then
+enable and disable the required nodes for that revision.
+
+However, it seems that I missed adding the required aliases for ethernet
+controllers, and this worked as in OpenWrt we had added those locally.
+
+Cc: stable@vger.kernel.org
+Fixes: 660b8b2f3944 ("arm64: dts: marvell: eDPU: add support for version with external switch")
+Signed-off-by: Robert Marko <robert.marko@sartura.hr>
+Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-3720-uDPU.dtsi
+@@ -15,6 +15,11 @@
+ #include "armada-372x.dtsi"
+ / {
++      aliases {
++              ethernet0 = &eth0;
++              ethernet1 = &eth1;
++      };
++
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
diff --git a/queue-7.0/drm-arcpgu-fix-device-node-leak.patch b/queue-7.0/drm-arcpgu-fix-device-node-leak.patch
new file mode 100644 (file)
index 0000000..b608e6c
--- /dev/null
@@ -0,0 +1,39 @@
+From ad3ac32a3893a2bbcad545efc005a8e4e7ecf10c Mon Sep 17 00:00:00 2001
+From: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Date: Thu, 2 Apr 2026 18:42:20 +0200
+Subject: drm/arcpgu: fix device node leak
+
+From: Luca Ceresoli <luca.ceresoli@bootlin.com>
+
+commit ad3ac32a3893a2bbcad545efc005a8e4e7ecf10c upstream.
+
+This function gets a device_node reference via
+of_graph_get_remote_port_parent() and stores it in encoder_node, but never
+puts that reference. Add it.
+
+There used to be a of_node_put(encoder_node) but it has been removed by
+mistake during a rework in commit 3ea66a794fdc ("drm/arc: Inline
+arcpgu_drm_hdmi_init").
+
+Fixes: 3ea66a794fdc ("drm/arc: Inline arcpgu_drm_hdmi_init")
+Cc: stable@vger.kernel.org
+Reviewed-by: Louis Chauvet <louis.chauvet@bootlin.com>
+Link: https://patch.msgid.link/20260402-drm-arcgpu-fix-device-node-leak-v2-1-d773cf754ae5@bootlin.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tiny/arcpgu.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/tiny/arcpgu.c
++++ b/drivers/gpu/drm/tiny/arcpgu.c
+@@ -250,7 +250,8 @@ DEFINE_DRM_GEM_DMA_FOPS(arcpgu_drm_ops);
+ static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
+ {
+       struct platform_device *pdev = to_platform_device(arcpgu->drm.dev);
+-      struct device_node *encoder_node = NULL, *endpoint_node = NULL;
++      struct device_node *encoder_node __free(device_node) = NULL;
++      struct device_node *endpoint_node = NULL;
+       struct drm_connector *connector = NULL;
+       struct drm_device *drm = &arcpgu->drm;
+       int ret;
diff --git a/queue-7.0/extract-cert-wrap-key_pass-with-ifdef-use_pkcs11_engine.patch b/queue-7.0/extract-cert-wrap-key_pass-with-ifdef-use_pkcs11_engine.patch
new file mode 100644 (file)
index 0000000..33bd510
--- /dev/null
@@ -0,0 +1,61 @@
+From 4f96b7c68a9904e01049ef610d701b382dca9574 Mon Sep 17 00:00:00 2001
+From: Nathan Chancellor <nathan@kernel.org>
+Date: Wed, 25 Mar 2026 18:19:15 -0700
+Subject: extract-cert: Wrap key_pass with '#ifdef USE_PKCS11_ENGINE'
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+commit 4f96b7c68a9904e01049ef610d701b382dca9574 upstream.
+
+A recent strengthening of -Wunused-but-set-variable (enabled with -Wall)
+in clang under a new subwarning, -Wunused-but-set-global, points out an
+unused static global variable in certs/extract-cert.c:
+
+  certs/extract-cert.c:46:20: error: variable 'key_pass' set but not used [-Werror,-Wunused-but-set-global]
+     46 | static const char *key_pass;
+        |                    ^
+
+After commit 558bdc45dfb2 ("sign-file,extract-cert: use pkcs11 provider
+for OPENSSL MAJOR >= 3"), key_pass is only used with the OpenSSL engine
+API, not the new provider API. Wrap key_pass's declaration and
+assignment with '#ifdef USE_PKCS11_ENGINE' so that it is only included
+with its use to clear up the warning. While this is a little uglier than
+just marking key_pass with the unused attribute, this will make it
+easier to clean up all code associated with the use of the engine API if
+it were ever removed in the future. While in the area, use a tab for
+the key_pass assignment line to match the rest of the file.
+
+Cc: stable@vger.kernel.org
+Fixes: 558bdc45dfb2 ("sign-file,extract-cert: use pkcs11 provider for OPENSSL MAJOR >= 3")
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Tested-by: Nick Desaulniers <ndesaulniers@google.com>
+Link: https://patch.msgid.link/20260325-certs-extract-cert-key_pass-unused-but-set-global-v1-1-ecf94326d532@kernel.org
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ certs/extract-cert.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/certs/extract-cert.c
++++ b/certs/extract-cert.c
+@@ -43,7 +43,9 @@ void format(void)
+       exit(2);
+ }
++#ifdef USE_PKCS11_ENGINE
+ static const char *key_pass;
++#endif
+ static BIO *wb;
+ static char *cert_dst;
+ static bool verbose;
+@@ -135,7 +137,9 @@ int main(int argc, char **argv)
+       if (verbose_env && strchr(verbose_env, '1'))
+               verbose = true;
+-        key_pass = getenv("KBUILD_SIGN_PIN");
++#ifdef USE_PKCS11_ENGINE
++      key_pass = getenv("KBUILD_SIGN_PIN");
++#endif
+       if (argc != 3)
+               format();
diff --git a/queue-7.0/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch b/queue-7.0/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
new file mode 100644 (file)
index 0000000..9f1b40a
--- /dev/null
@@ -0,0 +1,366 @@
+From 9ded47ad003f09a94b6a710b5c47f4aa5ceb7429 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 24 Feb 2026 09:25:54 +0100
+Subject: fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit 9ded47ad003f09a94b6a710b5c47f4aa5ceb7429 upstream.
+
+Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an
+instance as part of initializing deferred I/O and remove it only after
+the final mapping has been closed. If the fb_info and the contained
+deferred I/O meanwhile goes away, clear struct fb_deferred_io_state.info
+to invalidate the mapping. Any access will then result in a SIGBUS
+signal.
+
+Fixes a long-standing problem, where a device hot-unplug happens while
+user space still has an active mapping of the graphics memory. The hot-
+unplug frees the instance of struct fb_info. Accessing the memory will
+operate on undefined state.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support")
+Cc: Helge Deller <deller@gmx.de>
+Cc: linux-fbdev@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: stable@vger.kernel.org # v2.6.22+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_defio.c |  178 ++++++++++++++++++++++++++++--------
+ include/linux/fb.h                  |    4 
+ 2 files changed, 145 insertions(+), 37 deletions(-)
+
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -24,6 +24,75 @@
+ #include <linux/rmap.h>
+ #include <linux/pagemap.h>
++/*
++ * struct fb_deferred_io_state
++ */
++
++struct fb_deferred_io_state {
++      struct kref ref;
++
++      struct mutex lock; /* mutex that protects the pageref list */
++      /* fields protected by lock */
++      struct fb_info *info;
++};
++
++static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
++{
++      struct fb_deferred_io_state *fbdefio_state;
++
++      fbdefio_state = kzalloc_obj(*fbdefio_state);
++      if (!fbdefio_state)
++              return NULL;
++
++      kref_init(&fbdefio_state->ref);
++      mutex_init(&fbdefio_state->lock);
++
++      return fbdefio_state;
++}
++
++static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
++{
++      mutex_destroy(&fbdefio_state->lock);
++
++      kfree(fbdefio_state);
++}
++
++static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
++{
++      kref_get(&fbdefio_state->ref);
++}
++
++static void __fb_deferred_io_state_release(struct kref *ref)
++{
++      struct fb_deferred_io_state *fbdefio_state =
++              container_of(ref, struct fb_deferred_io_state, ref);
++
++      fb_deferred_io_state_release(fbdefio_state);
++}
++
++static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
++{
++      kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
++}
++
++/*
++ * struct vm_operations_struct
++ */
++
++static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
++{
++      struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++      fb_deferred_io_state_get(fbdefio_state);
++}
++
++static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
++{
++      struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++      fb_deferred_io_state_put(fbdefio_state);
++}
++
+ static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(s
+ /* this is to find and return the vmalloc-ed fb pages */
+ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
+ {
++      struct fb_info *info;
+       unsigned long offset;
+       struct page *page;
+-      struct fb_info *info = vmf->vma->vm_private_data;
++      vm_fault_t ret;
++      struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
++
++      mutex_lock(&fbdefio_state->lock);
++
++      info = fbdefio_state->info;
++      if (!info) {
++              ret = VM_FAULT_SIGBUS; /* our device is gone */
++              goto err_mutex_unlock;
++      }
+       offset = vmf->pgoff << PAGE_SHIFT;
+-      if (offset >= info->fix.smem_len)
+-              return VM_FAULT_SIGBUS;
++      if (offset >= info->fix.smem_len) {
++              ret = VM_FAULT_SIGBUS;
++              goto err_mutex_unlock;
++      }
+       page = fb_deferred_io_get_page(info, offset);
+-      if (!page)
+-              return VM_FAULT_SIGBUS;
++      if (!page) {
++              ret = VM_FAULT_SIGBUS;
++              goto err_mutex_unlock;
++      }
+       if (!vmf->vma->vm_file)
+               fb_err(info, "no mapping available\n");
+       BUG_ON(!info->fbdefio->mapping);
++      mutex_unlock(&fbdefio_state->lock);
++
+       vmf->page = page;
++
+       return 0;
++
++err_mutex_unlock:
++      mutex_unlock(&fbdefio_state->lock);
++      return ret;
+ }
+ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
+  * Adds a page to the dirty list. Call this from struct
+  * vm_operations_struct.page_mkwrite.
+  */
+-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+-                                          struct page *page)
++static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
++                                          unsigned long offset, struct page *page)
+ {
+-      struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_info *info;
++      struct fb_deferred_io *fbdefio;
+       struct fb_deferred_io_pageref *pageref;
+       vm_fault_t ret;
+       /* protect against the workqueue changing the page list */
+-      mutex_lock(&fbdefio->lock);
++      mutex_lock(&fbdefio_state->lock);
++
++      info = fbdefio_state->info;
++      if (!info) {
++              ret = VM_FAULT_SIGBUS; /* our device is gone */
++              goto err_mutex_unlock;
++      }
++
++      fbdefio = info->fbdefio;
+       pageref = fb_deferred_io_pageref_get(info, offset, page);
+       if (WARN_ON_ONCE(!pageref)) {
+@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_p
+        */
+       lock_page(pageref->page);
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+       /* come back after delay to process the deferred IO */
+       schedule_delayed_work(&info->deferred_work, fbdefio->delay);
+       return VM_FAULT_LOCKED;
+ err_mutex_unlock:
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+       return ret;
+ }
+-/*
+- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+- * @fb_info: The fbdev info structure
+- * @vmf: The VM fault
+- *
+- * This is a callback we get when userspace first tries to
+- * write to the page. We schedule a workqueue. That workqueue
+- * will eventually mkclean the touched pages and execute the
+- * deferred framebuffer IO. Then if userspace touches a page
+- * again, we repeat the same scheme.
+- *
+- * Returns:
+- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+- */
+-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
++static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
++                                            struct vm_fault *vmf)
+ {
+       unsigned long offset = vmf->pgoff << PAGE_SHIFT;
+       struct page *page = vmf->page;
+       file_update_time(vmf->vma->vm_file);
+-      return fb_deferred_io_track_page(info, offset, page);
++      return fb_deferred_io_track_page(fbdefio_state, offset, page);
+ }
+-/* vm_ops->page_mkwrite handler */
+ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+ {
+-      struct fb_info *info = vmf->vma->vm_private_data;
++      struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
+-      return fb_deferred_io_page_mkwrite(info, vmf);
++      return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
+ }
+ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
++      .open           = fb_deferred_io_vm_open,
++      .close          = fb_deferred_io_vm_close,
+       .fault          = fb_deferred_io_fault,
+       .page_mkwrite   = fb_deferred_io_mkwrite,
+ };
+@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+       if (!(info->flags & FBINFO_VIRTFB))
+               vm_flags_set(vma, VM_IO);
+-      vma->vm_private_data = info;
++      vma->vm_private_data = info->fbdefio_state;
++
++      fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
++
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
+@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct w
+       struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+       struct fb_deferred_io_pageref *pageref, *next;
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+       /* here we wrprotect the page's mappings, then do all deferred IO. */
+-      mutex_lock(&fbdefio->lock);
++      mutex_lock(&fbdefio_state->lock);
+ #ifdef CONFIG_MMU
+       list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+               struct page *page = pageref->page;
+@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct w
+       list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+               fb_deferred_io_pageref_put(pageref, info);
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+ }
+ int fb_deferred_io_init(struct fb_info *info)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state;
+       struct fb_deferred_io_pageref *pagerefs;
+       unsigned long npagerefs;
+       int ret;
+@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *
+       if (WARN_ON(!info->fix.smem_len))
+               return -EINVAL;
+-      mutex_init(&fbdefio->lock);
++      fbdefio_state = fb_deferred_io_state_alloc();
++      if (!fbdefio_state)
++              return -ENOMEM;
++      fbdefio_state->info = info;
++
+       INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+       INIT_LIST_HEAD(&fbdefio->pagereflist);
+       if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *
+       info->npagerefs = npagerefs;
+       info->pagerefs = pagerefs;
++      info->fbdefio_state = fbdefio_state;
++
+       return 0;
+ err:
+-      mutex_destroy(&fbdefio->lock);
++      fb_deferred_io_state_release(fbdefio_state);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+       fb_deferred_io_lastclose(info);
++      info->fbdefio_state = NULL;
++
++      mutex_lock(&fbdefio_state->lock);
++      fbdefio_state->info = NULL;
++      mutex_unlock(&fbdefio_state->lock);
++
++      fb_deferred_io_state_put(fbdefio_state);
++
+       kvfree(info->pagerefs);
+-      mutex_destroy(&fbdefio->lock);
+       fbdefio->mapping = NULL;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -218,13 +218,14 @@ struct fb_deferred_io {
+       unsigned long delay;
+       bool sort_pagereflist; /* sort pagelist by offset */
+       int open_count; /* number of opened files; protected by fb_info lock */
+-      struct mutex lock; /* mutex that protects the pageref list */
+       struct list_head pagereflist; /* list of pagerefs for touched pages */
+       struct address_space *mapping; /* page cache object for fb device */
+       /* callback */
+       struct page *(*get_page)(struct fb_info *info, unsigned long offset);
+       void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
+ };
++
++struct fb_deferred_io_state;
+ #endif
+ /*
+@@ -487,6 +488,7 @@ struct fb_info {
+       unsigned long npagerefs;
+       struct fb_deferred_io_pageref *pagerefs;
+       struct fb_deferred_io *fbdefio;
++      struct fb_deferred_io_state *fbdefio_state;
+ #endif
+       const struct fb_ops *fbops;
diff --git a/queue-7.0/fs-prepare-for-adding-lsm-blob-to-backing_file.patch b/queue-7.0/fs-prepare-for-adding-lsm-blob-to-backing_file.patch
new file mode 100644 (file)
index 0000000..8cf6e71
--- /dev/null
@@ -0,0 +1,78 @@
+From 880bd496ec72a6dcb00cb70c430ef752ba242ae7 Mon Sep 17 00:00:00 2001
+From: Amir Goldstein <amir73il@gmail.com>
+Date: Mon, 30 Mar 2026 10:27:51 +0200
+Subject: fs: prepare for adding LSM blob to backing_file
+
+From: Amir Goldstein <amir73il@gmail.com>
+
+commit 880bd496ec72a6dcb00cb70c430ef752ba242ae7 upstream.
+
+In preparation to adding LSM blob to backing_file struct, factor out
+helpers init_backing_file() and backing_file_free().
+
+Cc: stable@vger.kernel.org
+Cc: linux-fsdevel@vger.kernel.org
+Cc: linux-unionfs@vger.kernel.org
+Cc: linux-erofs@lists.ozlabs.org
+Signed-off-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Serge Hallyn <serge@hallyn.com>
+[PM: use the term "LSM blob", fix comment style to match file]
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/file_table.c |   22 ++++++++++++++++++++--
+ 1 file changed, 20 insertions(+), 2 deletions(-)
+
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -66,6 +66,12 @@ void backing_file_set_user_path(struct f
+ }
+ EXPORT_SYMBOL_GPL(backing_file_set_user_path);
++static inline void backing_file_free(struct backing_file *ff)
++{
++      path_put(&ff->user_path);
++      kmem_cache_free(bfilp_cachep, ff);
++}
++
+ static inline void file_free(struct file *f)
+ {
+       security_file_free(f);
+@@ -73,8 +79,7 @@ static inline void file_free(struct file
+               percpu_counter_dec(&nr_files);
+       put_cred(f->f_cred);
+       if (unlikely(f->f_mode & FMODE_BACKING)) {
+-              path_put(backing_file_user_path(f));
+-              kmem_cache_free(bfilp_cachep, backing_file(f));
++              backing_file_free(backing_file(f));
+       } else {
+               kmem_cache_free(filp_cachep, f);
+       }
+@@ -283,6 +288,12 @@ struct file *alloc_empty_file_noaccount(
+       return f;
+ }
++static int init_backing_file(struct backing_file *ff)
++{
++      memset(&ff->user_path, 0, sizeof(ff->user_path));
++      return 0;
++}
++
+ /*
+  * Variant of alloc_empty_file() that allocates a backing_file container
+  * and doesn't check and modify nr_files.
+@@ -305,7 +316,14 @@ struct file *alloc_empty_backing_file(in
+               return ERR_PTR(error);
+       }
++      /* The f_mode flags must be set before fput(). */
+       ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
++      error = init_backing_file(ff);
++      if (unlikely(error)) {
++              fput(&ff->file);
++              return ERR_PTR(error);
++      }
++
+       return &ff->file;
+ }
+ EXPORT_SYMBOL_GPL(alloc_empty_backing_file);
diff --git a/queue-7.0/hwmon-isl28022-fix-integer-overflow-in-power-calculation-on-32-bit.patch b/queue-7.0/hwmon-isl28022-fix-integer-overflow-in-power-calculation-on-32-bit.patch
new file mode 100644 (file)
index 0000000..52d77ca
--- /dev/null
@@ -0,0 +1,64 @@
+From a7c0aaa50e40ffd8fd703d006d5a04b540b9ca92 Mon Sep 17 00:00:00 2001
+From: Sanman Pradhan <psanman@juniper.net>
+Date: Fri, 10 Apr 2026 00:26:19 +0000
+Subject: hwmon: (isl28022) Fix integer overflow in power calculation on 32-bit
+
+From: Sanman Pradhan <psanman@juniper.net>
+
+commit a7c0aaa50e40ffd8fd703d006d5a04b540b9ca92 upstream.
+
+isl28022_read_power() computes:
+
+  *val = ((51200000L * ((long)data->gain)) /
+          (long)data->shunt) * (long)regval;
+
+On 32-bit platforms, 'long' is 32 bits. With gain=8 and shunt=10000
+(the default configuration):
+
+  (51200000 * 8) / 10000 = 40960
+  40960 * 65535 = 2,684,313,600
+
+This exceeds LONG_MAX (2,147,483,647), resulting in signed integer
+overflow.
+
+Additionally, dividing before multiplying by regval loses precision
+unnecessarily.
+
+Use u64 arithmetic with div_u64() and multiply before dividing to
+retain precision. The intermediate product cannot overflow u64
+(worst case: 51200000 * 8 * 65535 = 26843136000000). Power is
+inherently non-negative, so unsigned types are the natural fit.
+Cap the result to LONG_MAX before returning it through the hwmon
+callback.
+
+Fixes: 39671a14df4f2 ("hwmon: (isl28022) new driver for ISL28022 power monitor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sanman Pradhan <psanman@juniper.net>
+Link: https://lore.kernel.org/r/20260410002613.424557-1-sanman.pradhan@hpe.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/isl28022.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwmon/isl28022.c
++++ b/drivers/hwmon/isl28022.c
+@@ -9,6 +9,7 @@
+ #include <linux/err.h>
+ #include <linux/hwmon.h>
+ #include <linux/i2c.h>
++#include <linux/math64.h>
+ #include <linux/module.h>
+ #include <linux/regmap.h>
+@@ -185,8 +186,8 @@ static int isl28022_read_power(struct de
+                                 ISL28022_REG_POWER, &regval);
+               if (err < 0)
+                       return err;
+-              *val = ((51200000L * ((long)data->gain)) /
+-                      (long)data->shunt) * (long)regval;
++              *val = min(div_u64(51200000ULL * data->gain * regval,
++                                 data->shunt), LONG_MAX);
+               break;
+       default:
+               return -EOPNOTSUPP;
diff --git a/queue-7.0/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch b/queue-7.0/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
new file mode 100644 (file)
index 0000000..279ba47
--- /dev/null
@@ -0,0 +1,54 @@
+From 3023c050af3600bf451153335dea5e073c9a3088 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= <linux@weissschuh.net>
+Date: Wed, 8 Apr 2026 20:45:50 +0200
+Subject: hwmon: (powerz) Avoid cacheline sharing for DMA buffer
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weißschuh <linux@weissschuh.net>
+
+commit 3023c050af3600bf451153335dea5e073c9a3088 upstream.
+
+Depending on the architecture the transfer buffer may share a cacheline
+with the following mutex. As the buffer may be used for DMA, that is
+problematic.
+
+Use the high-level DMA helpers to make sure that cacheline sharing can
+not happen.
+
+Also drop the comment, as the helpers are documentation enough.
+
+https://sashiko.dev/#/message/20260408175814.934BFC19421%40smtp.kernel.org
+
+Fixes: 4381a36abdf1c ("hwmon: add POWER-Z driver")
+Cc: stable@vger.kernel.org # ca085faabb42: dma-mapping: add __dma_from_device_group_begin()/end()
+Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/r/20260408-powerz-cacheline-alias-v1-1-1254891be0dd@weissschuh.net
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/powerz.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/hwmon/powerz.c
++++ b/drivers/hwmon/powerz.c
+@@ -6,6 +6,7 @@
+ #include <linux/completion.h>
+ #include <linux/device.h>
++#include <linux/dma-mapping.h>
+ #include <linux/hwmon.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+@@ -33,7 +34,9 @@ struct powerz_sensor_data {
+ } __packed;
+ struct powerz_priv {
+-      char transfer_buffer[64];       /* first member to satisfy DMA alignment */
++      __dma_from_device_group_begin();
++      char transfer_buffer[64];
++      __dma_from_device_group_end();
+       struct mutex mutex;
+       struct completion completion;
+       struct urb *urb;
diff --git a/queue-7.0/ipv4-icmp-validate-reply-type-before-using-icmp_pointers.patch b/queue-7.0/ipv4-icmp-validate-reply-type-before-using-icmp_pointers.patch
new file mode 100644 (file)
index 0000000..d265c7b
--- /dev/null
@@ -0,0 +1,54 @@
+From 67bf002a2d7387a6312138210d0bd06e3cf4879b Mon Sep 17 00:00:00 2001
+From: Ruide Cao <caoruide123@gmail.com>
+Date: Tue, 21 Apr 2026 12:16:31 +0800
+Subject: ipv4: icmp: validate reply type before using icmp_pointers
+
+From: Ruide Cao <caoruide123@gmail.com>
+
+commit 67bf002a2d7387a6312138210d0bd06e3cf4879b upstream.
+
+Extended echo replies use ICMP_EXT_ECHOREPLY as the outbound reply type.
+That value is outside the range covered by icmp_pointers[], which only
+describes the traditional ICMP types up to NR_ICMP_TYPES.
+
+Avoid consulting icmp_pointers[] for reply types outside that range, and
+use array_index_nospec() for the remaining in-range lookup. Normal ICMP
+replies keep their existing behavior unchanged.
+
+Fixes: d329ea5bd884 ("icmp: add response to RFC 8335 PROBE messages")
+Cc: stable@kernel.org
+Reported-by: Yuan Tan <yuantan098@gmail.com>
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Ruide Cao <caoruide123@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/0dace90c01a5978e829ca741ef684dbd7304ce62.1776628519.git.caoruide123@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/icmp.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -64,6 +64,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/kernel.h>
+ #include <linux/fcntl.h>
++#include <linux/nospec.h>
+ #include <linux/socket.h>
+ #include <linux/in.h>
+ #include <linux/inet.h>
+@@ -373,7 +374,9 @@ static int icmp_glue_bits(void *from, ch
+                                     to, len);
+       skb->csum = csum_block_add(skb->csum, csum, odd);
+-      if (icmp_pointers[icmp_param->data.icmph.type].error)
++      if (icmp_param->data.icmph.type <= NR_ICMP_TYPES &&
++          icmp_pointers[array_index_nospec(icmp_param->data.icmph.type,
++                                           NR_ICMP_TYPES + 1)].error)
+               nf_ct_attach(skb, icmp_param->skb);
+       return 0;
+ }
diff --git a/queue-7.0/libceph-prevent-potential-null-ptr-deref-in-ceph_handle_auth_reply.patch b/queue-7.0/libceph-prevent-potential-null-ptr-deref-in-ceph_handle_auth_reply.patch
new file mode 100644 (file)
index 0000000..4478ef4
--- /dev/null
@@ -0,0 +1,42 @@
+From 5199c125d25aeae8615c4fc31652cc0fe624338e Mon Sep 17 00:00:00 2001
+From: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+Date: Wed, 18 Mar 2026 18:09:03 +0100
+Subject: libceph: Prevent potential null-ptr-deref in ceph_handle_auth_reply()
+
+From: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+
+commit 5199c125d25aeae8615c4fc31652cc0fe624338e upstream.
+
+If a message of type CEPH_MSG_AUTH_REPLY contains a zero value for both
+protocol and result, this is currently not treated as an error. In case
+of ac->negotiating == true and ac->protocol > 0, this leads to setting
+ac->protocol = 0 and ac->ops = NULL. Thereafter, the check for
+ac->protocol != protocol returns false, and init_protocol() is not
+called. Subsequently, ac->ops->handle_reply() is called, which leads to
+a null pointer dereference, because ac->ops is still NULL.
+
+This patch changes the check for ac->protocol != protocol to
+!ac->protocol, as this also includes the case when the protocol was set
+to zero in the message. This causes the message to be treated as
+containing a bad auth protocol.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Raphael Zimmer <raphael.zimmer@tu-ilmenau.de>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ceph/auth.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ceph/auth.c
++++ b/net/ceph/auth.c
+@@ -245,7 +245,7 @@ int ceph_handle_auth_reply(struct ceph_a
+                       ac->protocol = 0;
+                       ac->ops = NULL;
+               }
+-              if (ac->protocol != protocol) {
++              if (!ac->protocol) {
+                       ret = init_protocol(ac, protocol);
+                       if (ret) {
+                               pr_err("auth protocol '%s' init failed: %d\n",
diff --git a/queue-7.0/loongarch-make-arch_irq_work_has_interrupt-true-only-if-ipi-hw-exist.patch b/queue-7.0/loongarch-make-arch_irq_work_has_interrupt-true-only-if-ipi-hw-exist.patch
new file mode 100644 (file)
index 0000000..625369e
--- /dev/null
@@ -0,0 +1,34 @@
+From 02a6a1f9d77a816fbac01de9bfcd0e0914552f2f Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Wed, 22 Apr 2026 15:45:12 +0800
+Subject: LoongArch: Make arch_irq_work_has_interrupt() true only if IPI HW exist
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 02a6a1f9d77a816fbac01de9bfcd0e0914552f2f upstream.
+
+After commit 7c405fb3279b3924 ("rcu: Use an intermediate irq_work to
+start process_srcu()"), Loongson-2K0300/2K0500 fail to boot. Because
+IRQ_WORK need IPI but Loongson-2K0300/2K0500 don't have IPI HW.
+
+So make arch_irq_work_has_interrupt() return true only if IPI HW exist.
+
+Cc: stable@vger.kernel.org
+Reported-by: Binbin Zhou <zhoubinbin@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/irq_work.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/loongarch/include/asm/irq_work.h
++++ b/arch/loongarch/include/asm/irq_work.h
+@@ -4,7 +4,7 @@
+ static inline bool arch_irq_work_has_interrupt(void)
+ {
+-      return IS_ENABLED(CONFIG_SMP);
++      return IS_ENABLED(CONFIG_SMP) && cpu_opt(LOONGARCH_CPU_CSRIPI);
+ }
+ #endif /* _ASM_LOONGARCH_IRQ_WORK_H */
diff --git a/queue-7.0/loongarch-show-cpu-vulnerabilites-correctly.patch b/queue-7.0/loongarch-show-cpu-vulnerabilites-correctly.patch
new file mode 100644 (file)
index 0000000..b6842d9
--- /dev/null
@@ -0,0 +1,45 @@
+From 37e57e8ad96cdec4a57b55fd10bef50f7370a954 Mon Sep 17 00:00:00 2001
+From: Huacai Chen <chenhuacai@loongson.cn>
+Date: Wed, 22 Apr 2026 15:45:12 +0800
+Subject: LoongArch: Show CPU vulnerabilites correctly
+
+From: Huacai Chen <chenhuacai@loongson.cn>
+
+commit 37e57e8ad96cdec4a57b55fd10bef50f7370a954 upstream.
+
+Most LoongArch processors are vulnerable to Spectre-V1 Proof-of-Concept
+(PoC). And the generic mechanism, __user pointer sanitization, can be
+used as a mitigation. This means to use array_index_nospec() to prevent
+out of boundry access in syscall and other critical paths.
+
+Implement the arch-specific cpu_show_spectre_v1() to show CPU Spectre-V1
+vulnerabilites correctly.
+
+Cc: stable@vger.kernel.org
+Link: https://cc-sw.com/chinese-loongarch-architecture-evaluation-part-3-of-3/
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/cpu-probe.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/arch/loongarch/kernel/cpu-probe.c
++++ b/arch/loongarch/kernel/cpu-probe.c
+@@ -7,6 +7,7 @@
+ #include <linux/init.h>
+ #include <linux/kernel.h>
+ #include <linux/ptrace.h>
++#include <linux/cpu.h>
+ #include <linux/smp.h>
+ #include <linux/stddef.h>
+ #include <linux/export.h>
+@@ -402,3 +403,9 @@ void cpu_probe(void)
+       cpu_report();
+ }
++
++ssize_t cpu_show_spectre_v1(struct device *dev,
++                          struct device_attribute *attr, char *buf)
++{
++      return sysfs_emit(buf, "Mitigation: __user pointer sanitization\n");
++}
diff --git a/queue-7.0/lsm-add-backing_file-lsm-hooks.patch b/queue-7.0/lsm-add-backing_file-lsm-hooks.patch
new file mode 100644 (file)
index 0000000..e671803
--- /dev/null
@@ -0,0 +1,580 @@
+From 6af36aeb147a06dea47c49859cd6ca5659aeb987 Mon Sep 17 00:00:00 2001
+From: Paul Moore <paul@paul-moore.com>
+Date: Fri, 19 Dec 2025 13:18:22 -0500
+Subject: lsm: add backing_file LSM hooks
+
+From: Paul Moore <paul@paul-moore.com>
+
+commit 6af36aeb147a06dea47c49859cd6ca5659aeb987 upstream.
+
+Stacked filesystems such as overlayfs do not currently provide the
+necessary mechanisms for LSMs to properly enforce access controls on the
+mmap() and mprotect() operations.  In order to resolve this gap, a LSM
+security blob is being added to the backing_file struct and the following
+new LSM hooks are being created:
+
+ security_backing_file_alloc()
+ security_backing_file_free()
+ security_mmap_backing_file()
+
+The first two hooks are to manage the lifecycle of the LSM security blob
+in the backing_file struct, while the third provides a new mmap() access
+control point for the underlying backing file.  It is also expected that
+LSMs will likely want to update their security_file_mprotect() callback
+to address issues with their mprotect() controls, but that does not
+require a change to the security_file_mprotect() LSM hook.
+
+There are a three other small changes to support these new LSM hooks:
+* Pass the user file associated with a backing file down to
+alloc_empty_backing_file() so it can be included in the
+security_backing_file_alloc() hook.
+* Add getter and setter functions for the backing_file struct LSM blob
+as the backing_file struct remains private to fs/file_table.c.
+* Constify the file struct field in the LSM common_audit_data struct to
+better support LSMs that need to pass a const file struct pointer into
+the common LSM audit code.
+
+Thanks to Arnd Bergmann for identifying the missing EXPORT_SYMBOL_GPL()
+and supplying a fixup.
+
+Cc: stable@vger.kernel.org
+Cc: linux-fsdevel@vger.kernel.org
+Cc: linux-unionfs@vger.kernel.org
+Cc: linux-erofs@lists.ozlabs.org
+Reviewed-by: Amir Goldstein <amir73il@gmail.com>
+Reviewed-by: Serge Hallyn <serge@hallyn.com>
+Reviewed-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/backing-file.c             |   18 +++++--
+ fs/erofs/ishare.c             |   10 +++-
+ fs/file_table.c               |   27 +++++++++--
+ fs/fuse/passthrough.c         |    2 
+ fs/internal.h                 |    3 -
+ fs/overlayfs/dir.c            |    2 
+ fs/overlayfs/file.c           |    2 
+ include/linux/backing-file.h  |    4 -
+ include/linux/fs.h            |   13 +++++
+ include/linux/lsm_audit.h     |    2 
+ include/linux/lsm_hook_defs.h |    5 ++
+ include/linux/lsm_hooks.h     |    1 
+ include/linux/security.h      |   22 +++++++++
+ security/lsm.h                |    1 
+ security/lsm_init.c           |    9 +++
+ security/security.c           |  102 ++++++++++++++++++++++++++++++++++++++++++
+ 16 files changed, 206 insertions(+), 17 deletions(-)
+
+--- a/fs/backing-file.c
++++ b/fs/backing-file.c
+@@ -12,6 +12,7 @@
+ #include <linux/backing-file.h>
+ #include <linux/splice.h>
+ #include <linux/mm.h>
++#include <linux/security.h>
+ #include "internal.h"
+@@ -29,14 +30,15 @@
+  * returned file into a container structure that also stores the stacked
+  * file's path, which can be retrieved using backing_file_user_path().
+  */
+-struct file *backing_file_open(const struct path *user_path, int flags,
++struct file *backing_file_open(const struct file *user_file, int flags,
+                              const struct path *real_path,
+                              const struct cred *cred)
+ {
++      const struct path *user_path = &user_file->f_path;
+       struct file *f;
+       int error;
+-      f = alloc_empty_backing_file(flags, cred);
++      f = alloc_empty_backing_file(flags, cred, user_file);
+       if (IS_ERR(f))
+               return f;
+@@ -52,15 +54,16 @@ struct file *backing_file_open(const str
+ }
+ EXPORT_SYMBOL_GPL(backing_file_open);
+-struct file *backing_tmpfile_open(const struct path *user_path, int flags,
++struct file *backing_tmpfile_open(const struct file *user_file, int flags,
+                                 const struct path *real_parentpath,
+                                 umode_t mode, const struct cred *cred)
+ {
+       struct mnt_idmap *real_idmap = mnt_idmap(real_parentpath->mnt);
++      const struct path *user_path = &user_file->f_path;
+       struct file *f;
+       int error;
+-      f = alloc_empty_backing_file(flags, cred);
++      f = alloc_empty_backing_file(flags, cred, user_file);
+       if (IS_ERR(f))
+               return f;
+@@ -336,8 +339,13 @@ int backing_file_mmap(struct file *file,
+       vma_set_file(vma, file);
+-      scoped_with_creds(ctx->cred)
++      scoped_with_creds(ctx->cred) {
++              ret = security_mmap_backing_file(vma, file, user_file);
++              if (ret)
++                      return ret;
++
+               ret = vfs_mmap(vma->vm_file, vma);
++      }
+       if (ctx->accessed)
+               ctx->accessed(user_file);
+--- a/fs/erofs/ishare.c
++++ b/fs/erofs/ishare.c
+@@ -4,6 +4,7 @@
+  */
+ #include <linux/xxhash.h>
+ #include <linux/mount.h>
++#include <linux/security.h>
+ #include "internal.h"
+ #include "xattr.h"
+@@ -106,7 +107,8 @@ static int erofs_ishare_file_open(struct
+       if (file->f_flags & O_DIRECT)
+               return -EINVAL;
+-      realfile = alloc_empty_backing_file(O_RDONLY|O_NOATIME, current_cred());
++      realfile = alloc_empty_backing_file(O_RDONLY|O_NOATIME, current_cred(),
++                                          file);
+       if (IS_ERR(realfile))
+               return PTR_ERR(realfile);
+       ihold(sharedinode);
+@@ -150,8 +152,14 @@ static ssize_t erofs_ishare_file_read_it
+ static int erofs_ishare_mmap(struct file *file, struct vm_area_struct *vma)
+ {
+       struct file *realfile = file->private_data;
++      int err;
+       vma_set_file(vma, realfile);
++
++      err = security_mmap_backing_file(vma, realfile, file);
++      if (err)
++              return err;
++
+       return generic_file_readonly_mmap(file, vma);
+ }
+--- a/fs/file_table.c
++++ b/fs/file_table.c
+@@ -50,6 +50,9 @@ struct backing_file {
+               struct path user_path;
+               freeptr_t bf_freeptr;
+       };
++#ifdef CONFIG_SECURITY
++      void *security;
++#endif
+ };
+ #define backing_file(f) container_of(f, struct backing_file, file)
+@@ -66,8 +69,21 @@ void backing_file_set_user_path(struct f
+ }
+ EXPORT_SYMBOL_GPL(backing_file_set_user_path);
++#ifdef CONFIG_SECURITY
++void *backing_file_security(const struct file *f)
++{
++      return backing_file(f)->security;
++}
++
++void backing_file_set_security(struct file *f, void *security)
++{
++      backing_file(f)->security = security;
++}
++#endif /* CONFIG_SECURITY */
++
+ static inline void backing_file_free(struct backing_file *ff)
+ {
++      security_backing_file_free(&ff->file);
+       path_put(&ff->user_path);
+       kmem_cache_free(bfilp_cachep, ff);
+ }
+@@ -288,10 +304,12 @@ struct file *alloc_empty_file_noaccount(
+       return f;
+ }
+-static int init_backing_file(struct backing_file *ff)
++static int init_backing_file(struct backing_file *ff,
++                           const struct file *user_file)
+ {
+       memset(&ff->user_path, 0, sizeof(ff->user_path));
+-      return 0;
++      backing_file_set_security(&ff->file, NULL);
++      return security_backing_file_alloc(&ff->file, user_file);
+ }
+ /*
+@@ -301,7 +319,8 @@ static int init_backing_file(struct back
+  * This is only for kernel internal use, and the allocate file must not be
+  * installed into file tables or such.
+  */
+-struct file *alloc_empty_backing_file(int flags, const struct cred *cred)
++struct file *alloc_empty_backing_file(int flags, const struct cred *cred,
++                                    const struct file *user_file)
+ {
+       struct backing_file *ff;
+       int error;
+@@ -318,7 +337,7 @@ struct file *alloc_empty_backing_file(in
+       /* The f_mode flags must be set before fput(). */
+       ff->file.f_mode |= FMODE_BACKING | FMODE_NOACCOUNT;
+-      error = init_backing_file(ff);
++      error = init_backing_file(ff, user_file);
+       if (unlikely(error)) {
+               fput(&ff->file);
+               return ERR_PTR(error);
+--- a/fs/fuse/passthrough.c
++++ b/fs/fuse/passthrough.c
+@@ -167,7 +167,7 @@ struct fuse_backing *fuse_passthrough_op
+               goto out;
+       /* Allocate backing file per fuse file to store fuse path */
+-      backing_file = backing_file_open(&file->f_path, file->f_flags,
++      backing_file = backing_file_open(file, file->f_flags,
+                                        &fb->file->f_path, fb->cred);
+       err = PTR_ERR(backing_file);
+       if (IS_ERR(backing_file)) {
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -106,7 +106,8 @@ extern void chroot_fs_refs(const struct
+  */
+ struct file *alloc_empty_file(int flags, const struct cred *cred);
+ struct file *alloc_empty_file_noaccount(int flags, const struct cred *cred);
+-struct file *alloc_empty_backing_file(int flags, const struct cred *cred);
++struct file *alloc_empty_backing_file(int flags, const struct cred *cred,
++                                    const struct file *user_file);
+ void backing_file_set_user_path(struct file *f, const struct path *path);
+ static inline void file_put_write_access(struct file *file)
+--- a/fs/overlayfs/dir.c
++++ b/fs/overlayfs/dir.c
+@@ -1374,7 +1374,7 @@ static int ovl_create_tmpfile(struct fil
+                               return PTR_ERR(cred);
+                       ovl_path_upper(dentry->d_parent, &realparentpath);
+-                      realfile = backing_tmpfile_open(&file->f_path, flags, &realparentpath,
++                      realfile = backing_tmpfile_open(file, flags, &realparentpath,
+                                                       mode, current_cred());
+                       err = PTR_ERR_OR_ZERO(realfile);
+                       pr_debug("tmpfile/open(%pd2, 0%o) = %i\n", realparentpath.dentry, mode, err);
+--- a/fs/overlayfs/file.c
++++ b/fs/overlayfs/file.c
+@@ -48,7 +48,7 @@ static struct file *ovl_open_realfile(co
+                       if (!inode_owner_or_capable(real_idmap, realinode))
+                               flags &= ~O_NOATIME;
+-                      realfile = backing_file_open(file_user_path(file),
++                      realfile = backing_file_open(file,
+                                                    flags, realpath, current_cred());
+               }
+       }
+--- a/include/linux/backing-file.h
++++ b/include/linux/backing-file.h
+@@ -18,10 +18,10 @@ struct backing_file_ctx {
+       void (*end_write)(struct kiocb *iocb, ssize_t);
+ };
+-struct file *backing_file_open(const struct path *user_path, int flags,
++struct file *backing_file_open(const struct file *user_file, int flags,
+                              const struct path *real_path,
+                              const struct cred *cred);
+-struct file *backing_tmpfile_open(const struct path *user_path, int flags,
++struct file *backing_tmpfile_open(const struct file *user_file, int flags,
+                                 const struct path *real_parentpath,
+                                 umode_t mode, const struct cred *cred);
+ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter,
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -2475,6 +2475,19 @@ struct file *dentry_create(struct path *
+                          const struct cred *cred);
+ const struct path *backing_file_user_path(const struct file *f);
++#ifdef CONFIG_SECURITY
++void *backing_file_security(const struct file *f);
++void backing_file_set_security(struct file *f, void *security);
++#else
++static inline void *backing_file_security(const struct file *f)
++{
++      return NULL;
++}
++static inline void backing_file_set_security(struct file *f, void *security)
++{
++}
++#endif /* CONFIG_SECURITY */
++
+ /*
+  * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+  * stored in ->vm_file is a backing file whose f_inode is on the underlying
+--- a/include/linux/lsm_audit.h
++++ b/include/linux/lsm_audit.h
+@@ -94,7 +94,7 @@ struct common_audit_data {
+ #endif
+               char *kmod_name;
+               struct lsm_ioctlop_audit *op;
+-              struct file *file;
++              const struct file *file;
+               struct lsm_ibpkey_audit *ibpkey;
+               struct lsm_ibendport_audit *ibendport;
+               int reason;
+--- a/include/linux/lsm_hook_defs.h
++++ b/include/linux/lsm_hook_defs.h
+@@ -191,6 +191,9 @@ LSM_HOOK(int, 0, file_permission, struct
+ LSM_HOOK(int, 0, file_alloc_security, struct file *file)
+ LSM_HOOK(void, LSM_RET_VOID, file_release, struct file *file)
+ LSM_HOOK(void, LSM_RET_VOID, file_free_security, struct file *file)
++LSM_HOOK(int, 0, backing_file_alloc, struct file *backing_file,
++       const struct file *user_file)
++LSM_HOOK(void, LSM_RET_VOID, backing_file_free, struct file *backing_file)
+ LSM_HOOK(int, 0, file_ioctl, struct file *file, unsigned int cmd,
+        unsigned long arg)
+ LSM_HOOK(int, 0, file_ioctl_compat, struct file *file, unsigned int cmd,
+@@ -198,6 +201,8 @@ LSM_HOOK(int, 0, file_ioctl_compat, stru
+ LSM_HOOK(int, 0, mmap_addr, unsigned long addr)
+ LSM_HOOK(int, 0, mmap_file, struct file *file, unsigned long reqprot,
+        unsigned long prot, unsigned long flags)
++LSM_HOOK(int, 0, mmap_backing_file, struct vm_area_struct *vma,
++       struct file *backing_file, struct file *user_file)
+ LSM_HOOK(int, 0, file_mprotect, struct vm_area_struct *vma,
+        unsigned long reqprot, unsigned long prot)
+ LSM_HOOK(int, 0, file_lock, struct file *file, unsigned int cmd)
+--- a/include/linux/lsm_hooks.h
++++ b/include/linux/lsm_hooks.h
+@@ -104,6 +104,7 @@ struct security_hook_list {
+ struct lsm_blob_sizes {
+       unsigned int lbs_cred;
+       unsigned int lbs_file;
++      unsigned int lbs_backing_file;
+       unsigned int lbs_ib;
+       unsigned int lbs_inode;
+       unsigned int lbs_sock;
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -472,11 +472,17 @@ int security_file_permission(struct file
+ int security_file_alloc(struct file *file);
+ void security_file_release(struct file *file);
+ void security_file_free(struct file *file);
++int security_backing_file_alloc(struct file *backing_file,
++                              const struct file *user_file);
++void security_backing_file_free(struct file *backing_file);
+ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+ int security_file_ioctl_compat(struct file *file, unsigned int cmd,
+                              unsigned long arg);
+ int security_mmap_file(struct file *file, unsigned long prot,
+                       unsigned long flags);
++int security_mmap_backing_file(struct vm_area_struct *vma,
++                             struct file *backing_file,
++                             struct file *user_file);
+ int security_mmap_addr(unsigned long addr);
+ int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
+                          unsigned long prot);
+@@ -1141,6 +1147,15 @@ static inline void security_file_release
+ static inline void security_file_free(struct file *file)
+ { }
++static inline int security_backing_file_alloc(struct file *backing_file,
++                                            const struct file *user_file)
++{
++      return 0;
++}
++
++static inline void security_backing_file_free(struct file *backing_file)
++{ }
++
+ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+                                     unsigned long arg)
+ {
+@@ -1159,6 +1174,13 @@ static inline int security_mmap_file(str
+ {
+       return 0;
+ }
++
++static inline int security_mmap_backing_file(struct vm_area_struct *vma,
++                                           struct file *backing_file,
++                                           struct file *user_file)
++{
++      return 0;
++}
+ static inline int security_mmap_addr(unsigned long addr)
+ {
+--- a/security/lsm.h
++++ b/security/lsm.h
+@@ -29,6 +29,7 @@ extern struct lsm_blob_sizes blob_sizes;
+ /* LSM blob caches */
+ extern struct kmem_cache *lsm_file_cache;
++extern struct kmem_cache *lsm_backing_file_cache;
+ extern struct kmem_cache *lsm_inode_cache;
+ /* LSM blob allocators */
+--- a/security/lsm_init.c
++++ b/security/lsm_init.c
+@@ -293,6 +293,8 @@ static void __init lsm_prepare(struct ls
+       blobs = lsm->blobs;
+       lsm_blob_size_update(&blobs->lbs_cred, &blob_sizes.lbs_cred);
+       lsm_blob_size_update(&blobs->lbs_file, &blob_sizes.lbs_file);
++      lsm_blob_size_update(&blobs->lbs_backing_file,
++                           &blob_sizes.lbs_backing_file);
+       lsm_blob_size_update(&blobs->lbs_ib, &blob_sizes.lbs_ib);
+       /* inode blob gets an rcu_head in addition to LSM blobs. */
+       if (blobs->lbs_inode && blob_sizes.lbs_inode == 0)
+@@ -441,6 +443,8 @@ int __init security_init(void)
+       if (lsm_debug) {
+               lsm_pr("blob(cred) size %d\n", blob_sizes.lbs_cred);
+               lsm_pr("blob(file) size %d\n", blob_sizes.lbs_file);
++              lsm_pr("blob(backing_file) size %d\n",
++                     blob_sizes.lbs_backing_file);
+               lsm_pr("blob(ib) size %d\n", blob_sizes.lbs_ib);
+               lsm_pr("blob(inode) size %d\n", blob_sizes.lbs_inode);
+               lsm_pr("blob(ipc) size %d\n", blob_sizes.lbs_ipc);
+@@ -462,6 +466,11 @@ int __init security_init(void)
+               lsm_file_cache = kmem_cache_create("lsm_file_cache",
+                                                  blob_sizes.lbs_file, 0,
+                                                  SLAB_PANIC, NULL);
++      if (blob_sizes.lbs_backing_file)
++              lsm_backing_file_cache = kmem_cache_create(
++                                                 "lsm_backing_file_cache",
++                                                 blob_sizes.lbs_backing_file,
++                                                 0, SLAB_PANIC, NULL);
+       if (blob_sizes.lbs_inode)
+               lsm_inode_cache = kmem_cache_create("lsm_inode_cache",
+                                                   blob_sizes.lbs_inode, 0,
+--- a/security/security.c
++++ b/security/security.c
+@@ -82,6 +82,7 @@ const struct lsm_id *lsm_idlist[MAX_LSM_
+ struct lsm_blob_sizes blob_sizes;
+ struct kmem_cache *lsm_file_cache;
++struct kmem_cache *lsm_backing_file_cache;
+ struct kmem_cache *lsm_inode_cache;
+ #define SECURITY_HOOK_ACTIVE_KEY(HOOK, IDX) security_hook_active_##HOOK##_##IDX
+@@ -174,6 +175,30 @@ static int lsm_file_alloc(struct file *f
+ }
+ /**
++ * lsm_backing_file_alloc - allocate a composite backing file blob
++ * @backing_file: the backing file
++ *
++ * Allocate the backing file blob for all the modules.
++ *
++ * Returns 0, or -ENOMEM if memory can't be allocated.
++ */
++static int lsm_backing_file_alloc(struct file *backing_file)
++{
++      void *blob;
++
++      if (!lsm_backing_file_cache) {
++              backing_file_set_security(backing_file, NULL);
++              return 0;
++      }
++
++      blob = kmem_cache_zalloc(lsm_backing_file_cache, GFP_KERNEL);
++      backing_file_set_security(backing_file, blob);
++      if (!blob)
++              return -ENOMEM;
++      return 0;
++}
++
++/**
+  * lsm_blob_alloc - allocate a composite blob
+  * @dest: the destination for the blob
+  * @size: the size of the blob
+@@ -2419,6 +2444,57 @@ void security_file_free(struct file *fil
+ }
+ /**
++ * security_backing_file_alloc() - Allocate and setup a backing file blob
++ * @backing_file: the backing file
++ * @user_file: the associated user visible file
++ *
++ * Allocate a backing file LSM blob and perform any necessary initialization of
++ * the LSM blob.  There will be some operations where the LSM will not have
++ * access to @user_file after this point, so any important state associated
++ * with @user_file that is important to the LSM should be captured in the
++ * backing file's LSM blob.
++ *
++ * LSM's should avoid taking a reference to @user_file in this hook as it will
++ * result in problems later when the system attempts to drop/put the file
++ * references due to a circular dependency.
++ *
++ * Return: Return 0 if the hook is successful, negative values otherwise.
++ */
++int security_backing_file_alloc(struct file *backing_file,
++                              const struct file *user_file)
++{
++      int rc;
++
++      rc = lsm_backing_file_alloc(backing_file);
++      if (rc)
++              return rc;
++      rc = call_int_hook(backing_file_alloc, backing_file, user_file);
++      if (unlikely(rc))
++              security_backing_file_free(backing_file);
++
++      return rc;
++}
++
++/**
++ * security_backing_file_free() - Free a backing file blob
++ * @backing_file: the backing file
++ *
++ * Free any LSM state associate with a backing file's LSM blob, including the
++ * blob itself.
++ */
++void security_backing_file_free(struct file *backing_file)
++{
++      void *blob = backing_file_security(backing_file);
++
++      call_void_hook(backing_file_free, backing_file);
++
++      if (blob) {
++              backing_file_set_security(backing_file, NULL);
++              kmem_cache_free(lsm_backing_file_cache, blob);
++      }
++}
++
++/**
+  * security_file_ioctl() - Check if an ioctl is allowed
+  * @file: associated file
+  * @cmd: ioctl cmd
+@@ -2507,6 +2583,32 @@ int security_mmap_file(struct file *file
+ }
+ /**
++ * security_mmap_backing_file - Check if mmap'ing a backing file is allowed
++ * @vma: the vm_area_struct for the mmap'd region
++ * @backing_file: the backing file being mmap'd
++ * @user_file: the user file being mmap'd
++ *
++ * Check permissions for a mmap operation on a stacked filesystem.  This hook
++ * is called after the security_mmap_file() and is responsible for authorizing
++ * the mmap on @backing_file.  It is important to note that the mmap operation
++ * on @user_file has already been authorized and the @vma->vm_file has been
++ * set to @backing_file.
++ *
++ * Return: Returns 0 if permission is granted.
++ */
++int security_mmap_backing_file(struct vm_area_struct *vma,
++                             struct file *backing_file,
++                             struct file *user_file)
++{
++      /* recommended by the stackable filesystem devs */
++      if (WARN_ON_ONCE(!(backing_file->f_mode & FMODE_BACKING)))
++              return -EIO;
++
++      return call_int_hook(mmap_backing_file, vma, backing_file, user_file);
++}
++EXPORT_SYMBOL_GPL(security_mmap_backing_file);
++
++/**
+  * security_mmap_addr() - Check if mmap'ing an address is allowed
+  * @addr: address
+  *
diff --git a/queue-7.0/media-rzv2h-ivc-fix-axirx_vblank-register-write.patch b/queue-7.0/media-rzv2h-ivc-fix-axirx_vblank-register-write.patch
new file mode 100644 (file)
index 0000000..ec7b2c6
--- /dev/null
@@ -0,0 +1,63 @@
+From 6139d018f043a30274071d694276c5ce59fe62d0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Barnab=C3=A1s=20P=C5=91cze?=
+ <barnabas.pocze+renesas@ideasonboard.com>
+Date: Thu, 12 Feb 2026 16:41:56 +0100
+Subject: media: rzv2h-ivc: Fix AXIRX_VBLANK register write
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Barnabás PÅ‘cze <barnabas.pocze+renesas@ideasonboard.com>
+
+commit 6139d018f043a30274071d694276c5ce59fe62d0 upstream.
+
+According to the documentation there are writable reserved bits in the
+register and those should not be set to 0. So use `rzv2h_ivc_update_bits()`
+with a proper bitmask.
+
+Cc: stable@vger.kernel.org
+Fixes: f0b3984d821b ("media: platform: Add Renesas Input Video Control block driver")
+Reviewed-by: Daniel Scally <dan.scally@ideasonboard.com>
+Signed-off-by: Barnabás PÅ‘cze <barnabas.pocze+renesas@ideasonboard.com>
+Signed-off-by: Jacopo Mondi <jacopo.mondi+renesas@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c |    7 +++++--
+ drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h       |    2 +-
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
++++ b/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
+@@ -7,6 +7,7 @@
+ #include "rzv2h-ivc.h"
++#include <linux/bitfield.h>
+ #include <linux/cleanup.h>
+ #include <linux/iopoll.h>
+ #include <linux/lockdep.h>
+@@ -235,8 +236,10 @@ static void rzv2h_ivc_format_configure(s
+       hts = pix->width + RZV2H_IVC_FIXED_HBLANK;
+       vblank = RZV2H_IVC_MIN_VBLANK(hts);
+-      rzv2h_ivc_write(ivc, RZV2H_IVC_REG_AXIRX_BLANK,
+-                      RZV2H_IVC_VBLANK(vblank));
++      rzv2h_ivc_update_bits(ivc, RZV2H_IVC_REG_AXIRX_BLANK,
++                            RZV2H_IVC_AXIRX_BLANK_FIELD_VBLANK,
++                            FIELD_PREP(RZV2H_IVC_AXIRX_BLANK_FIELD_VBLANK,
++                                       vblank));
+ }
+ static void rzv2h_ivc_return_buffers(struct rzv2h_ivc *ivc,
+--- a/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h
++++ b/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc.h
+@@ -34,7 +34,7 @@
+ #define RZV2H_IVC_REG_AXIRX_HSIZE                     0x0020
+ #define RZV2H_IVC_REG_AXIRX_VSIZE                     0x0024
+ #define RZV2H_IVC_REG_AXIRX_BLANK                     0x0028
+-#define RZV2H_IVC_VBLANK(x)                           ((x) << 16)
++#define RZV2H_IVC_AXIRX_BLANK_FIELD_VBLANK            GENMASK(25, 16)
+ #define RZV2H_IVC_REG_AXIRX_STRD                      0x0030
+ #define RZV2H_IVC_REG_AXIRX_ISSU                      0x0040
+ #define RZV2H_IVC_REG_AXIRX_ERACT                     0x0048
diff --git a/queue-7.0/media-rzv2h-ivc-revise-default-vblank-formula.patch b/queue-7.0/media-rzv2h-ivc-revise-default-vblank-formula.patch
new file mode 100644 (file)
index 0000000..9593e64
--- /dev/null
@@ -0,0 +1,36 @@
+From 38104fe60ebb6b6cb66e3e9ef0a5c12f1260b1bc Mon Sep 17 00:00:00 2001
+From: Daniel Scally <dan.scally+renesas@ideasonboard.com>
+Date: Thu, 12 Feb 2026 10:08:11 +0000
+Subject: media: rzv2h-ivc: Revise default VBLANK formula
+
+From: Daniel Scally <dan.scally+renesas@ideasonboard.com>
+
+commit 38104fe60ebb6b6cb66e3e9ef0a5c12f1260b1bc upstream.
+
+The vertical blanking settings for the IVC block are dependent on
+settings in the ISP. This was originally set to calculate as the
+worst-case possible value, but it seems that this can cause the IVC
+block to hang. Instead calculate the vblank to match the default
+settings (which are currently all the driver sets anyway).
+
+Cc: stable@vger.kernel.org
+Fixes: f0b3984d821b ("media: platform: Add Renesas Input Video Control block driver")
+Signed-off-by: Daniel Scally <dan.scally+renesas@ideasonboard.com>
+Signed-off-by: Jacopo Mondi <jacopo.mondi+renesas@ideasonboard.com>
+Signed-off-by: Hans Verkuil <hverkuil+cisco@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
++++ b/drivers/media/platform/renesas/rzv2h-ivc/rzv2h-ivc-video.c
+@@ -24,7 +24,7 @@
+ #include <media/videobuf2-dma-contig.h>
+ #define RZV2H_IVC_FIXED_HBLANK                        0x20
+-#define RZV2H_IVC_MIN_VBLANK(hts)             max(0x1b, 15 + (120501 / (hts)))
++#define RZV2H_IVC_MIN_VBLANK(hts)             max(0x1b, 70100 / (hts))
+ struct rzv2h_ivc_buf {
+       struct vb2_v4l2_buffer vb;
diff --git a/queue-7.0/net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch b/queue-7.0/net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch
new file mode 100644 (file)
index 0000000..d2922eb
--- /dev/null
@@ -0,0 +1,171 @@
+From df4601653201de21b487c3e7fffd464790cab808 Mon Sep 17 00:00:00 2001
+From: Zhengchuan Liang <zcliangcn@gmail.com>
+Date: Mon, 13 Apr 2026 17:08:46 +0800
+Subject: net: bridge: use a stable FDB dst snapshot in RCU readers
+
+From: Zhengchuan Liang <zcliangcn@gmail.com>
+
+commit df4601653201de21b487c3e7fffd464790cab808 upstream.
+
+Local FDB entries can be rewritten in place by `fdb_delete_local()`, which
+updates `f->dst` to another port or to `NULL` while keeping the entry
+alive. Several bridge RCU readers inspect `f->dst`, including
+`br_fdb_fillbuf()` through the `brforward_read()` sysfs path.
+
+These readers currently load `f->dst` multiple times and can therefore
+observe inconsistent values across the check and later dereference.
+In `br_fdb_fillbuf()`, this means a concurrent local-FDB update can change
+`f->dst` after the NULL check and before the `port_no` dereference,
+leading to a NULL-ptr-deref.
+
+Fix this by taking a single `READ_ONCE()` snapshot of `f->dst` in each
+affected RCU reader and using that snapshot for the rest of the access
+sequence. Also publish the in-place `f->dst` updates in `fdb_delete_local()`
+with `WRITE_ONCE()` so the readers and writer use matching access patterns.
+
+Fixes: 960b589f86c7 ("bridge: Properly check if local fdb entry can be deleted in br_fdb_change_mac_address")
+Cc: stable@kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Suggested-by: Xin Liu <bird@lzu.edu.cn>
+Tested-by: Ren Wei <enjou1224z@gmail.com>
+Signed-off-by: Zhengchuan Liang <zcliangcn@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/6570fabb85ecadb8baaf019efe856f407711c7b9.1776043229.git.zcliangcn@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_arp_nd_proxy.c |    8 +++++---
+ net/bridge/br_fdb.c          |   28 ++++++++++++++++++----------
+ 2 files changed, 23 insertions(+), 13 deletions(-)
+
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -202,11 +202,12 @@ void br_do_proxy_suppress_arp(struct sk_
+               f = br_fdb_find_rcu(br, n->ha, vid);
+               if (f) {
++                      const struct net_bridge_port *dst = READ_ONCE(f->dst);
+                       bool replied = false;
+                       if ((p && (p->flags & BR_PROXYARP)) ||
+-                          (f->dst && (f->dst->flags & BR_PROXYARP_WIFI)) ||
+-                          br_is_neigh_suppress_enabled(f->dst, vid)) {
++                          (dst && (dst->flags & BR_PROXYARP_WIFI)) ||
++                          br_is_neigh_suppress_enabled(dst, vid)) {
+                               if (!vid)
+                                       br_arp_send(br, p, skb->dev, sip, tip,
+                                                   sha, n->ha, sha, 0, 0);
+@@ -470,9 +471,10 @@ void br_do_suppress_nd(struct sk_buff *s
+               f = br_fdb_find_rcu(br, n->ha, vid);
+               if (f) {
++                      const struct net_bridge_port *dst = READ_ONCE(f->dst);
+                       bool replied = false;
+-                      if (br_is_neigh_suppress_enabled(f->dst, vid)) {
++                      if (br_is_neigh_suppress_enabled(dst, vid)) {
+                               if (vid != 0)
+                                       br_nd_send(br, p, skb, n,
+                                                  skb->vlan_proto,
+--- a/net/bridge/br_fdb.c
++++ b/net/bridge/br_fdb.c
+@@ -236,6 +236,7 @@ struct net_device *br_fdb_find_port(cons
+                                   const unsigned char *addr,
+                                   __u16 vid)
+ {
++      const struct net_bridge_port *dst;
+       struct net_bridge_fdb_entry *f;
+       struct net_device *dev = NULL;
+       struct net_bridge *br;
+@@ -248,8 +249,11 @@ struct net_device *br_fdb_find_port(cons
+       br = netdev_priv(br_dev);
+       rcu_read_lock();
+       f = br_fdb_find_rcu(br, addr, vid);
+-      if (f && f->dst)
+-              dev = f->dst->dev;
++      if (f) {
++              dst = READ_ONCE(f->dst);
++              if (dst)
++                      dev = dst->dev;
++      }
+       rcu_read_unlock();
+       return dev;
+@@ -346,7 +350,7 @@ static void fdb_delete_local(struct net_
+               vg = nbp_vlan_group(op);
+               if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+                   (!vid || br_vlan_find(vg, vid))) {
+-                      f->dst = op;
++                      WRITE_ONCE(f->dst, op);
+                       clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+                       return;
+               }
+@@ -357,7 +361,7 @@ static void fdb_delete_local(struct net_
+       /* Maybe bridge device has same hw addr? */
+       if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+           (!vid || (v && br_vlan_should_use(v)))) {
+-              f->dst = NULL;
++              WRITE_ONCE(f->dst, NULL);
+               clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
+               return;
+       }
+@@ -928,6 +932,7 @@ int br_fdb_test_addr(struct net_device *
+ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
+                  unsigned long maxnum, unsigned long skip)
+ {
++      const struct net_bridge_port *dst;
+       struct net_bridge_fdb_entry *f;
+       struct __fdb_entry *fe = buf;
+       unsigned long delta;
+@@ -944,7 +949,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+                       continue;
+               /* ignore pseudo entry for local MAC address */
+-              if (!f->dst)
++              dst = READ_ONCE(f->dst);
++              if (!dst)
+                       continue;
+               if (skip) {
+@@ -956,8 +962,8 @@ int br_fdb_fillbuf(struct net_bridge *br
+               memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);
+               /* due to ABI compat need to split into hi/lo */
+-              fe->port_no = f->dst->port_no;
+-              fe->port_hi = f->dst->port_no >> 8;
++              fe->port_no = dst->port_no;
++              fe->port_hi = dst->port_no >> 8;
+               fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
+               if (!test_bit(BR_FDB_STATIC, &f->flags)) {
+@@ -1083,9 +1089,11 @@ int br_fdb_dump(struct sk_buff *skb,
+       rcu_read_lock();
+       hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
++              const struct net_bridge_port *dst = READ_ONCE(f->dst);
++
+               if (*idx < ctx->fdb_idx)
+                       goto skip;
+-              if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
++              if (filter_dev && (!dst || dst->dev != filter_dev)) {
+                       if (filter_dev != dev)
+                               goto skip;
+                       /* !f->dst is a special case for bridge
+@@ -1093,10 +1101,10 @@ int br_fdb_dump(struct sk_buff *skb,
+                        * Therefore need a little more filtering
+                        * we only want to dump the !f->dst case
+                        */
+-                      if (f->dst)
++                      if (dst)
+                               goto skip;
+               }
+-              if (!filter_dev && f->dst)
++              if (!filter_dev && dst)
+                       goto skip;
+               err = fdb_fill_info(skb, br, f,
diff --git a/queue-7.0/net-ks8851-avoid-excess-softirq-scheduling.patch b/queue-7.0/net-ks8851-avoid-excess-softirq-scheduling.patch
new file mode 100644 (file)
index 0000000..b67f589
--- /dev/null
@@ -0,0 +1,41 @@
+From 22230e68b2cf1ab6b027be8cf1198164a949c4fa Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@nabladev.com>
+Date: Thu, 16 Apr 2026 01:09:45 +0200
+Subject: net: ks8851: Avoid excess softirq scheduling
+
+From: Marek Vasut <marex@nabladev.com>
+
+commit 22230e68b2cf1ab6b027be8cf1198164a949c4fa upstream.
+
+The code injects a packet into netif_rx() repeatedly, which will add
+it to its internal NAPI and schedule a softirq, and process it. It is
+more efficient to queue multiple packets and process them all at the
+local_bh_enable() time.
+
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Fixes: e0863634bf9f ("net: ks8851: Queue RX packets in IRQ handler instead of disabling BHs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260415231020.455298-2-marex@nabladev.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/micrel/ks8851_common.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -373,9 +373,12 @@ static irqreturn_t ks8851_irq(int irq, v
+       if (status & IRQ_LCI)
+               mii_check_link(&ks->mii);
+-      if (status & IRQ_RXI)
++      if (status & IRQ_RXI) {
++              local_bh_disable();
+               while ((skb = __skb_dequeue(&rxq)))
+                       netif_rx(skb);
++              local_bh_enable();
++      }
+       return IRQ_HANDLED;
+ }
diff --git a/queue-7.0/net-ks8851-reinstate-disabling-of-bhs-around-irq-handler.patch b/queue-7.0/net-ks8851-reinstate-disabling-of-bhs-around-irq-handler.patch
new file mode 100644 (file)
index 0000000..0d6f47e
--- /dev/null
@@ -0,0 +1,469 @@
+From 5c9fcac3c872224316714d0d8914d9af16c76a6d Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@nabladev.com>
+Date: Thu, 16 Apr 2026 01:09:44 +0200
+Subject: net: ks8851: Reinstate disabling of BHs around IRQ handler
+
+From: Marek Vasut <marex@nabladev.com>
+
+commit 5c9fcac3c872224316714d0d8914d9af16c76a6d upstream.
+
+If the driver executes ks8851_irq() AND a TX packet has been sent, then
+the driver enables TX queue via netif_wake_queue() which schedules TX
+softirq to queue packets for this device.
+
+If CONFIG_PREEMPT_RT=y is set AND a packet has also been received by
+the MAC, then ks8851_rx_pkts() calls netdev_alloc_skb_ip_align() to
+allocate SKBs for the received packets. If netdev_alloc_skb_ip_align()
+is called with BH enabled, then local_bh_enable() at the end of
+netdev_alloc_skb_ip_align() will trigger the pending softirq processing,
+which may ultimately call the .xmit callback ks8851_start_xmit_par().
+The ks8851_start_xmit_par() will try to lock struct ks8851_net_par
+.lock spinlock, which is already locked by ks8851_irq() from which
+ks8851_start_xmit_par() was called. This leads to a deadlock, which
+is reported by the kernel, including a trace listed below.
+
+If CONFIG_PREEMPT_RT is not set, then since commit 0913ec336a6c0
+("net: ks8851: Fix deadlock with the SPI chip variant") the deadlock
+can also be triggered without received packet in the RX FIFO. The
+pending softirqs will be processed on return from
+spin_unlock_bh(&ks->statelock) in ks8851_irq(), which triggers the
+deadlock as well.
+
+Fix the problem by disabling BH around critical sections, including the
+IRQ handler, thus preventing the net_tx_action() softirq from triggering
+during these critical sections. The net_tx_action() softirq is triggered
+once BH are re-enabled and at the end of the IRQ handler, once all the
+other IRQ handler actions have been completed.
+
+ __schedule from schedule_rtlock+0x1c/0x34
+ schedule_rtlock from rtlock_slowlock_locked+0x548/0x904
+ rtlock_slowlock_locked from rt_spin_lock+0x60/0x9c
+ rt_spin_lock from ks8851_start_xmit_par+0x74/0x1a8
+ ks8851_start_xmit_par from netdev_start_xmit+0x20/0x44
+ netdev_start_xmit from dev_hard_start_xmit+0xd0/0x188
+ dev_hard_start_xmit from sch_direct_xmit+0xb8/0x25c
+ sch_direct_xmit from __qdisc_run+0x1f8/0x4ec
+ __qdisc_run from qdisc_run+0x1c/0x28
+ qdisc_run from net_tx_action+0x1f0/0x268
+ net_tx_action from handle_softirqs+0x1a4/0x270
+ handle_softirqs from __local_bh_enable_ip+0xcc/0xe0
+ __local_bh_enable_ip from __alloc_skb+0xd8/0x128
+ __alloc_skb from __netdev_alloc_skb+0x3c/0x19c
+ __netdev_alloc_skb from ks8851_irq+0x388/0x4d4
+ ks8851_irq from irq_thread_fn+0x24/0x64
+ irq_thread_fn from irq_thread+0x178/0x28c
+ irq_thread from kthread+0x12c/0x138
+ kthread from ret_from_fork+0x14/0x28
+
+Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Fixes: e0863634bf9f ("net: ks8851: Queue RX packets in IRQ handler instead of disabling BHs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Marek Vasut <marex@nabladev.com>
+Link: https://patch.msgid.link/20260415231020.455298-1-marex@nabladev.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/micrel/ks8851.h        |    6 --
+ drivers/net/ethernet/micrel/ks8851_common.c |   64 +++++++++++-----------------
+ drivers/net/ethernet/micrel/ks8851_par.c    |   15 ++----
+ drivers/net/ethernet/micrel/ks8851_spi.c    |   11 +---
+ 4 files changed, 38 insertions(+), 58 deletions(-)
+
+--- a/drivers/net/ethernet/micrel/ks8851.h
++++ b/drivers/net/ethernet/micrel/ks8851.h
+@@ -408,10 +408,8 @@ struct ks8851_net {
+       struct gpio_desc        *gpio;
+       struct mii_bus          *mii_bus;
+-      void                    (*lock)(struct ks8851_net *ks,
+-                                      unsigned long *flags);
+-      void                    (*unlock)(struct ks8851_net *ks,
+-                                        unsigned long *flags);
++      void                    (*lock)(struct ks8851_net *ks);
++      void                    (*unlock)(struct ks8851_net *ks);
+       unsigned int            (*rdreg16)(struct ks8851_net *ks,
+                                          unsigned int reg);
+       void                    (*wrreg16)(struct ks8851_net *ks,
+--- a/drivers/net/ethernet/micrel/ks8851_common.c
++++ b/drivers/net/ethernet/micrel/ks8851_common.c
+@@ -28,25 +28,23 @@
+ /**
+  * ks8851_lock - register access lock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Claim chip register access lock
+  */
+-static void ks8851_lock(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_lock(struct ks8851_net *ks)
+ {
+-      ks->lock(ks, flags);
++      ks->lock(ks);
+ }
+ /**
+  * ks8851_unlock - register access unlock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Release chip register access lock
+  */
+-static void ks8851_unlock(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_unlock(struct ks8851_net *ks)
+ {
+-      ks->unlock(ks, flags);
++      ks->unlock(ks);
+ }
+ /**
+@@ -129,11 +127,10 @@ static void ks8851_set_powermode(struct
+ static int ks8851_write_mac_addr(struct net_device *dev)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       u16 val;
+       int i;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       /*
+        * Wake up chip in case it was powered off when stopped; otherwise,
+@@ -149,7 +146,7 @@ static int ks8851_write_mac_addr(struct
+       if (!netif_running(dev))
+               ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       return 0;
+ }
+@@ -163,12 +160,11 @@ static int ks8851_write_mac_addr(struct
+ static void ks8851_read_mac_addr(struct net_device *dev)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       u8 addr[ETH_ALEN];
+       u16 reg;
+       int i;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       for (i = 0; i < ETH_ALEN; i += 2) {
+               reg = ks8851_rdreg16(ks, KS_MAR(i));
+@@ -177,7 +173,7 @@ static void ks8851_read_mac_addr(struct
+       }
+       eth_hw_addr_set(dev, addr);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+ }
+ /**
+@@ -312,11 +308,10 @@ static irqreturn_t ks8851_irq(int irq, v
+ {
+       struct ks8851_net *ks = _ks;
+       struct sk_buff_head rxq;
+-      unsigned long flags;
+       unsigned int status;
+       struct sk_buff *skb;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       status = ks8851_rdreg16(ks, KS_ISR);
+       ks8851_wrreg16(ks, KS_ISR, status);
+@@ -373,7 +368,7 @@ static irqreturn_t ks8851_irq(int irq, v
+               ks8851_wrreg16(ks, KS_RXCR1, rxc->rxcr1);
+       }
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       if (status & IRQ_LCI)
+               mii_check_link(&ks->mii);
+@@ -405,7 +400,6 @@ static void ks8851_flush_tx_work(struct
+ static int ks8851_net_open(struct net_device *dev)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       int ret;
+       ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+@@ -418,7 +412,7 @@ static int ks8851_net_open(struct net_de
+       /* lock the card, even if we may not actually be doing anything
+        * else at the moment */
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       netif_dbg(ks, ifup, ks->netdev, "opening\n");
+@@ -471,7 +465,7 @@ static int ks8851_net_open(struct net_de
+       netif_dbg(ks, ifup, ks->netdev, "network device up\n");
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       mii_check_link(&ks->mii);
+       return 0;
+ }
+@@ -487,23 +481,22 @@ static int ks8851_net_open(struct net_de
+ static int ks8851_net_stop(struct net_device *dev)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       netif_info(ks, ifdown, dev, "shutting down\n");
+       netif_stop_queue(dev);
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       /* turn off the IRQs and ack any outstanding */
+       ks8851_wrreg16(ks, KS_IER, 0x0000);
+       ks8851_wrreg16(ks, KS_ISR, 0xffff);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       /* stop any outstanding work */
+       ks8851_flush_tx_work(ks);
+       flush_work(&ks->rxctrl_work);
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       /* shutdown RX process */
+       ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
+@@ -512,7 +505,7 @@ static int ks8851_net_stop(struct net_de
+       /* set powermode to soft power down to save power */
+       ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       /* ensure any queued tx buffers are dumped */
+       while (!skb_queue_empty(&ks->txq)) {
+@@ -566,14 +559,13 @@ static netdev_tx_t ks8851_start_xmit(str
+ static void ks8851_rxctrl_work(struct work_struct *work)
+ {
+       struct ks8851_net *ks = container_of(work, struct ks8851_net, rxctrl_work);
+-      unsigned long flags;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       /* need to shutdown RXQ before modifying filter parameters */
+       ks8851_wrreg16(ks, KS_RXCR1, 0x00);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+ }
+ static void ks8851_set_rx_mode(struct net_device *dev)
+@@ -780,7 +772,6 @@ static int ks8851_set_eeprom(struct net_
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+       int offset = ee->offset;
+-      unsigned long flags;
+       int len = ee->len;
+       u16 tmp;
+@@ -794,7 +785,7 @@ static int ks8851_set_eeprom(struct net_
+       if (!(ks->rc_ccr & CCR_EEPROM))
+               return -ENOENT;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       ks8851_eeprom_claim(ks);
+@@ -817,7 +808,7 @@ static int ks8851_set_eeprom(struct net_
+       eeprom_93cx6_wren(&ks->eeprom, false);
+       ks8851_eeprom_release(ks);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       return 0;
+ }
+@@ -827,7 +818,6 @@ static int ks8851_get_eeprom(struct net_
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+       int offset = ee->offset;
+-      unsigned long flags;
+       int len = ee->len;
+       /* must be 2 byte aligned */
+@@ -837,7 +827,7 @@ static int ks8851_get_eeprom(struct net_
+       if (!(ks->rc_ccr & CCR_EEPROM))
+               return -ENOENT;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       ks8851_eeprom_claim(ks);
+@@ -845,7 +835,7 @@ static int ks8851_get_eeprom(struct net_
+       eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+       ks8851_eeprom_release(ks);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       return 0;
+ }
+@@ -904,7 +894,6 @@ static int ks8851_phy_reg(int reg)
+ static int ks8851_phy_read_common(struct net_device *dev, int phy_addr, int reg)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       int result;
+       int ksreg;
+@@ -912,9 +901,9 @@ static int ks8851_phy_read_common(struct
+       if (ksreg < 0)
+               return ksreg;
+-      ks8851_lock(ks, &flags);
++      ks8851_lock(ks);
+       result = ks8851_rdreg16(ks, ksreg);
+-      ks8851_unlock(ks, &flags);
++      ks8851_unlock(ks);
+       return result;
+ }
+@@ -949,14 +938,13 @@ static void ks8851_phy_write(struct net_
+                            int phy, int reg, int value)
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+-      unsigned long flags;
+       int ksreg;
+       ksreg = ks8851_phy_reg(reg);
+       if (ksreg >= 0) {
+-              ks8851_lock(ks, &flags);
++              ks8851_lock(ks);
+               ks8851_wrreg16(ks, ksreg, value);
+-              ks8851_unlock(ks, &flags);
++              ks8851_unlock(ks);
+       }
+ }
+--- a/drivers/net/ethernet/micrel/ks8851_par.c
++++ b/drivers/net/ethernet/micrel/ks8851_par.c
+@@ -55,29 +55,27 @@ struct ks8851_net_par {
+ /**
+  * ks8851_lock_par - register access lock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Claim chip register access lock
+  */
+-static void ks8851_lock_par(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_lock_par(struct ks8851_net *ks)
+ {
+       struct ks8851_net_par *ksp = to_ks8851_par(ks);
+-      spin_lock_irqsave(&ksp->lock, *flags);
++      spin_lock_bh(&ksp->lock);
+ }
+ /**
+  * ks8851_unlock_par - register access unlock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Release chip register access lock
+  */
+-static void ks8851_unlock_par(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_unlock_par(struct ks8851_net *ks)
+ {
+       struct ks8851_net_par *ksp = to_ks8851_par(ks);
+-      spin_unlock_irqrestore(&ksp->lock, *flags);
++      spin_unlock_bh(&ksp->lock);
+ }
+ /**
+@@ -233,7 +231,6 @@ static netdev_tx_t ks8851_start_xmit_par
+ {
+       struct ks8851_net *ks = netdev_priv(dev);
+       netdev_tx_t ret = NETDEV_TX_OK;
+-      unsigned long flags;
+       unsigned int txqcr;
+       u16 txmir;
+       int err;
+@@ -241,7 +238,7 @@ static netdev_tx_t ks8851_start_xmit_par
+       netif_dbg(ks, tx_queued, ks->netdev,
+                 "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
+-      ks8851_lock_par(ks, &flags);
++      ks8851_lock_par(ks);
+       txmir = ks8851_rdreg16_par(ks, KS_TXMIR) & 0x1fff;
+@@ -262,7 +259,7 @@ static netdev_tx_t ks8851_start_xmit_par
+               ret = NETDEV_TX_BUSY;
+       }
+-      ks8851_unlock_par(ks, &flags);
++      ks8851_unlock_par(ks);
+       return ret;
+ }
+--- a/drivers/net/ethernet/micrel/ks8851_spi.c
++++ b/drivers/net/ethernet/micrel/ks8851_spi.c
+@@ -71,11 +71,10 @@ struct ks8851_net_spi {
+ /**
+  * ks8851_lock_spi - register access lock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Claim chip register access lock
+  */
+-static void ks8851_lock_spi(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_lock_spi(struct ks8851_net *ks)
+ {
+       struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+@@ -85,11 +84,10 @@ static void ks8851_lock_spi(struct ks885
+ /**
+  * ks8851_unlock_spi - register access unlock
+  * @ks: The chip state
+- * @flags: Spinlock flags
+  *
+  * Release chip register access lock
+  */
+-static void ks8851_unlock_spi(struct ks8851_net *ks, unsigned long *flags)
++static void ks8851_unlock_spi(struct ks8851_net *ks)
+ {
+       struct ks8851_net_spi *kss = to_ks8851_spi(ks);
+@@ -309,7 +307,6 @@ static void ks8851_tx_work(struct work_s
+       struct ks8851_net_spi *kss;
+       unsigned short tx_space;
+       struct ks8851_net *ks;
+-      unsigned long flags;
+       struct sk_buff *txb;
+       bool last;
+@@ -317,7 +314,7 @@ static void ks8851_tx_work(struct work_s
+       ks = &kss->ks8851;
+       last = skb_queue_empty(&ks->txq);
+-      ks8851_lock_spi(ks, &flags);
++      ks8851_lock_spi(ks);
+       while (!last) {
+               txb = skb_dequeue(&ks->txq);
+@@ -343,7 +340,7 @@ static void ks8851_tx_work(struct work_s
+       ks->tx_space = tx_space;
+       spin_unlock_bh(&ks->statelock);
+-      ks8851_unlock_spi(ks, &flags);
++      ks8851_unlock_spi(ks);
+ }
+ /**
diff --git a/queue-7.0/net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch b/queue-7.0/net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch
new file mode 100644 (file)
index 0000000..e681fb2
--- /dev/null
@@ -0,0 +1,85 @@
+From a663bac71a2f0b3ac6c373168ca57b2a6e6381aa Mon Sep 17 00:00:00 2001
+From: Yuan Zhaoming <yuanzm2@lenovo.com>
+Date: Fri, 17 Apr 2026 22:13:40 +0800
+Subject: net: mctp: fix don't require received header reserved bits to be zero
+
+From: Yuan Zhaoming <yuanzm2@lenovo.com>
+
+commit a663bac71a2f0b3ac6c373168ca57b2a6e6381aa upstream.
+
+From the MCTP Base specification (DSP0236 v1.2.1), the first byte of
+the MCTP header contains a 4 bit reserved field, and 4 bit version.
+
+On our current receive path, we require those 4 reserved bits to be
+zero, but the 9500-8i card is non-conformant, and may set these
+reserved bits.
+
+DSP0236 states that the reserved bits must be written as zero, and
+ignored when read. While the device might not conform to the former,
+we should accept these messages to conform to the latter.
+
+Relax our check on the MCTP version byte to allow non-zero bits in the
+reserved field.
+
+Fixes: 889b7da23abf ("mctp: Add initial routing framework")
+Signed-off-by: Yuan Zhaoming <yuanzm2@lenovo.com>
+Cc: stable@vger.kernel.org
+Acked-by: Jeremy Kerr <jk@codeconstruct.com.au>
+Link: https://patch.msgid.link/20260417141340.5306-1-yuanzhaoming901030@126.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/mctp.h |    3 +++
+ net/mctp/route.c   |    8 ++++++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+--- a/include/net/mctp.h
++++ b/include/net/mctp.h
+@@ -26,6 +26,9 @@ struct mctp_hdr {
+ #define MCTP_VER_MIN  1
+ #define MCTP_VER_MAX  1
++/* Definitions for ver field */
++#define MCTP_HDR_VER_MASK     GENMASK(3, 0)
++
+ /* Definitions for flags_seq_tag field */
+ #define MCTP_HDR_FLAG_SOM     BIT(7)
+ #define MCTP_HDR_FLAG_EOM     BIT(6)
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -441,6 +441,7 @@ static int mctp_dst_input(struct mctp_ds
+       unsigned long f;
+       u8 tag, flags;
+       int rc;
++      u8 ver;
+       msk = NULL;
+       rc = -EINVAL;
+@@ -467,7 +468,8 @@ static int mctp_dst_input(struct mctp_ds
+       netid = mctp_cb(skb)->net;
+       skb_pull(skb, sizeof(struct mctp_hdr));
+-      if (mh->ver != 1)
++      ver = mh->ver & MCTP_HDR_VER_MASK;
++      if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+               goto out;
+       flags = mh->flags_seq_tag & (MCTP_HDR_FLAG_SOM | MCTP_HDR_FLAG_EOM);
+@@ -1325,6 +1327,7 @@ static int mctp_pkttype_receive(struct s
+       struct mctp_dst dst;
+       struct mctp_hdr *mh;
+       int rc;
++      u8 ver;
+       rcu_read_lock();
+       mdev = __mctp_dev_get(dev);
+@@ -1342,7 +1345,8 @@ static int mctp_pkttype_receive(struct s
+       /* We have enough for a header; decode and route */
+       mh = mctp_hdr(skb);
+-      if (mh->ver < MCTP_VER_MIN || mh->ver > MCTP_VER_MAX)
++      ver = mh->ver & MCTP_HDR_VER_MASK;
++      if (ver < MCTP_VER_MIN || ver > MCTP_VER_MAX)
+               goto err_drop;
+       /* source must be valid unicast or null; drop reserved ranges and
diff --git a/queue-7.0/net-qrtr-ns-free-the-node-during-ctrl_cmd_bye.patch b/queue-7.0/net-qrtr-ns-free-the-node-during-ctrl_cmd_bye.patch
new file mode 100644 (file)
index 0000000..9845e4c
--- /dev/null
@@ -0,0 +1,73 @@
+From 68efba36446a7774ea5b971257ade049272a07ac Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Date: Thu, 9 Apr 2026 23:04:14 +0530
+Subject: net: qrtr: ns: Free the node during ctrl_cmd_bye()
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+commit 68efba36446a7774ea5b971257ade049272a07ac upstream.
+
+A node sends the BYE packet when it is about to go down. So the nameserver
+should advertise the removal of the node to all remote and local observers
+and free the node finally. But currently, the nameserver doesn't free the
+node memory even after processing the BYE packet. This causes the node
+memory to leak.
+
+Hence, remove the node from Xarray list and free the node memory during
+both success and failure case of ctrl_cmd_bye().
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-3-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c |   20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -360,7 +360,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+       struct qrtr_node *node;
+       unsigned long index;
+       struct kvec iv;
+-      int ret;
++      int ret = 0;
+       iv.iov_base = &pkt;
+       iv.iov_len = sizeof(pkt);
+@@ -375,8 +375,10 @@ static int ctrl_cmd_bye(struct sockaddr_
+       /* Advertise the removal of this client to all local servers */
+       local_node = node_get(qrtr_ns.local_node);
+-      if (!local_node)
+-              return 0;
++      if (!local_node) {
++              ret = 0;
++              goto delete_node;
++      }
+       memset(&pkt, 0, sizeof(pkt));
+       pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
+@@ -393,10 +395,18 @@ static int ctrl_cmd_bye(struct sockaddr_
+               ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
+               if (ret < 0 && ret != -ENODEV) {
+                       pr_err("failed to send bye cmd\n");
+-                      return ret;
++                      goto delete_node;
+               }
+       }
+-      return 0;
++
++      /* Ignore -ENODEV */
++      ret = 0;
++
++delete_node:
++      xa_erase(&nodes, from->sq_node);
++      kfree(node);
++
++      return ret;
+ }
+ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
diff --git a/queue-7.0/net-qrtr-ns-limit-the-maximum-number-of-lookups.patch b/queue-7.0/net-qrtr-ns-limit-the-maximum-number-of-lookups.patch
new file mode 100644 (file)
index 0000000..8eb2481
--- /dev/null
@@ -0,0 +1,92 @@
+From 5640227d9a21c6a8be249a10677b832e7f40dc55 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Date: Thu, 9 Apr 2026 23:04:13 +0530
+Subject: net: qrtr: ns: Limit the maximum number of lookups
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+commit 5640227d9a21c6a8be249a10677b832e7f40dc55 upstream.
+
+Current code does no bound checking on the number of lookups a client can
+perform. Though the code restricts the lookups to local clients, there is
+still a possibility of a malicious local client sending a flood of
+NEW_LOOKUP messages over the same socket.
+
+Fix this issue by limiting the maximum number of lookups to 64 globally.
+Since the nameserver allows only at most one local observer, this global
+lookup count will ensure that the lookups stay within the limit.
+
+Note that, limit of 64 is chosen based on the current platform
+requirements. If requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-2-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c |   14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -22,6 +22,7 @@ static struct {
+       struct socket *sock;
+       struct sockaddr_qrtr bcast_sq;
+       struct list_head lookups;
++      u32 lookup_count;
+       struct workqueue_struct *workqueue;
+       struct work_struct work;
+       void (*saved_data_ready)(struct sock *sk);
+@@ -71,10 +72,11 @@ struct qrtr_node {
+       u32 server_count;
+ };
+-/* Max server limit is chosen based on the current platform requirements. If the
+- * requirement changes in the future, this value can be increased.
++/* Max server, lookup limits are chosen based on the current platform requirements.
++ * If the requirement changes in the future, these values can be increased.
+  */
+ #define QRTR_NS_MAX_SERVERS 256
++#define QRTR_NS_MAX_LOOKUPS 64
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+@@ -434,6 +436,7 @@ static int ctrl_cmd_del_client(struct so
+               list_del(&lookup->li);
+               kfree(lookup);
++              qrtr_ns.lookup_count--;
+       }
+       /* Remove the server belonging to this port but don't broadcast
+@@ -551,6 +554,11 @@ static int ctrl_cmd_new_lookup(struct so
+       if (from->sq_node != qrtr_ns.local_node)
+               return -EINVAL;
++      if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
++              pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
++              return -ENOSPC;
++      }
++
+       lookup = kzalloc_obj(*lookup);
+       if (!lookup)
+               return -ENOMEM;
+@@ -559,6 +567,7 @@ static int ctrl_cmd_new_lookup(struct so
+       lookup->service = service;
+       lookup->instance = instance;
+       list_add_tail(&lookup->li, &qrtr_ns.lookups);
++      qrtr_ns.lookup_count++;
+       memset(&filter, 0, sizeof(filter));
+       filter.service = service;
+@@ -599,6 +608,7 @@ static void ctrl_cmd_del_lookup(struct s
+               list_del(&lookup->li);
+               kfree(lookup);
++              qrtr_ns.lookup_count--;
+       }
+ }
diff --git a/queue-7.0/net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch b/queue-7.0/net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch
new file mode 100644 (file)
index 0000000..0bc20af
--- /dev/null
@@ -0,0 +1,105 @@
+From d5ee2ff98322337951c56398e79d51815acbf955 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Date: Thu, 9 Apr 2026 23:04:12 +0530
+Subject: net: qrtr: ns: Limit the maximum server registration per node
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+commit d5ee2ff98322337951c56398e79d51815acbf955 upstream.
+
+Current code does no bound checking on the number of servers added per
+node. A malicious client can flood NEW_SERVER messages and exhaust memory.
+
+Fix this issue by limiting the maximum number of server registrations to
+256 per node. If the NEW_SERVER message is received for an old port, then
+don't restrict it as it will get replaced. While at it, also rate limit
+the error messages in the failure path of qrtr_ns_worker().
+
+Note that the limit of 256 is chosen based on the current platform
+requirements. If requirement changes in the future, this limit can be
+increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-1-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c |   26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -68,8 +68,14 @@ struct qrtr_server {
+ struct qrtr_node {
+       unsigned int id;
+       struct xarray servers;
++      u32 server_count;
+ };
++/* Max server limit is chosen based on the current platform requirements. If the
++ * requirement changes in the future, this value can be increased.
++ */
++#define QRTR_NS_MAX_SERVERS 256
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+       struct qrtr_node *node;
+@@ -230,6 +236,17 @@ static struct qrtr_server *server_add(un
+       if (!service || !port)
+               return NULL;
++      node = node_get(node_id);
++      if (!node)
++              return NULL;
++
++      /* Make sure the new servers per port are capped at the maximum value */
++      old = xa_load(&node->servers, port);
++      if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
++              pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
++              return NULL;
++      }
++
+       srv = kzalloc_obj(*srv);
+       if (!srv)
+               return NULL;
+@@ -239,10 +256,6 @@ static struct qrtr_server *server_add(un
+       srv->node = node_id;
+       srv->port = port;
+-      node = node_get(node_id);
+-      if (!node)
+-              goto err;
+-
+       /* Delete the old server on the same port */
+       old = xa_store(&node->servers, port, srv, GFP_KERNEL);
+       if (old) {
+@@ -253,6 +266,8 @@ static struct qrtr_server *server_add(un
+               } else {
+                       kfree(old);
+               }
++      } else {
++              node->server_count++;
+       }
+       trace_qrtr_ns_server_add(srv->service, srv->instance,
+@@ -293,6 +308,7 @@ static int server_del(struct qrtr_node *
+       }
+       kfree(srv);
++      node->server_count--;
+       return 0;
+ }
+@@ -671,7 +687,7 @@ static void qrtr_ns_worker(struct work_s
+               }
+               if (ret < 0)
+-                      pr_err("failed while handling packet from %d:%d",
++                      pr_err_ratelimited("failed while handling packet from %d:%d",
+                              sq.sq_node, sq.sq_port);
+       }
diff --git a/queue-7.0/net-qrtr-ns-limit-the-total-number-of-nodes.patch b/queue-7.0/net-qrtr-ns-limit-the-total-number-of-nodes.patch
new file mode 100644 (file)
index 0000000..c49037c
--- /dev/null
@@ -0,0 +1,77 @@
+From 27d5e84e810b0849d08b9aec68e48570461ce313 Mon Sep 17 00:00:00 2001
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Date: Thu, 9 Apr 2026 23:04:15 +0530
+Subject: net: qrtr: ns: Limit the total number of nodes
+
+From: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+
+commit 27d5e84e810b0849d08b9aec68e48570461ce313 upstream.
+
+Currently, the nameserver doesn't limit the number of nodes it handles.
+This can be an attack vector if a malicious client starts registering
+random nodes, leading to memory exhaustion.
+
+Hence, limit the maximum number of nodes to 64. Note that, limit of 64 is
+chosen based on the current platform requirements. If requirement changes
+in the future, this limit can be increased.
+
+Cc: stable@vger.kernel.org
+Fixes: 0c2204a4ad71 ("net: qrtr: Migrate nameservice to kernel from userspace")
+Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260409-qrtr-fix-v3-4-00a8a5ff2b51@oss.qualcomm.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/qrtr/ns.c |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/net/qrtr/ns.c
++++ b/net/qrtr/ns.c
+@@ -72,12 +72,16 @@ struct qrtr_node {
+       u32 server_count;
+ };
+-/* Max server, lookup limits are chosen based on the current platform requirements.
+- * If the requirement changes in the future, these values can be increased.
++/* Max nodes, server, lookup limits are chosen based on the current platform
++ * requirements. If the requirement changes in the future, these values can be
++ * increased.
+  */
++#define QRTR_NS_MAX_NODES   64
+ #define QRTR_NS_MAX_SERVERS 256
+ #define QRTR_NS_MAX_LOOKUPS 64
++static u8 node_count;
++
+ static struct qrtr_node *node_get(unsigned int node_id)
+ {
+       struct qrtr_node *node;
+@@ -86,6 +90,11 @@ static struct qrtr_node *node_get(unsign
+       if (node)
+               return node;
++      if (node_count >= QRTR_NS_MAX_NODES) {
++              pr_err_ratelimited("QRTR clients exceed max node limit!\n");
++              return NULL;
++      }
++
+       /* If node didn't exist, allocate and insert it to the tree */
+       node = kzalloc_obj(*node);
+       if (!node)
+@@ -99,6 +108,8 @@ static struct qrtr_node *node_get(unsign
+               return NULL;
+       }
++      node_count++;
++
+       return node;
+ }
+@@ -405,6 +416,7 @@ static int ctrl_cmd_bye(struct sockaddr_
+ delete_node:
+       xa_erase(&nodes, from->sq_node);
+       kfree(node);
++      node_count--;
+       return ret;
+ }
diff --git a/queue-7.0/net-rds-fix-mr-cleanup-on-copy-error.patch b/queue-7.0/net-rds-fix-mr-cleanup-on-copy-error.patch
new file mode 100644 (file)
index 0000000..e582b58
--- /dev/null
@@ -0,0 +1,47 @@
+From 8141a2dc70080eda1aedc0389ed2db2b292af5bd Mon Sep 17 00:00:00 2001
+From: Ao Zhou <draw51280@163.com>
+Date: Wed, 22 Apr 2026 22:52:07 +0800
+Subject: net: rds: fix MR cleanup on copy error
+
+From: Ao Zhou <draw51280@163.com>
+
+commit 8141a2dc70080eda1aedc0389ed2db2b292af5bd upstream.
+
+__rds_rdma_map() hands sg/pages ownership to the transport after
+get_mr() succeeds. If copying the generated cookie back to user space
+fails after that point, the error path must not free those resources
+again before dropping the MR reference.
+
+Remove the duplicate unpin/free from the put_user() failure branch so
+that MR teardown is handled only through the existing final cleanup
+path.
+
+Fixes: 0d4597c8c5ab ("net/rds: Track user mapped pages through special API")
+Cc: stable@kernel.org
+Reported-by: Yuan Tan <yuantan098@gmail.com>
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Ao Zhou <draw51280@163.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Allison Henderson <achender@kernel.org>
+Link: https://patch.msgid.link/79c8ef73ec8e5844d71038983940cc2943099baf.1776764247.git.draw51280@163.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rds/rdma.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/net/rds/rdma.c
++++ b/net/rds/rdma.c
+@@ -326,10 +326,6 @@ static int __rds_rdma_map(struct rds_soc
+       if (args->cookie_addr &&
+           put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
+-              if (!need_odp) {
+-                      unpin_user_pages(pages, nr_pages);
+-                      kfree(sg);
+-              }
+               ret = -EFAULT;
+               goto out;
+       }
diff --git a/queue-7.0/net-smc-avoid-early-lgr-access-in-smc_clc_wait_msg.patch b/queue-7.0/net-smc-avoid-early-lgr-access-in-smc_clc_wait_msg.patch
new file mode 100644 (file)
index 0000000..c9b1036
--- /dev/null
@@ -0,0 +1,49 @@
+From 5a8db80f721deee8e916c2cfdee78decda02ce4f Mon Sep 17 00:00:00 2001
+From: Ruijie Li <ruijieli51@gmail.com>
+Date: Wed, 22 Apr 2026 23:40:18 +0800
+Subject: net/smc: avoid early lgr access in smc_clc_wait_msg
+
+From: Ruijie Li <ruijieli51@gmail.com>
+
+commit 5a8db80f721deee8e916c2cfdee78decda02ce4f upstream.
+
+A CLC decline can be received while the handshake is still in an early
+stage, before the connection has been associated with a link group.
+
+The decline handling in smc_clc_wait_msg() updates link-group level sync
+state for first-contact declines, but that state only exists after link
+group setup has completed. Guard the link-group update accordingly and
+keep the per-socket peer diagnosis handling unchanged.
+
+This preserves the existing sync_err handling for established link-group
+contexts and avoids touching link-group state before it is available.
+
+Fixes: 0cfdd8f92cac ("smc: connection and link group creation")
+Cc: stable@kernel.org
+Reported-by: Yuan Tan <yuantan098@gmail.com>
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Ruijie Li <ruijieli51@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
+Link: https://patch.msgid.link/08c68a5c817acf198cce63d22517e232e8d60718.1776850759.git.ruijieli51@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/smc/smc_clc.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -788,8 +788,8 @@ int smc_clc_wait_msg(struct smc_sock *sm
+               dclc = (struct smc_clc_msg_decline *)clcm;
+               reason_code = SMC_CLC_DECL_PEERDECL;
+               smc->peer_diagnosis = ntohl(dclc->peer_diagnosis);
+-              if (((struct smc_clc_msg_decline *)buf)->hdr.typev2 &
+-                                              SMC_FIRST_CONTACT_MASK) {
++              if ((dclc->hdr.typev2 & SMC_FIRST_CONTACT_MASK) &&
++                  smc->conn.lgr) {
+                       smc->conn.lgr->sync_err = 1;
+                       smc_lgr_terminate_sched(smc->conn.lgr);
+               }
diff --git a/queue-7.0/net-txgbe-fix-firmware-version-check.patch b/queue-7.0/net-txgbe-fix-firmware-version-check.patch
new file mode 100644 (file)
index 0000000..bde8fdb
--- /dev/null
@@ -0,0 +1,48 @@
+From c263f644add3d6ad81f9d62a99284fde408f0caa Mon Sep 17 00:00:00 2001
+From: Jiawen Wu <jiawenwu@trustnetic.com>
+Date: Wed, 22 Apr 2026 15:18:37 +0800
+Subject: net: txgbe: fix firmware version check
+
+From: Jiawen Wu <jiawenwu@trustnetic.com>
+
+commit c263f644add3d6ad81f9d62a99284fde408f0caa upstream.
+
+For the device SP, the firmware version is a 32-bit value where the
+lower 20 bits represent the base version number. And the customized
+firmware version populates the upper 12 bits with a specific
+identification number.
+
+For other devices AML 25G and 40G, the upper 12 bits of the firmware
+version is always non-zero, and they have other naming conventions.
+
+Only SP devices need to check this to tell if XPCS will work properly.
+So the judgement of MAC type is added here.
+
+And the original logic compared the entire 32-bit value against 0x20010,
+which caused the outdated base firmwares bypass the version check
+without a warning. Apply a mask 0xfffff to isolate the lower 20 bits for
+an accurate base version comparison.
+
+Fixes: ab928c24e6cd ("net: txgbe: add FW version warning")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/C787AA5C07598B13+20260422071837.372731-1-jiawenwu@trustnetic.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/wangxun/txgbe/txgbe_main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
++++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+@@ -867,7 +867,8 @@ static int txgbe_probe(struct pci_dev *p
+                        "0x%08x", etrack_id);
+       }
+-      if (etrack_id < 0x20010)
++      if (wx->mac.type == wx_mac_sp &&
++          ((etrack_id & 0xfffff) < 0x20010))
+               dev_warn(&pdev->dev, "Please upgrade the firmware to 0x20010 or above.\n");
+       err = txgbe_test_hostif(wx);
diff --git a/queue-7.0/netconsole-avoid-out-of-bounds-access-on-empty-string-in-trim_newline.patch b/queue-7.0/netconsole-avoid-out-of-bounds-access-on-empty-string-in-trim_newline.patch
new file mode 100644 (file)
index 0000000..0fd3725
--- /dev/null
@@ -0,0 +1,52 @@
+From 7079c8c13f2d33992bc846240517d88f4ab07781 Mon Sep 17 00:00:00 2001
+From: Breno Leitao <leitao@debian.org>
+Date: Mon, 20 Apr 2026 03:18:36 -0700
+Subject: netconsole: avoid out-of-bounds access on empty string in trim_newline()
+
+From: Breno Leitao <leitao@debian.org>
+
+commit 7079c8c13f2d33992bc846240517d88f4ab07781 upstream.
+
+trim_newline() unconditionally dereferences s[len - 1] after computing
+len = strnlen(s, maxlen). When the string is empty, len is 0 and the
+expression underflows to s[(size_t)-1], reading (and potentially
+writing) one byte before the buffer.
+
+The two callers feed trim_newline() with the result of strscpy() from
+configfs store callbacks (dev_name_store, userdatum_value_store).
+configfs guarantees count >= 1 reaches the callback, but the byte
+itself can be NUL: a userspace write(fd, "\0", 1) leaves the
+destination empty after strscpy() and triggers the underflow. The OOB
+write only fires if the adjacent byte happens to be '\n', so this is
+not a security issue, but the access is undefined behaviour either way.
+
+This pattern is commonly flagged by LLM-based code reviewers. While it
+is not a security fix, the underlying access is undefined behaviour and
+the change is small and self-contained, so it is a reasonable candidate
+for the stable trees.
+
+Guard the dereference on a non-zero length.
+
+Fixes: ae001dc67907 ("net: netconsole: move newline trimming to function")
+Cc: stable@vger.kernel.org
+Signed-off-by: Breno Leitao <leitao@debian.org>
+Reviewed-by: Gustavo Luiz Duarte <gustavold@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260420-netcons_trim_newline-v1-1-dc35889aeedf@debian.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/netconsole.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -497,6 +497,8 @@ static void trim_newline(char *s, size_t
+       size_t len;
+       len = strnlen(s, maxlen);
++      if (!len)
++              return;
+       if (s[len - 1] == '\n')
+               s[len - 1] = '\0';
+ }
diff --git a/queue-7.0/power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch b/queue-7.0/power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch
new file mode 100644 (file)
index 0000000..2bb94da
--- /dev/null
@@ -0,0 +1,83 @@
+From 658342fd75b582cbb06544d513171c3d645faead Mon Sep 17 00:00:00 2001
+From: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+Date: Fri, 20 Feb 2026 18:49:39 +0100
+Subject: power: supply: axp288_charger: Do not cancel work before initializing it
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+
+commit 658342fd75b582cbb06544d513171c3d645faead upstream.
+
+Driver registered devm handler to cancel_work_sync() before even the
+work was initialized, thus leading to possible warning from
+kernel/workqueue.c on (!work->func) check, if the error path was hit
+before the initialization happened.
+
+Use devm_work_autocancel() on each work item independently, which
+handles the initialization and handler to cancel work.
+
+Fixes: 165c2357744e ("power: supply: axp288_charger: Properly stop work on probe-error / remove")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+Reviewed-by: Hans de Goede <johannes.goede@oss.qualcomm.com>
+Reviewed-by: Chen-Yu Tsai <wens@kernel.org>
+Link: https://patch.msgid.link/20260220174938.672883-5-krzysztof.kozlowski@oss.qualcomm.com
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/power/supply/axp288_charger.c |   19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+--- a/drivers/power/supply/axp288_charger.c
++++ b/drivers/power/supply/axp288_charger.c
+@@ -10,6 +10,7 @@
+ #include <linux/acpi.h>
+ #include <linux/bitops.h>
+ #include <linux/module.h>
++#include <linux/devm-helpers.h>
+ #include <linux/device.h>
+ #include <linux/regmap.h>
+ #include <linux/workqueue.h>
+@@ -821,14 +822,6 @@ static int charger_init_hw_regs(struct a
+       return 0;
+ }
+-static void axp288_charger_cancel_work(void *data)
+-{
+-      struct axp288_chrg_info *info = data;
+-
+-      cancel_work_sync(&info->otg.work);
+-      cancel_work_sync(&info->cable.work);
+-}
+-
+ static int axp288_charger_probe(struct platform_device *pdev)
+ {
+       int ret, i, pirq;
+@@ -911,12 +904,12 @@ static int axp288_charger_probe(struct p
+       }
+       /* Cancel our work on cleanup, register this before the notifiers */
+-      ret = devm_add_action(dev, axp288_charger_cancel_work, info);
++      ret = devm_work_autocancel(dev, &info->cable.work,
++                                 axp288_charger_extcon_evt_worker);
+       if (ret)
+               return ret;
+       /* Register for extcon notification */
+-      INIT_WORK(&info->cable.work, axp288_charger_extcon_evt_worker);
+       info->cable.nb.notifier_call = axp288_charger_handle_cable_evt;
+       ret = devm_extcon_register_notifier_all(dev, info->cable.edev,
+                                               &info->cable.nb);
+@@ -926,8 +919,12 @@ static int axp288_charger_probe(struct p
+       }
+       schedule_work(&info->cable.work);
++      ret = devm_work_autocancel(dev, &info->otg.work,
++                                 axp288_charger_otg_evt_worker);
++      if (ret)
++              return ret;
++
+       /* Register for OTG notification */
+-      INIT_WORK(&info->otg.work, axp288_charger_otg_evt_worker);
+       info->otg.id_nb.notifier_call = axp288_charger_handle_otg_evt;
+       if (info->otg.cable) {
+               ret = devm_extcon_register_notifier(dev, info->otg.cable,
diff --git a/queue-7.0/printf-compile-the-kunit-test-with-disable_branch_profiling-disable_branch_profiling.patch b/queue-7.0/printf-compile-the-kunit-test-with-disable_branch_profiling-disable_branch_profiling.patch
new file mode 100644 (file)
index 0000000..a1352bb
--- /dev/null
@@ -0,0 +1,56 @@
+From 8901ac9d2c7eb8ed7ae5e749bf13ecb3b6062488 Mon Sep 17 00:00:00 2001
+From: Petr Mladek <pmladek@suse.com>
+Date: Tue, 14 Apr 2026 17:41:24 +0200
+Subject: printf: Compile the kunit test with DISABLE_BRANCH_PROFILING
+
+From: Petr Mladek <pmladek@suse.com>
+
+commit 8901ac9d2c7eb8ed7ae5e749bf13ecb3b6062488 upstream.
+
+GCC < 12.1 can miscompile printf_kunit's errptr() test when branch
+profiling is enabled. BUILD_BUG_ON(IS_ERR(PTR)) is a constant false
+expression, but CONFIG_TRACE_BRANCH_PROFILING and
+CONFIG_PROFILE_ALL_BRANCHES make the IS_ERR() path side-effectful.
+GCC's IPA splitter can then outline the cold assert arm into
+errptr.part.* and leave that clone with an unconditional
+__compiletime_assert_*() call, causing a false build failure.
+
+This started showing up after test_hashed() became a macro and moved its
+local buffer into errptr(), which changed GCC's inlining and splitting
+decisions enough to expose the compiler bug.
+
+Work around the problem by disabling branch profiling for
+printf_kunit.o. It is a straightforward and acceptable solution.
+
+The workaround can be removed once the minimum GCC includes commit
+76fe49423047 ("Fix tree-optimization/101941: IPA splitting out
+function with error attribute"), which first shipped in GCC 12.1.
+
+Fixes: 9bfa52dac27a ("printf: convert test_hashed into macro")
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202604030636.NqjaJvYp-lkp@intel.com/
+Cc: stable@vger.kernel.org
+Acked-by: Tamir Duberstein <tamird@kernel.org>
+Link: https://patch.msgid.link/ad5gJAX9f6dSQluz@pathway.suse.cz
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/tests/Makefile | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/lib/tests/Makefile b/lib/tests/Makefile
+index 05f74edbc62b..7e9c2fa52e35 100644
+--- a/lib/tests/Makefile
++++ b/lib/tests/Makefile
+@@ -40,6 +40,8 @@ obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o
+ obj-$(CONFIG_MIN_HEAP_KUNIT_TEST) += min_heap_kunit.o
+ CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare)
+ obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o
++# GCC < 12.1 can miscompile errptr() test when branch profiling is enabled.
++CFLAGS_printf_kunit.o += -DDISABLE_BRANCH_PROFILING
+ obj-$(CONFIG_PRINTF_KUNIT_TEST) += printf_kunit.o
+ obj-$(CONFIG_RANDSTRUCT_KUNIT_TEST) += randstruct_kunit.o
+ obj-$(CONFIG_SCANF_KUNIT_TEST) += scanf_kunit.o
+-- 
+2.54.0
+
diff --git a/queue-7.0/rdma-rxe-validate-pad-and-icrc-before-payload_size-in-rxe_rcv.patch b/queue-7.0/rdma-rxe-validate-pad-and-icrc-before-payload_size-in-rxe_rcv.patch
new file mode 100644 (file)
index 0000000..0326c62
--- /dev/null
@@ -0,0 +1,51 @@
+From 7244491dab347f648e661da96dc0febadd9daec3 Mon Sep 17 00:00:00 2001
+From: hkbinbin <hkbinbinbin@gmail.com>
+Date: Wed, 1 Apr 2026 12:19:07 +0000
+Subject: RDMA/rxe: Validate pad and ICRC before payload_size() in rxe_rcv
+
+From: hkbinbin <hkbinbinbin@gmail.com>
+
+commit 7244491dab347f648e661da96dc0febadd9daec3 upstream.
+
+rxe_rcv() currently checks only that the incoming packet is at least
+header_size(pkt) bytes long before payload_size() is used.
+
+However, payload_size() subtracts both the attacker-controlled BTH pad
+field and RXE_ICRC_SIZE from pkt->paylen:
+
+  payload_size = pkt->paylen - offset[RXE_PAYLOAD] - bth_pad(pkt)
+                 - RXE_ICRC_SIZE
+
+This means a short packet can still make payload_size() underflow even
+if it includes enough bytes for the fixed headers. Simply requiring
+header_size(pkt) + RXE_ICRC_SIZE is not sufficient either, because a
+packet with a forged non-zero BTH pad can still leave payload_size()
+negative and pass an underflowed value to later receive-path users.
+
+Fix this by validating pkt->paylen against the full minimum length
+required by payload_size(): header_size(pkt) + bth_pad(pkt) +
+RXE_ICRC_SIZE.
+
+Cc: stable@vger.kernel.org
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://patch.msgid.link/r/20260401121907.1468366-1-hkbinbinbin@gmail.com
+Signed-off-by: hkbinbin <hkbinbinbin@gmail.com>
+Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/sw/rxe/rxe_recv.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -330,7 +330,8 @@ void rxe_rcv(struct sk_buff *skb)
+       pkt->qp = NULL;
+       pkt->mask |= rxe_opcode[pkt->opcode].mask;
+-      if (unlikely(skb->len < header_size(pkt)))
++      if (unlikely(pkt->paylen < header_size(pkt) + bth_pad(pkt) +
++                     RXE_ICRC_SIZE))
+               goto drop;
+       err = hdr_check(pkt);
diff --git a/queue-7.0/selinux-fix-overlayfs-mmap-and-mprotect-access-checks.patch b/queue-7.0/selinux-fix-overlayfs-mmap-and-mprotect-access-checks.patch
new file mode 100644 (file)
index 0000000..ed8ee59
--- /dev/null
@@ -0,0 +1,419 @@
+From 82544d36b1729153c8aeb179e84750f0c085d3b1 Mon Sep 17 00:00:00 2001
+From: Paul Moore <paul@paul-moore.com>
+Date: Thu, 1 Jan 2026 17:19:18 -0500
+Subject: selinux: fix overlayfs mmap() and mprotect() access checks
+
+From: Paul Moore <paul@paul-moore.com>
+
+commit 82544d36b1729153c8aeb179e84750f0c085d3b1 upstream.
+
+The existing SELinux security model for overlayfs is to allow access if
+the current task is able to access the top level file (the "user" file)
+and the mounter's credentials are sufficient to access the lower
+level file (the "backing" file).  Unfortunately, the current code does
+not properly enforce these access controls for both mmap() and mprotect()
+operations on overlayfs filesystems.
+
+This patch makes use of the newly created security_mmap_backing_file()
+LSM hook to provide the missing backing file enforcement for mmap()
+operations, and leverages the backing file API and new LSM blob to
+provide the necessary information to properly enforce the mprotect()
+access controls.
+
+Cc: stable@vger.kernel.org
+Acked-by: Amir Goldstein <amir73il@gmail.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/selinux/hooks.c          |  244 +++++++++++++++++++++++++++-----------
+ security/selinux/include/objsec.h |   11 +
+ 2 files changed, 190 insertions(+), 65 deletions(-)
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1745,49 +1745,72 @@ static inline int file_path_has_perm(con
+ static int bpf_fd_pass(const struct file *file, u32 sid);
+ #endif
+-/* Check whether a task can use an open file descriptor to
+-   access an inode in a given way.  Check access to the
+-   descriptor itself, and then use dentry_has_perm to
+-   check a particular permission to the file.
+-   Access to the descriptor is implicitly granted if it
+-   has the same SID as the process.  If av is zero, then
+-   access to the file is not checked, e.g. for cases
+-   where only the descriptor is affected like seek. */
+-static int file_has_perm(const struct cred *cred,
+-                       struct file *file,
+-                       u32 av)
++static int __file_has_perm(const struct cred *cred, const struct file *file,
++                         u32 av, bool bf_user_file)
++
+ {
+-      struct file_security_struct *fsec = selinux_file(file);
+-      struct inode *inode = file_inode(file);
+       struct common_audit_data ad;
+-      u32 sid = cred_sid(cred);
++      struct inode *inode;
++      u32 ssid = cred_sid(cred);
++      u32 tsid_fd;
+       int rc;
+-      ad.type = LSM_AUDIT_DATA_FILE;
+-      ad.u.file = file;
++      if (bf_user_file) {
++              struct backing_file_security_struct *bfsec;
++              const struct path *path;
++
++              if (WARN_ON(!(file->f_mode & FMODE_BACKING)))
++                      return -EIO;
++
++              bfsec = selinux_backing_file(file);
++              path = backing_file_user_path(file);
++              tsid_fd = bfsec->uf_sid;
++              inode = d_inode(path->dentry);
+-      if (sid != fsec->sid) {
+-              rc = avc_has_perm(sid, fsec->sid,
+-                                SECCLASS_FD,
+-                                FD__USE,
+-                                &ad);
++              ad.type = LSM_AUDIT_DATA_PATH;
++              ad.u.path = *path;
++      } else {
++              struct file_security_struct *fsec = selinux_file(file);
++
++              tsid_fd = fsec->sid;
++              inode = file_inode(file);
++
++              ad.type = LSM_AUDIT_DATA_FILE;
++              ad.u.file = file;
++      }
++
++      if (ssid != tsid_fd) {
++              rc = avc_has_perm(ssid, tsid_fd, SECCLASS_FD, FD__USE, &ad);
+               if (rc)
+-                      goto out;
++                      return rc;
+       }
+ #ifdef CONFIG_BPF_SYSCALL
+-      rc = bpf_fd_pass(file, cred_sid(cred));
++      /* regardless of backing vs user file, use the underlying file here */
++      rc = bpf_fd_pass(file, ssid);
+       if (rc)
+               return rc;
+ #endif
+       /* av is zero if only checking access to the descriptor. */
+-      rc = 0;
+       if (av)
+-              rc = inode_has_perm(cred, inode, av, &ad);
++              return inode_has_perm(cred, inode, av, &ad);
+-out:
+-      return rc;
++      return 0;
++}
++
++/* Check whether a task can use an open file descriptor to
++   access an inode in a given way.  Check access to the
++   descriptor itself, and then use dentry_has_perm to
++   check a particular permission to the file.
++   Access to the descriptor is implicitly granted if it
++   has the same SID as the process.  If av is zero, then
++   access to the file is not checked, e.g. for cases
++   where only the descriptor is affected like seek. */
++static inline int file_has_perm(const struct cred *cred,
++                              const struct file *file, u32 av)
++{
++      return __file_has_perm(cred, file, av, false);
+ }
+ /*
+@@ -3825,6 +3848,17 @@ static int selinux_file_alloc_security(s
+       return 0;
+ }
++static int selinux_backing_file_alloc(struct file *backing_file,
++                                    const struct file *user_file)
++{
++      struct backing_file_security_struct *bfsec;
++
++      bfsec = selinux_backing_file(backing_file);
++      bfsec->uf_sid = selinux_file(user_file)->sid;
++
++      return 0;
++}
++
+ /*
+  * Check whether a task has the ioctl permission and cmd
+  * operation to an inode.
+@@ -3942,42 +3976,55 @@ static int selinux_file_ioctl_compat(str
+ static int default_noexec __ro_after_init;
+-static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
+-{
+-      const struct cred *cred = current_cred();
+-      u32 sid = cred_sid(cred);
+-      int rc = 0;
++static int __file_map_prot_check(const struct cred *cred,
++                               const struct file *file, unsigned long prot,
++                               bool shared, bool bf_user_file)
++{
++      struct inode *inode = NULL;
++      bool prot_exec = prot & PROT_EXEC;
++      bool prot_write = prot & PROT_WRITE;
++
++      if (file) {
++              if (bf_user_file)
++                      inode = d_inode(backing_file_user_path(file)->dentry);
++              else
++                      inode = file_inode(file);
++      }
++
++      if (default_noexec && prot_exec &&
++          (!file || IS_PRIVATE(inode) || (!shared && prot_write))) {
++              int rc;
++              u32 sid = cred_sid(cred);
+-      if (default_noexec &&
+-          (prot & PROT_EXEC) && (!file || IS_PRIVATE(file_inode(file)) ||
+-                                 (!shared && (prot & PROT_WRITE)))) {
+               /*
+-               * We are making executable an anonymous mapping or a
+-               * private file mapping that will also be writable.
+-               * This has an additional check.
++               * We are making executable an anonymous mapping or a private
++               * file mapping that will also be writable.
+                */
+-              rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+-                                PROCESS__EXECMEM, NULL);
++              rc = avc_has_perm(sid, sid, SECCLASS_PROCESS, PROCESS__EXECMEM,
++                                NULL);
+               if (rc)
+-                      goto error;
++                      return rc;
+       }
+       if (file) {
+-              /* read access is always possible with a mapping */
++              /* "read" always possible, "write" only if shared */
+               u32 av = FILE__READ;
+-
+-              /* write access only matters if the mapping is shared */
+-              if (shared && (prot & PROT_WRITE))
++              if (shared && prot_write)
+                       av |= FILE__WRITE;
+-
+-              if (prot & PROT_EXEC)
++              if (prot_exec)
+                       av |= FILE__EXECUTE;
+-              return file_has_perm(cred, file, av);
++              return __file_has_perm(cred, file, av, bf_user_file);
+       }
+-error:
+-      return rc;
++      return 0;
++}
++
++static inline int file_map_prot_check(const struct cred *cred,
++                                    const struct file *file,
++                                    unsigned long prot, bool shared)
++{
++      return __file_map_prot_check(cred, file, prot, shared, false);
+ }
+ static int selinux_mmap_addr(unsigned long addr)
+@@ -3993,36 +4040,80 @@ static int selinux_mmap_addr(unsigned lo
+       return rc;
+ }
+-static int selinux_mmap_file(struct file *file,
+-                           unsigned long reqprot __always_unused,
+-                           unsigned long prot, unsigned long flags)
++static int selinux_mmap_file_common(const struct cred *cred, struct file *file,
++                                  unsigned long prot, bool shared)
+ {
+-      struct common_audit_data ad;
+-      int rc;
+-
+       if (file) {
++              int rc;
++              struct common_audit_data ad;
++
+               ad.type = LSM_AUDIT_DATA_FILE;
+               ad.u.file = file;
+-              rc = inode_has_perm(current_cred(), file_inode(file),
+-                                  FILE__MAP, &ad);
++              rc = inode_has_perm(cred, file_inode(file), FILE__MAP, &ad);
+               if (rc)
+                       return rc;
+       }
+-      return file_map_prot_check(file, prot,
+-                                 (flags & MAP_TYPE) == MAP_SHARED);
++      return file_map_prot_check(cred, file, prot, shared);
++}
++
++static int selinux_mmap_file(struct file *file,
++                           unsigned long reqprot __always_unused,
++                           unsigned long prot, unsigned long flags)
++{
++      return selinux_mmap_file_common(current_cred(), file, prot,
++                                      (flags & MAP_TYPE) == MAP_SHARED);
++}
++
++/**
++ * selinux_mmap_backing_file - Check mmap permissions on a backing file
++ * @vma: memory region
++ * @backing_file: stacked filesystem backing file
++ * @user_file: user visible file
++ *
++ * This is called after selinux_mmap_file() on stacked filesystems, and it
++ * is this function's responsibility to verify access to @backing_file and
++ * setup the SELinux state for possible later use in the mprotect() code path.
++ *
++ * By the time this function is called, mmap() access to @user_file has already
++ * been authorized and @vma->vm_file has been set to point to @backing_file.
++ *
++ * Return zero on success, negative values otherwise.
++ */
++static int selinux_mmap_backing_file(struct vm_area_struct *vma,
++                                   struct file *backing_file,
++                                   struct file *user_file __always_unused)
++{
++      unsigned long prot = 0;
++
++      /* translate vma->vm_flags perms into PROT perms */
++      if (vma->vm_flags & VM_READ)
++              prot |= PROT_READ;
++      if (vma->vm_flags & VM_WRITE)
++              prot |= PROT_WRITE;
++      if (vma->vm_flags & VM_EXEC)
++              prot |= PROT_EXEC;
++
++      return selinux_mmap_file_common(backing_file->f_cred, backing_file,
++                                      prot, vma->vm_flags & VM_SHARED);
+ }
+ static int selinux_file_mprotect(struct vm_area_struct *vma,
+                                unsigned long reqprot __always_unused,
+                                unsigned long prot)
+ {
++      int rc;
+       const struct cred *cred = current_cred();
+       u32 sid = cred_sid(cred);
++      const struct file *file = vma->vm_file;
++      bool backing_file;
++      bool shared = vma->vm_flags & VM_SHARED;
++
++      /* check if we need to trigger the "backing files are awful" mode */
++      backing_file = file && (file->f_mode & FMODE_BACKING);
+       if (default_noexec &&
+           (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+-              int rc = 0;
+               /*
+                * We don't use the vma_is_initial_heap() helper as it has
+                * a history of problems and is currently broken on systems
+@@ -4036,11 +4127,15 @@ static int selinux_file_mprotect(struct
+                   vma->vm_end <= vma->vm_mm->brk) {
+                       rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+                                         PROCESS__EXECHEAP, NULL);
+-              } else if (!vma->vm_file && (vma_is_initial_stack(vma) ||
++                      if (rc)
++                              return rc;
++              } else if (!file && (vma_is_initial_stack(vma) ||
+                           vma_is_stack_for_current(vma))) {
+                       rc = avc_has_perm(sid, sid, SECCLASS_PROCESS,
+                                         PROCESS__EXECSTACK, NULL);
+-              } else if (vma->vm_file && vma->anon_vma) {
++                      if (rc)
++                              return rc;
++              } else if (file && vma->anon_vma) {
+                       /*
+                        * We are making executable a file mapping that has
+                        * had some COW done. Since pages might have been
+@@ -4048,13 +4143,29 @@ static int selinux_file_mprotect(struct
+                        * modified content.  This typically should only
+                        * occur for text relocations.
+                        */
+-                      rc = file_has_perm(cred, vma->vm_file, FILE__EXECMOD);
++                      rc = __file_has_perm(cred, file, FILE__EXECMOD,
++                                           backing_file);
++                      if (rc)
++                              return rc;
++                      if (backing_file) {
++                              rc = file_has_perm(file->f_cred, file,
++                                                 FILE__EXECMOD);
++                              if (rc)
++                                      return rc;
++                      }
+               }
++      }
++
++      rc = __file_map_prot_check(cred, file, prot, shared, backing_file);
++      if (rc)
++              return rc;
++      if (backing_file) {
++              rc = file_map_prot_check(file->f_cred, file, prot, shared);
+               if (rc)
+                       return rc;
+       }
+-      return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
++      return 0;
+ }
+ static int selinux_file_lock(struct file *file, unsigned int cmd)
+@@ -7393,6 +7504,7 @@ struct lsm_blob_sizes selinux_blob_sizes
+       .lbs_cred = sizeof(struct cred_security_struct),
+       .lbs_task = sizeof(struct task_security_struct),
+       .lbs_file = sizeof(struct file_security_struct),
++      .lbs_backing_file = sizeof(struct backing_file_security_struct),
+       .lbs_inode = sizeof(struct inode_security_struct),
+       .lbs_ipc = sizeof(struct ipc_security_struct),
+       .lbs_key = sizeof(struct key_security_struct),
+@@ -7498,9 +7610,11 @@ static struct security_hook_list selinux
+       LSM_HOOK_INIT(file_permission, selinux_file_permission),
+       LSM_HOOK_INIT(file_alloc_security, selinux_file_alloc_security),
++      LSM_HOOK_INIT(backing_file_alloc, selinux_backing_file_alloc),
+       LSM_HOOK_INIT(file_ioctl, selinux_file_ioctl),
+       LSM_HOOK_INIT(file_ioctl_compat, selinux_file_ioctl_compat),
+       LSM_HOOK_INIT(mmap_file, selinux_mmap_file),
++      LSM_HOOK_INIT(mmap_backing_file, selinux_mmap_backing_file),
+       LSM_HOOK_INIT(mmap_addr, selinux_mmap_addr),
+       LSM_HOOK_INIT(file_mprotect, selinux_file_mprotect),
+       LSM_HOOK_INIT(file_lock, selinux_file_lock),
+--- a/security/selinux/include/objsec.h
++++ b/security/selinux/include/objsec.h
+@@ -88,6 +88,10 @@ struct file_security_struct {
+       u32 pseqno; /* Policy seqno at the time of file open */
+ };
++struct backing_file_security_struct {
++      u32 uf_sid; /* associated user file fsec->sid */
++};
++
+ struct superblock_security_struct {
+       u32 sid; /* SID of file system superblock */
+       u32 def_sid; /* default SID for labeling */
+@@ -195,6 +199,13 @@ static inline struct file_security_struc
+       return file->f_security + selinux_blob_sizes.lbs_file;
+ }
++static inline struct backing_file_security_struct *
++selinux_backing_file(const struct file *backing_file)
++{
++      void *blob = backing_file_security(backing_file);
++      return blob + selinux_blob_sizes.lbs_backing_file;
++}
++
+ static inline struct inode_security_struct *
+ selinux_inode(const struct inode *inode)
+ {
index 920faec8f0eea00d99a243fcfc48c6edfd6e64b3..ad00972e9ec68a5503d208d0ef733a3463e1defe 100644 (file)
@@ -133,3 +133,38 @@ rxrpc-fix-rxrpc_input_call_event-to-only-unshare-data-packets.patch
 edac-versalnet-fix-memory-leak-in-remove-and-probe-error-paths.patch
 tools-accounting-handle-truncated-taskstats-netlink-messages.patch
 net-txgbe-fix-rtnl-assertion-warning-when-remove-module.patch
+arm64-dts-marvell-udpu-add-ethernet-aliases.patch
+net-qrtr-ns-limit-the-maximum-server-registration-per-node.patch
+net-qrtr-ns-limit-the-maximum-number-of-lookups.patch
+net-qrtr-ns-free-the-node-during-ctrl_cmd_bye.patch
+net-qrtr-ns-limit-the-total-number-of-nodes.patch
+net-rds-fix-mr-cleanup-on-copy-error.patch
+net-txgbe-fix-firmware-version-check.patch
+net-smc-avoid-early-lgr-access-in-smc_clc_wait_msg.patch
+net-ks8851-reinstate-disabling-of-bhs-around-irq-handler.patch
+net-bridge-use-a-stable-fdb-dst-snapshot-in-rcu-readers.patch
+netconsole-avoid-out-of-bounds-access-on-empty-string-in-trim_newline.patch
+net-mctp-fix-don-t-require-received-header-reserved-bits-to-be-zero.patch
+net-ks8851-avoid-excess-softirq-scheduling.patch
+drm-arcpgu-fix-device-node-leak.patch
+slub-fix-data-loss-and-overflow-in-krealloc.patch
+tracing-fprobe-reject-registration-of-a-registered-fprobe-before-init.patch
+rdma-rxe-validate-pad-and-icrc-before-payload_size-in-rxe_rcv.patch
+printf-compile-the-kunit-test-with-disable_branch_profiling-disable_branch_profiling.patch
+ipv4-icmp-validate-reply-type-before-using-icmp_pointers.patch
+libceph-prevent-potential-null-ptr-deref-in-ceph_handle_auth_reply.patch
+spi-fix-resource-leaks-on-device-setup-failure.patch
+apparmor-fix-string-overrun-due-to-missing-termination.patch
+extract-cert-wrap-key_pass-with-ifdef-use_pkcs11_engine.patch
+tpm-avoid-wunused-but-set-variable.patch
+loongarch-make-arch_irq_work_has_interrupt-true-only-if-ipi-hw-exist.patch
+loongarch-show-cpu-vulnerabilites-correctly.patch
+fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
+power-supply-axp288_charger-do-not-cancel-work-before-initializing-it.patch
+hwmon-isl28022-fix-integer-overflow-in-power-calculation-on-32-bit.patch
+hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
+media-rzv2h-ivc-revise-default-vblank-formula.patch
+media-rzv2h-ivc-fix-axirx_vblank-register-write.patch
+fs-prepare-for-adding-lsm-blob-to-backing_file.patch
+lsm-add-backing_file-lsm-hooks.patch
+selinux-fix-overlayfs-mmap-and-mprotect-access-checks.patch
diff --git a/queue-7.0/slub-fix-data-loss-and-overflow-in-krealloc.patch b/queue-7.0/slub-fix-data-loss-and-overflow-in-krealloc.patch
new file mode 100644 (file)
index 0000000..eaf7c69
--- /dev/null
@@ -0,0 +1,129 @@
+From 082a6d03a2d685a83a332666b500ad3966349588 Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Thu, 16 Apr 2026 15:25:07 +0200
+Subject: slub: fix data loss and overflow in krealloc()
+
+From: Marco Elver <elver@google.com>
+
+commit 082a6d03a2d685a83a332666b500ad3966349588 upstream.
+
+Commit 2cd8231796b5 ("mm/slub: allow to set node and align in
+k[v]realloc") introduced the ability to force a reallocation if the
+original object does not satisfy new alignment or NUMA node, even when
+the object is being shrunk.
+
+This introduced two bugs in the reallocation fallback path:
+
+1. Data loss during NUMA migration: The jump to 'alloc_new' happens
+   before 'ks' and 'orig_size' are initialized. As a result, the
+   memcpy() in the 'alloc_new' block would copy 0 bytes into the new
+   allocation.
+
+2. Buffer overflow during shrinking: When shrinking an object while
+   forcing a new alignment, 'new_size' is smaller than the old size.
+   However, the memcpy() used the old size ('orig_size ?: ks'), leading
+   to an out-of-bounds write.
+
+The same overflow bug exists in the kvrealloc() fallback path, where the
+old bucket size ksize(p) is copied into the new buffer without being
+bounded by the new size.
+
+A simple reproducer:
+
+       // e.g. add to lkdtm as KREALLOC_SHRINK_OVERFLOW
+       while (1) {
+               void *p = kmalloc(128, GFP_KERNEL);
+               p = krealloc_node_align(p, 64, 256, GFP_KERNEL, NUMA_NO_NODE);
+               kfree(p);
+       }
+
+demonstrates the issue:
+
+  ==================================================================
+  BUG: KFENCE: out-of-bounds write in memcpy_orig+0x68/0x130
+
+  Out-of-bounds write at 0xffff8883ad757038 (120B right of kfence-#47):
+   memcpy_orig+0x68/0x130
+   krealloc_node_align_noprof+0x1c8/0x340
+   lkdtm_KREALLOC_SHRINK_OVERFLOW+0x8c/0xc0 [lkdtm]
+   lkdtm_do_action+0x3a/0x60 [lkdtm]
+   ...
+
+  kfence-#47: 0xffff8883ad756fc0-0xffff8883ad756fff, size=64, cache=kmalloc-64
+
+  allocated by task 316 on cpu 7 at 97.680481s (0.021813s ago):
+   krealloc_node_align_noprof+0x19c/0x340
+   lkdtm_KREALLOC_SHRINK_OVERFLOW+0x8c/0xc0 [lkdtm]
+   lkdtm_do_action+0x3a/0x60 [lkdtm]
+   ...
+  ==================================================================
+
+Fix it by moving the old size calculation to the top of __do_krealloc()
+and bounding all copy lengths by the new allocation size.
+
+Fixes: 2cd8231796b5 ("mm/slub: allow to set node and align in k[v]realloc")
+Cc: stable@vger.kernel.org
+Reported-by: https://sashiko.dev/#/patchset/20260415143735.2974230-1-elver%40google.com
+Signed-off-by: Marco Elver <elver@google.com>
+Link: https://patch.msgid.link/20260416132837.3787694-1-elver@google.com
+Reviewed-by: Harry Yoo (Oracle) <harry@kernel.org>
+Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |   24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -6569,16 +6569,6 @@ __do_krealloc(const void *p, size_t new_
+       if (!kasan_check_byte(p))
+               return NULL;
+-      /*
+-       * If reallocation is not necessary (e. g. the new size is less
+-       * than the current allocated size), the current allocation will be
+-       * preserved unless __GFP_THISNODE is set. In the latter case a new
+-       * allocation on the requested node will be attempted.
+-       */
+-      if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
+-                   nid != page_to_nid(virt_to_page(p)))
+-              goto alloc_new;
+-
+       if (is_kfence_address(p)) {
+               ks = orig_size = kfence_ksize(p);
+       } else {
+@@ -6597,6 +6587,16 @@ __do_krealloc(const void *p, size_t new_
+               }
+       }
++      /*
++       * If reallocation is not necessary (e. g. the new size is less
++       * than the current allocated size), the current allocation will be
++       * preserved unless __GFP_THISNODE is set. In the latter case a new
++       * allocation on the requested node will be attempted.
++       */
++      if (unlikely(flags & __GFP_THISNODE) && nid != NUMA_NO_NODE &&
++                   nid != page_to_nid(virt_to_page(p)))
++              goto alloc_new;
++
+       /* If the old object doesn't fit, allocate a bigger one */
+       if (new_size > ks)
+               goto alloc_new;
+@@ -6631,7 +6631,7 @@ alloc_new:
+       if (ret && p) {
+               /* Disable KASAN checks as the object's redzone is accessed. */
+               kasan_disable_current();
+-              memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
++              memcpy(ret, kasan_reset_tag(p), min(new_size, (size_t)(orig_size ?: ks)));
+               kasan_enable_current();
+       }
+@@ -6865,7 +6865,7 @@ void *kvrealloc_node_align_noprof(const
+               if (p) {
+                       /* We already know that `p` is not a vmalloc address. */
+                       kasan_disable_current();
+-                      memcpy(n, kasan_reset_tag(p), ksize(p));
++                      memcpy(n, kasan_reset_tag(p), min(size, ksize(p)));
+                       kasan_enable_current();
+                       kfree(p);
diff --git a/queue-7.0/spi-fix-resource-leaks-on-device-setup-failure.patch b/queue-7.0/spi-fix-resource-leaks-on-device-setup-failure.patch
new file mode 100644 (file)
index 0000000..ed8c6d5
--- /dev/null
@@ -0,0 +1,129 @@
+From db357034f7e0cf23f233f414a8508312dfe8fbbe Mon Sep 17 00:00:00 2001
+From: Johan Hovold <johan@kernel.org>
+Date: Fri, 10 Apr 2026 17:49:06 +0200
+Subject: spi: fix resource leaks on device setup failure
+
+From: Johan Hovold <johan@kernel.org>
+
+commit db357034f7e0cf23f233f414a8508312dfe8fbbe upstream.
+
+Make sure to call controller cleanup() if spi_setup() fails while
+registering a device to avoid leaking any resources allocated by
+setup().
+
+Fixes: c7299fea6769 ("spi: Fix spi device unregister flow")
+Cc: stable@vger.kernel.org     # 5.13
+Cc: Saravana Kannan <saravanak@kernel.org>
+Signed-off-by: Johan Hovold <johan@kernel.org>
+Link: https://patch.msgid.link/20260410154907.129248-2-johan@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/spi/spi.c |   61 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 37 insertions(+), 24 deletions(-)
+
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -43,6 +43,8 @@ EXPORT_TRACEPOINT_SYMBOL(spi_transfer_st
+ #include "internals.h"
++static int __spi_setup(struct spi_device *spi, bool initial_setup);
++
+ static DEFINE_IDR(spi_controller_idr);
+ static void spidev_release(struct device *dev)
+@@ -729,7 +731,7 @@ static int __spi_add_device(struct spi_d
+        * normally rely on the device being setup.  Devices
+        * using SPI_CS_HIGH can't coexist well otherwise...
+        */
+-      status = spi_setup(spi);
++      status = __spi_setup(spi, true);
+       if (status < 0) {
+               dev_err(dev, "can't setup %s, status %d\n",
+                               dev_name(&spi->dev), status);
+@@ -3993,27 +3995,7 @@ static int spi_set_cs_timing(struct spi_
+       return status;
+ }
+-/**
+- * spi_setup - setup SPI mode and clock rate
+- * @spi: the device whose settings are being modified
+- * Context: can sleep, and no requests are queued to the device
+- *
+- * SPI protocol drivers may need to update the transfer mode if the
+- * device doesn't work with its default.  They may likewise need
+- * to update clock rates or word sizes from initial values.  This function
+- * changes those settings, and must be called from a context that can sleep.
+- * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+- * effect the next time the device is selected and data is transferred to
+- * or from it.  When this function returns, the SPI device is deselected.
+- *
+- * Note that this call will fail if the protocol driver specifies an option
+- * that the underlying controller or its driver does not support.  For
+- * example, not all hardware supports wire transfers using nine bit words,
+- * LSB-first wire encoding, or active-high chipselects.
+- *
+- * Return: zero on success, else a negative error code.
+- */
+-int spi_setup(struct spi_device *spi)
++static int __spi_setup(struct spi_device *spi, bool initial_setup)
+ {
+       unsigned        bad_bits, ugly_bits;
+       int             status;
+@@ -4098,7 +4080,7 @@ int spi_setup(struct spi_device *spi)
+       status = spi_set_cs_timing(spi);
+       if (status) {
+               mutex_unlock(&spi->controller->io_mutex);
+-              return status;
++              goto err_cleanup;
+       }
+       if (spi->controller->auto_runtime_pm && spi->controller->set_cs) {
+@@ -4107,7 +4089,7 @@ int spi_setup(struct spi_device *spi)
+                       mutex_unlock(&spi->controller->io_mutex);
+                       dev_err(&spi->controller->dev, "Failed to power device: %d\n",
+                               status);
+-                      return status;
++                      goto err_cleanup;
+               }
+               /*
+@@ -4143,6 +4125,37 @@ int spi_setup(struct spi_device *spi)
+                       status);
+       return status;
++
++err_cleanup:
++      if (initial_setup)
++              spi_cleanup(spi);
++
++      return status;
++}
++
++/**
++ * spi_setup - setup SPI mode and clock rate
++ * @spi: the device whose settings are being modified
++ * Context: can sleep, and no requests are queued to the device
++ *
++ * SPI protocol drivers may need to update the transfer mode if the
++ * device doesn't work with its default.  They may likewise need
++ * to update clock rates or word sizes from initial values.  This function
++ * changes those settings, and must be called from a context that can sleep.
++ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
++ * effect the next time the device is selected and data is transferred to
++ * or from it.  When this function returns, the SPI device is deselected.
++ *
++ * Note that this call will fail if the protocol driver specifies an option
++ * that the underlying controller or its driver does not support.  For
++ * example, not all hardware supports wire transfers using nine bit words,
++ * LSB-first wire encoding, or active-high chipselects.
++ *
++ * Return: zero on success, else a negative error code.
++ */
++int spi_setup(struct spi_device *spi)
++{
++      return __spi_setup(spi, false);
+ }
+ EXPORT_SYMBOL_GPL(spi_setup);
diff --git a/queue-7.0/tpm-avoid-wunused-but-set-variable.patch b/queue-7.0/tpm-avoid-wunused-but-set-variable.patch
new file mode 100644 (file)
index 0000000..253987a
--- /dev/null
@@ -0,0 +1,52 @@
+From 6f1d4d2ecfcd1b577dc87350ea965fe81f272e83 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 22 Mar 2024 14:22:48 +0100
+Subject: tpm: avoid -Wunused-but-set-variable
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit 6f1d4d2ecfcd1b577dc87350ea965fe81f272e83 upstream.
+
+Outside of the EFI tpm code, the TPM_MEMREMAP()/TPM_MEMUNMAP functions are
+defined as trivial macros, leading to the mapping_size variable ending
+up unused:
+
+In file included from drivers/char/tpm/tpm-sysfs.c:16:
+In file included from drivers/char/tpm/tpm.h:28:
+include/linux/tpm_eventlog.h:167:6: error: variable 'mapping_size' set but not used [-Werror,-Wunused-but-set-variable]
+  167 |         int mapping_size;
+
+Turn the stubs into inline functions to avoid this warning.
+
+Cc: stable@vger.kernel.org # v5.3+
+Fixes: c46f3405692d ("tpm: Reserve the TPM final events table")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Thorsten Blum <thorsten.blum@linux.dev>
+Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/tpm_eventlog.h |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/include/linux/tpm_eventlog.h
++++ b/include/linux/tpm_eventlog.h
+@@ -131,11 +131,16 @@ struct tcg_algorithm_info {
+ };
+ #ifndef TPM_MEMREMAP
+-#define TPM_MEMREMAP(start, size) NULL
++static inline void *TPM_MEMREMAP(unsigned long start, size_t size)
++{
++      return NULL;
++}
+ #endif
+ #ifndef TPM_MEMUNMAP
+-#define TPM_MEMUNMAP(start, size) do{} while(0)
++static inline void TPM_MEMUNMAP(void *mapping, size_t size)
++{
++}
+ #endif
+ /**
diff --git a/queue-7.0/tracing-fprobe-reject-registration-of-a-registered-fprobe-before-init.patch b/queue-7.0/tracing-fprobe-reject-registration-of-a-registered-fprobe-before-init.patch
new file mode 100644 (file)
index 0000000..c83f708
--- /dev/null
@@ -0,0 +1,115 @@
+From 6ad51ada17ed80c9a5f205b4c01c424cac8b0d46 Mon Sep 17 00:00:00 2001
+From: "Masami Hiramatsu (Google)" <mhiramat@kernel.org>
+Date: Mon, 20 Apr 2026 23:00:48 +0900
+Subject: tracing/fprobe: Reject registration of a registered fprobe before init
+
+From: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+
+commit 6ad51ada17ed80c9a5f205b4c01c424cac8b0d46 upstream.
+
+Reject registration of a registered fprobe which is on the fprobe
+hash table before initializing fprobe.
+The add_fprobe_hash() checks this re-register fprobe, but since
+fprobe_init() clears hlist_array field, it is too late to check it.
+It has to check the re-registration before touching fprobe.
+
+Link: https://lore.kernel.org/all/177669364845.132053.18375367916162315835.stgit@mhiramat.tok.corp.google.com/
+
+Fixes: 4346ba160409 ("fprobe: Rewrite fprobe on function-graph tracer")
+Cc: stable@vger.kernel.org
+Signed-off-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/fprobe.c |   21 ++++++++++-----------
+ 1 file changed, 10 insertions(+), 11 deletions(-)
+
+--- a/kernel/trace/fprobe.c
++++ b/kernel/trace/fprobe.c
+@@ -4,6 +4,7 @@
+  */
+ #define pr_fmt(fmt) "fprobe: " fmt
++#include <linux/cleanup.h>
+ #include <linux/err.h>
+ #include <linux/fprobe.h>
+ #include <linux/kallsyms.h>
+@@ -107,7 +108,7 @@ static bool delete_fprobe_node(struct fp
+ }
+ /* Check existence of the fprobe */
+-static bool is_fprobe_still_exist(struct fprobe *fp)
++static bool fprobe_registered(struct fprobe *fp)
+ {
+       struct hlist_head *head;
+       struct fprobe_hlist *fph;
+@@ -120,7 +121,7 @@ static bool is_fprobe_still_exist(struct
+       }
+       return false;
+ }
+-NOKPROBE_SYMBOL(is_fprobe_still_exist);
++NOKPROBE_SYMBOL(fprobe_registered);
+ static int add_fprobe_hash(struct fprobe *fp)
+ {
+@@ -132,9 +133,6 @@ static int add_fprobe_hash(struct fprobe
+       if (WARN_ON_ONCE(!fph))
+               return -EINVAL;
+-      if (is_fprobe_still_exist(fp))
+-              return -EEXIST;
+-
+       head = &fprobe_table[hash_ptr(fp, FPROBE_HASH_BITS)];
+       hlist_add_head_rcu(&fp->hlist_array->hlist, head);
+       return 0;
+@@ -149,7 +147,7 @@ static int del_fprobe_hash(struct fprobe
+       if (WARN_ON_ONCE(!fph))
+               return -EINVAL;
+-      if (!is_fprobe_still_exist(fp))
++      if (!fprobe_registered(fp))
+               return -ENOENT;
+       fph->fp = NULL;
+@@ -482,7 +480,7 @@ static void fprobe_return(struct ftrace_
+               if (!fp)
+                       break;
+               curr += FPROBE_HEADER_SIZE_IN_LONG;
+-              if (is_fprobe_still_exist(fp) && !fprobe_disabled(fp)) {
++              if (fprobe_registered(fp) && !fprobe_disabled(fp)) {
+                       if (WARN_ON_ONCE(curr + size > size_words))
+                               break;
+                       fp->exit_handler(fp, trace->func, ret_ip, fregs,
+@@ -841,12 +839,14 @@ int register_fprobe_ips(struct fprobe *f
+       struct fprobe_hlist *hlist_array;
+       int ret, i;
++      guard(mutex)(&fprobe_mutex);
++      if (fprobe_registered(fp))
++              return -EEXIST;
++
+       ret = fprobe_init(fp, addrs, num);
+       if (ret)
+               return ret;
+-      mutex_lock(&fprobe_mutex);
+-
+       hlist_array = fp->hlist_array;
+       if (fprobe_is_ftrace(fp))
+               ret = fprobe_ftrace_add_ips(addrs, num);
+@@ -866,7 +866,6 @@ int register_fprobe_ips(struct fprobe *f
+                               delete_fprobe_node(&hlist_array->array[i]);
+               }
+       }
+-      mutex_unlock(&fprobe_mutex);
+       if (ret)
+               fprobe_fail_cleanup(fp);
+@@ -928,7 +927,7 @@ int unregister_fprobe(struct fprobe *fp)
+       int ret = 0, i, count;
+       mutex_lock(&fprobe_mutex);
+-      if (!fp || !is_fprobe_still_exist(fp)) {
++      if (!fp || !fprobe_registered(fp)) {
+               ret = -EINVAL;
+               goto out;
+       }