--- /dev/null
+From 417f78e8ff288aa16f8aaaa47f078a35331c5e2e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 56c945d8240a..1bdb6a2f5596 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -350,7 +350,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From 205df81039bff5f325e340a7d6b4c91dfd4ca0bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE_UNIT (= 0x06) while it should have been
+UAC3_FEATURE_UNIT (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 96541fd3ae4d4a4903096e3abb3d81d9ded40f0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 52274eda8423..efe9785c6c13 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -392,7 +392,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From 888c2f8e11b8e213daec1bbe8934920e11f90c29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index e09391ab3deb..1ba6adb5b912 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3186,7 +3186,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3212,7 +3212,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From 267a69d9716beb5d08a8f33a838a1064c555001c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 2e2b94ae6355..4a3f7bb027ed 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -294,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From aed626ab4f8bb62a6dad61617a2e80e0f7382664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index ca1a428b278e..54351d6742d0 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -390,7 +390,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -425,6 +425,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From 619f595b48da510da8b3bbb20896fb42728a80ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 4110e15c22c7..8ab7e591b66a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2222,6 +2222,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 57f9e24602d0..93ca6f90f320 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -92,6 +92,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From d03c04612af8f330d667989167ad20ac313b68fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Nov 2021 13:40:34 -0700
+Subject: mm/page_alloc: detect allocation forbidden by cpuset and bail out
+ early
+
+From: Feng Tang <feng.tang@intel.com>
+
+[ Upstream commit 8ca1b5a49885f0c0c486544da46a9e0ac790831d ]
+
+There was a report that starting an Ubuntu in docker while using cpuset
+to bind it to movable nodes (a node only has movable zone, like a node
+for hotplug or a Persistent Memory node in normal usage) will fail due
+to memory allocation failure, and then OOM is involved and many other
+innocent processes got killed.
+
+It can be reproduced with command:
+
+ $ docker run -it --rm --cpuset-mems 4 ubuntu:latest bash -c "grep Mems_allowed /proc/self/status"
+
+(where node 4 is a movable node)
+
+ runc:[2:INIT] invoked oom-killer: gfp_mask=0x500cc2(GFP_HIGHUSER|__GFP_ACCOUNT), order=0, oom_score_adj=0
+ CPU: 8 PID: 8291 Comm: runc:[2:INIT] Tainted: G W I E 5.8.2-0.g71b519a-default #1 openSUSE Tumbleweed (unreleased)
+ Hardware name: Dell Inc. PowerEdge R640/0PHYDR, BIOS 2.6.4 04/09/2020
+ Call Trace:
+ dump_stack+0x6b/0x88
+ dump_header+0x4a/0x1e2
+ oom_kill_process.cold+0xb/0x10
+ out_of_memory.part.0+0xaf/0x230
+ out_of_memory+0x3d/0x80
+ __alloc_pages_slowpath.constprop.0+0x954/0xa20
+ __alloc_pages_nodemask+0x2d3/0x300
+ pipe_write+0x322/0x590
+ new_sync_write+0x196/0x1b0
+ vfs_write+0x1c3/0x1f0
+ ksys_write+0xa7/0xe0
+ do_syscall_64+0x52/0xd0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ Mem-Info:
+ active_anon:392832 inactive_anon:182 isolated_anon:0
+ active_file:68130 inactive_file:151527 isolated_file:0
+ unevictable:2701 dirty:0 writeback:7
+ slab_reclaimable:51418 slab_unreclaimable:116300
+ mapped:45825 shmem:735 pagetables:2540 bounce:0
+ free:159849484 free_pcp:73 free_cma:0
+ Node 4 active_anon:1448kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:0kB dirty:0kB writeback:0kB shmem:0kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB all_unreclaimable? no
+ Node 4 Movable free:130021408kB min:9140kB low:139160kB high:269180kB reserved_highatomic:0KB active_anon:1448kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB present:130023424kB managed:130023424kB mlocked:0kB kernel_stack:0kB pagetables:0kB bounce:0kB free_pcp:292kB local_pcp:84kB free_cma:0kB
+ lowmem_reserve[]: 0 0 0 0 0
+ Node 4 Movable: 1*4kB (M) 0*8kB 0*16kB 1*32kB (M) 0*64kB 0*128kB 1*256kB (M) 1*512kB (M) 1*1024kB (M) 0*2048kB 31743*4096kB (M) = 130021156kB
+
+ oom-kill:constraint=CONSTRAINT_CPUSET,nodemask=(null),cpuset=docker-9976a269caec812c134fa317f27487ee36e1129beba7278a463dd53e5fb9997b.scope,mems_allowed=4,global_oom,task_memcg=/system.slice/containerd.service,task=containerd,pid=4100,uid=0
+ Out of memory: Killed process 4100 (containerd) total-vm:4077036kB, anon-rss:51184kB, file-rss:26016kB, shmem-rss:0kB, UID:0 pgtables:676kB oom_score_adj:0
+ oom_reaper: reaped process 8248 (docker), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 2054 (node_exporter), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 1452 (systemd-journal), now anon-rss:0kB, file-rss:8564kB, shmem-rss:4kB
+ oom_reaper: reaped process 2146 (munin-node), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 8291 (runc:[2:INIT]), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+
+The reason is that in this case, the target cpuset nodes only have
+movable zone, while the creation of an OS in docker sometimes needs to
+allocate memory in non-movable zones (dma/dma32/normal) like
+GFP_HIGHUSER, and the cpuset limit forbids the allocation, then
+out-of-memory killing is involved even when normal nodes and movable
+nodes both have many free memory.
+
+The OOM killer cannot help to resolve the situation as there is no
+usable memory for the request in the cpuset scope. The only reasonable
+measure to take is to fail the allocation right away and have the caller
+to deal with it.
+
+So add a check for cases like this in the slowpath of allocation, and
+bail out early returning NULL for the allocation.
+
+As page allocation is one of the hottest path in kernel, this check will
+hurt all users with sane cpuset configuration, add a static branch check
+and detect the abnormal config in cpuset memory binding setup so that
+the extra check cost in page allocation is not paid by everyone.
+
+[thanks to Michal Hocko and David Rientjes for suggesting not handling
+ it inside OOM code, adding cpuset check, refining comments]
+
+Link: https://lkml.kernel.org/r/1632481657-68112-1-git-send-email-feng.tang@intel.com
+Signed-off-by: Feng Tang <feng.tang@intel.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Zefan Li <lizefan.x@bytedance.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Stable-dep-of: 65f97cc81b0a ("cgroup/cpuset: Use static_branch_enable_cpuslocked() on cpusets_insane_config_key")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/cpuset.h | 17 +++++++++++++++++
+ include/linux/mmzone.h | 22 ++++++++++++++++++++++
+ kernel/cgroup/cpuset.c | 23 +++++++++++++++++++++++
+ mm/page_alloc.c | 13 +++++++++++++
+ 4 files changed, 75 insertions(+)
+
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index b70224370832..e0139d9747d4 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -33,6 +33,8 @@
+ */
+ extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
++extern struct static_key_false cpusets_insane_config_key;
++
+ static inline bool cpusets_enabled(void)
+ {
+ return static_branch_unlikely(&cpusets_enabled_key);
+@@ -50,6 +52,19 @@ static inline void cpuset_dec(void)
+ static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
+ }
+
++/*
++ * This will get enabled whenever a cpuset configuration is considered
++ * unsupportable in general. E.g. movable only node which cannot satisfy
++ * any non movable allocations (see update_nodemask). Page allocator
++ * needs to make additional checks for those configurations and this
++ * check is meant to guard those checks without any overhead for sane
++ * configurations.
++ */
++static inline bool cpusets_insane_config(void)
++{
++ return static_branch_unlikely(&cpusets_insane_config_key);
++}
++
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
+ extern void cpuset_force_rebuild(void);
+@@ -168,6 +183,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+
+ static inline bool cpusets_enabled(void) { return false; }
+
++static inline bool cpusets_insane_config(void) { return false; }
++
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 71150fb1cb2a..e0106e13f74f 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1128,6 +1128,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ #define for_each_zone_zonelist(zone, z, zlist, highidx) \
+ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+
++/* Whether the 'nodes' are all movable nodes */
++static inline bool movable_only_nodes(nodemask_t *nodes)
++{
++ struct zonelist *zonelist;
++ struct zoneref *z;
++ int nid;
++
++ if (nodes_empty(*nodes))
++ return false;
++
++ /*
++ * We can chose arbitrary node from the nodemask to get a
++ * zonelist as they are interlinked. We just need to find
++ * at least one zone that can satisfy kernel allocations.
++ */
++ nid = first_node(*nodes);
++ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
++ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
++ return (!z->zone) ? true : false;
++}
++
++
+ #ifdef CONFIG_SPARSEMEM
+ #include <asm/sparsemem.h>
+ #endif
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 731547a0d057..52274eda8423 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -71,6 +71,13 @@
+ DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+
++/*
++ * There could be abnormal cpuset configurations for cpu or memory
++ * node binding, add this key to provide a quick low-cost judgement
++ * of the situation.
++ */
++DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
++
+ /* See "Frequency meter" comments, below. */
+
+ struct fmeter {
+@@ -381,6 +388,17 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
+
+ static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
+
++static inline void check_insane_mems_config(nodemask_t *nodes)
++{
++ if (!cpusets_insane_config() &&
++ movable_only_nodes(nodes)) {
++ static_branch_enable(&cpusets_insane_config_key);
++ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
++ "Cpuset allocations might fail even with a lot of memory available.\n",
++ nodemask_pr_args(nodes));
++ }
++}
++
+ /*
+ * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
+ * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
+@@ -1878,6 +1896,8 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+ if (retval < 0)
+ goto done;
+
++ check_insane_mems_config(&trialcs->mems_allowed);
++
+ spin_lock_irq(&callback_lock);
+ cs->mems_allowed = trialcs->mems_allowed;
+ spin_unlock_irq(&callback_lock);
+@@ -3215,6 +3235,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
+ mems_updated = !nodes_equal(new_mems, cs->effective_mems);
+
++ if (mems_updated)
++ check_insane_mems_config(&new_mems);
++
+ if (is_in_v2_mode())
+ hotplug_update_tasks(cs, &new_cpus, &new_mems,
+ cpus_updated, mems_updated);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 59e1fcc05566..d906c6b96181 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4691,6 +4691,19 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ if (!ac->preferred_zoneref->zone)
+ goto nopage;
+
++ /*
++ * Check for insane configurations where the cpuset doesn't contain
++ * any suitable zone to satisfy the request - e.g. non-movable
++ * GFP_HIGHUSER allocations from MOVABLE nodes only.
++ */
++ if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
++ struct zoneref *z = first_zones_zonelist(ac->zonelist,
++ ac->highest_zoneidx,
++ &cpuset_current_mems_allowed);
++ if (!z->zone)
++ goto nopage;
++ }
++
+ if (alloc_flags & ALLOC_KSWAPD)
+ wake_all_kswapds(order, gfp_mask, ac);
+
+--
+2.50.1
+
--- /dev/null
+From d0123c1e9c1cc7a42eb1c0e84bb4fdb89ea7f823 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index d9535129f4e9..6dabe5eaa3be 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1761,7 +1761,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1771,6 +1771,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1938,13 +1939,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From 4be43d28e3b16002ff73338a84a4e84bf6f22c9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 94e0a8c68d59..b301efa41c1c 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -558,7 +558,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From c28da585f561b24ea58cb3d8d8e205a97f221a8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at PRE_ROUTING stage.
+
+Instead of checking hook just check if the skb already has a route
+attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index d232e0251142..e89a4cbd9f5d 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -125,8 +125,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -194,8 +193,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index b396559f68b4..5384b73e318e 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -161,7 +161,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -259,8 +259,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From 58f9df59f1d6efc7a710578bf0eec6310bd67239 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Oct 2020 21:43:52 +0200
+Subject: netfilter: nft_reject: unify reject init and dump into nft_reject
+
+From: Jose M. Guisado Gomez <guigom@riseup.net>
+
+[ Upstream commit 312ca575a50543a886a5dfa2af1e72aa6a5b601e ]
+
+Bridge family is using the same static init and dump function as inet.
+
+This patch removes duplicate code unifying these functions body into
+nft_reject.c so they can be reused in the rest of families supporting
+reject verdict.
+
+Signed-off-by: Jose M. Guisado Gomez <guigom@riseup.net>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 91a79b792204 ("netfilter: nf_reject: don't leak dst refcount for loopback packets")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/netfilter/nft_reject_bridge.c | 60 +-----------------------
+ net/netfilter/nft_reject.c | 12 ++++-
+ net/netfilter/nft_reject_inet.c | 60 +-----------------------
+ 3 files changed, 15 insertions(+), 117 deletions(-)
+
+diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
+index deae2c9a0f69..9ffadcd524f8 100644
+--- a/net/bridge/netfilter/nft_reject_bridge.c
++++ b/net/bridge/netfilter/nft_reject_bridge.c
+@@ -364,69 +364,13 @@ static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
+ (1 << NF_BR_LOCAL_IN));
+ }
+
+-static int nft_reject_bridge_init(const struct nft_ctx *ctx,
+- const struct nft_expr *expr,
+- const struct nlattr * const tb[])
+-{
+- struct nft_reject *priv = nft_expr_priv(expr);
+- int icmp_code;
+-
+- if (tb[NFTA_REJECT_TYPE] == NULL)
+- return -EINVAL;
+-
+- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+- switch (priv->type) {
+- case NFT_REJECT_ICMP_UNREACH:
+- case NFT_REJECT_ICMPX_UNREACH:
+- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+- return -EINVAL;
+-
+- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+- icmp_code > NFT_REJECT_ICMPX_MAX)
+- return -EINVAL;
+-
+- priv->icmp_code = icmp_code;
+- break;
+- case NFT_REJECT_TCP_RST:
+- break;
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+-static int nft_reject_bridge_dump(struct sk_buff *skb,
+- const struct nft_expr *expr)
+-{
+- const struct nft_reject *priv = nft_expr_priv(expr);
+-
+- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
+- goto nla_put_failure;
+-
+- switch (priv->type) {
+- case NFT_REJECT_ICMP_UNREACH:
+- case NFT_REJECT_ICMPX_UNREACH:
+- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+- goto nla_put_failure;
+- break;
+- default:
+- break;
+- }
+-
+- return 0;
+-
+-nla_put_failure:
+- return -1;
+-}
+-
+ static struct nft_expr_type nft_reject_bridge_type;
+ static const struct nft_expr_ops nft_reject_bridge_ops = {
+ .type = &nft_reject_bridge_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_bridge_eval,
+- .init = nft_reject_bridge_init,
+- .dump = nft_reject_bridge_dump,
++ .init = nft_reject_init,
++ .dump = nft_reject_dump,
+ .validate = nft_reject_bridge_validate,
+ };
+
+diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
+index 61fb7e8afbf0..927ff8459bd9 100644
+--- a/net/netfilter/nft_reject.c
++++ b/net/netfilter/nft_reject.c
+@@ -40,6 +40,7 @@ int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[])
+ {
+ struct nft_reject *priv = nft_expr_priv(expr);
++ int icmp_code;
+
+ if (tb[NFTA_REJECT_TYPE] == NULL)
+ return -EINVAL;
+@@ -47,9 +48,17 @@ int nft_reject_init(const struct nft_ctx *ctx,
+ priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
++ case NFT_REJECT_ICMPX_UNREACH:
+ if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+ return -EINVAL;
+- priv->icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
++
++ icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
++ if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
++ icmp_code > NFT_REJECT_ICMPX_MAX)
++ return -EINVAL;
++
++ priv->icmp_code = icmp_code;
++ break;
+ case NFT_REJECT_TCP_RST:
+ break;
+ default:
+@@ -69,6 +78,7 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
++ case NFT_REJECT_ICMPX_UNREACH:
+ if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+ goto nla_put_failure;
+ break;
+diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
+index c00b94a16682..115aa446f6d5 100644
+--- a/net/netfilter/nft_reject_inet.c
++++ b/net/netfilter/nft_reject_inet.c
+@@ -60,69 +60,13 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
+ regs->verdict.code = NF_DROP;
+ }
+
+-static int nft_reject_inet_init(const struct nft_ctx *ctx,
+- const struct nft_expr *expr,
+- const struct nlattr * const tb[])
+-{
+- struct nft_reject *priv = nft_expr_priv(expr);
+- int icmp_code;
+-
+- if (tb[NFTA_REJECT_TYPE] == NULL)
+- return -EINVAL;
+-
+- priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
+- switch (priv->type) {
+- case NFT_REJECT_ICMP_UNREACH:
+- case NFT_REJECT_ICMPX_UNREACH:
+- if (tb[NFTA_REJECT_ICMP_CODE] == NULL)
+- return -EINVAL;
+-
+- icmp_code = nla_get_u8(tb[NFTA_REJECT_ICMP_CODE]);
+- if (priv->type == NFT_REJECT_ICMPX_UNREACH &&
+- icmp_code > NFT_REJECT_ICMPX_MAX)
+- return -EINVAL;
+-
+- priv->icmp_code = icmp_code;
+- break;
+- case NFT_REJECT_TCP_RST:
+- break;
+- default:
+- return -EINVAL;
+- }
+- return 0;
+-}
+-
+-static int nft_reject_inet_dump(struct sk_buff *skb,
+- const struct nft_expr *expr)
+-{
+- const struct nft_reject *priv = nft_expr_priv(expr);
+-
+- if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
+- goto nla_put_failure;
+-
+- switch (priv->type) {
+- case NFT_REJECT_ICMP_UNREACH:
+- case NFT_REJECT_ICMPX_UNREACH:
+- if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
+- goto nla_put_failure;
+- break;
+- default:
+- break;
+- }
+-
+- return 0;
+-
+-nla_put_failure:
+- return -1;
+-}
+-
+ static struct nft_expr_type nft_reject_inet_type;
+ static const struct nft_expr_ops nft_reject_inet_ops = {
+ .type = &nft_reject_inet_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_inet_eval,
+- .init = nft_reject_inet_init,
+- .dump = nft_reject_inet_dump,
++ .init = nft_reject_init,
++ .dump = nft_reject_dump,
+ .validate = nft_reject_validate,
+ };
+
+--
+2.50.1
+
--- /dev/null
+From 6dcdbb083a68bf3112d428be976c4253d59ad018 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Oct 2020 11:24:08 +0100
+Subject: netfilter: nft_reject_inet: allow to use reject from inet ingress
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 117ca1f8920cf4087bf82f44bd2a51b49d6aae63 ]
+
+Enhance validation to support for reject from inet ingress chains.
+
+Note that, reject from inet ingress and netdev ingress differ.
+
+Reject packets from inet ingress are sent through ip_local_out() since
+inet reject emulates the IP layer receive path. So the reject packet
+follows the classic IP output and postrouting paths.
+
+The reject action from netdev ingress assumes the packet not yet entered
+the IP layer, so the reject packet is sent through dev_queue_xmit().
+Therefore, reject packets from netdev ingress do not follow the classic
+IP output and postrouting paths.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 91a79b792204 ("netfilter: nf_reject: don't leak dst refcount for loopback packets")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++++--
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 +++--
+ net/netfilter/nft_reject_inet.c | 14 +++++++++++++-
+ 3 files changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index efe14a6a5d9b..d232e0251142 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -125,7 +125,8 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(oldskb))
++ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
++ nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -193,7 +194,8 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if (hook == NF_INET_PRE_ROUTING && nf_reject_fill_skb_dst(skb_in))
++ if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
++ nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index df572724f254..b396559f68b4 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -161,7 +161,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING) {
++ if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -259,7 +259,8 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if (hooknum == NF_INET_PRE_ROUTING && nf_reject6_fill_skb_dst(skb_in))
++ if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
++ nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
+index 115aa446f6d5..554caf967baa 100644
+--- a/net/netfilter/nft_reject_inet.c
++++ b/net/netfilter/nft_reject_inet.c
+@@ -60,6 +60,18 @@ static void nft_reject_inet_eval(const struct nft_expr *expr,
+ regs->verdict.code = NF_DROP;
+ }
+
++static int nft_reject_inet_validate(const struct nft_ctx *ctx,
++ const struct nft_expr *expr,
++ const struct nft_data **data)
++{
++ return nft_chain_validate_hooks(ctx->chain,
++ (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_FORWARD) |
++ (1 << NF_INET_LOCAL_OUT) |
++ (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_INGRESS));
++}
++
+ static struct nft_expr_type nft_reject_inet_type;
+ static const struct nft_expr_ops nft_reject_inet_ops = {
+ .type = &nft_reject_inet_type,
+@@ -67,7 +79,7 @@ static const struct nft_expr_ops nft_reject_inet_ops = {
+ .eval = nft_reject_inet_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+- .validate = nft_reject_validate,
++ .validate = nft_reject_inet_validate,
+ };
+
+ static struct nft_expr_type nft_reject_inet_type __read_mostly = {
+--
+2.50.1
+
--- /dev/null
+From 06da568a7d0f5305424d18ce0f7e5a5bf284cf7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index af23e57fc78e..be98b23488b4 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From c57218023e70b067c4aea8de4ff2e8060e435eac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index f4c7dbfaf8ee..c5f53dc3dbbc 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -64,24 +64,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .llseek = no_llseek,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 0489af62f32bddc68ce01503fb51f09f37132c21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index c5f53dc3dbbc..5848f2e374a6 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -83,7 +84,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 3e6adb25d737f38e183ba1c1cc1a31bf8c74580f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index f02d8bbea3e5..fc9382833435 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6619,6 +6619,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
tracing-remove-unneeded-goto-out-logic.patch
tracing-limit-access-to-parser-buffer-when-trace_get_user-failed.patch
iio-light-as73211-ensure-buffer-holes-are-zeroed.patch
+mm-page_alloc-detect-allocation-forbidden-by-cpuset-.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nft_reject-unify-reject-init-and-dump-into.patch
+netfilter-nft_reject_inet-allow-to-use-reject-from-i.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
--- /dev/null
+From ad152f7ea4d1908510009ea9403b82743cae7107 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f5a6e990d07a..12a5e053ec54 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From 8b194e76f47a302fd5d896522ca6ba44ab5d3a5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 22543503e37a634d5c8efa25ee03f7f937761579 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:58 +0000
+Subject: bonding: update LACP activity flag after setting lacp_active
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit b64d035f77b1f02ab449393342264b44950a75ae ]
+
+The port's actor_oper_port_state activity flag should be updated immediately
+after changing the lacp_active option to reflect the current mode correctly.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 25 +++++++++++++++++++++++++
+ drivers/net/bonding/bond_options.c | 1 +
+ include/net/bond_3ad.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index ff6d4e74a186..fcbd70ad45b9 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2730,6 +2730,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_3ad_update_lacp_active - change the lacp active
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++ struct port *port = NULL;
++ struct list_head *iter;
++ struct slave *slave;
++ int lacp_active;
++
++ lacp_active = bond->params.lacp_active;
++ spin_lock_bh(&bond->mode_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ port = &(SLAVE_AD_INFO(slave)->port);
++ if (lacp_active)
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ else
++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++ }
++ spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 5da4599377e1..24072e164b77 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1390,6 +1390,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
++ bond_3ad_update_lacp_active(bond);
+
+ return 0;
+ }
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index f2273bd5a4c5..2dd382a81dd5 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -303,6 +303,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+--
+2.50.1
+
--- /dev/null
+From 8a893bc1f759f17505ef5df85456d7a04a38a17b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index b40f86a3f605..78c499021768 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -408,7 +408,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From ff9ad63dad8f8cdd39b0fec21243b795c1d8fd2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:36:41 -0500
+Subject: drm/amd/display: Add null pointer check in
+ mod_hdcp_hdcp1_create_session()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 7a2ca2ea64b1b63c8baa94a8f5deb70b2248d119 ]
+
+The function mod_hdcp_hdcp1_create_session() calls the function
+get_first_active_display(), but does not check its return value.
+The return value is a null pointer if the display list is empty.
+This will lead to a null pointer dereference.
+
+Add a null pointer check for get_first_active_display() and return
+MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function returns null.
+
+This is similar to the commit c3e9826a2202
+("drm/amd/display: Add null pointer check for get_first_active_display()").
+
+Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 5e43eb3cd731649c4f8b9134f857be62a416c893)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index b840da3f052a..b5a15b7aae66 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -256,6 +256,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
+--
+2.50.1
+
--- /dev/null
+From 8629b2073018e1c0846f9045056ee72d6db7c654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:31 +0800
+Subject: drm/hisilicon/hibmc: fix the hibmc loaded failed bug
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 93a08f856fcc5aaeeecad01f71bef3088588216a ]
+
+When hibmc loading fails, the driver uses hibmc_unload to free the
+resources, but the mutexes in mode.config are not initialized, which
+will lead to a NULL pointer access. Just change the goto statements to
+returns, because hibmc_hw_init() doesn't need to free anything.
+
+Fixes: b3df5e65cc03 ("drm/hibmc: Drop drm_vblank_cleanup")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 610fc8e135f9..7d0edecfc495 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -268,12 +268,12 @@ static int hibmc_load(struct drm_device *dev)
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+- goto err;
++ return ret;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+- goto err;
++ return ret;
+ }
+
+ ret = hibmc_kms_init(priv);
+--
+2.50.1
+
--- /dev/null
+From 7b96a1140dc289cd4f499bff2c3ccecf00e7377d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:59 -0700
+Subject: igc: fix disabling L1.2 PCI-E link substate on I226 on init
+
+From: ValdikSS <iam@valdikss.org.ru>
+
+[ Upstream commit 1468c1f97cf32418e34dbb40b784ed9333b9e123 ]
+
+Device ID comparison in igc_is_device_id_i226 is performed before
+the ID is set, resulting in always failing check on init.
+
+Before the patch:
+* L1.2 is not disabled on init
+* L1.2 is properly disabled after suspend-resume cycle
+
+With the patch:
+* L1.2 is properly disabled both on init and after suspend-resume
+
+How to test:
+Connect to the 1G link with 300+ mbit/s Internet speed, and run
+the download speed test, such as:
+
+ curl -o /dev/null http://speedtest.selectel.ru/1GB
+
+Without L1.2 disabled, the speed would be no more than ~200 mbit/s.
+With L1.2 disabled, the speed would reach 1 gbit/s.
+Note: it's required that the latency between your host and the remote
+be around 3-5 ms, the test inside LAN (<1 ms latency) won't trigger the
+issue.
+
+Link: https://lore.kernel.org/intel-wired-lan/15248b4f-3271-42dd-8e35-02bfc92b25e1@intel.com
+Fixes: 0325143b59c6 ("igc: disable L1.2 PCI-E link substate to avoid performance issue")
+Signed-off-by: ValdikSS <iam@valdikss.org.ru>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-6-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index f52c1674d19b..6a9ad4231b0c 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6549,6 +6549,13 @@ static int igc_probe(struct pci_dev *pdev,
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -6573,13 +6580,6 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+--
+2.50.1
+
--- /dev/null
+From 8e4377351619c8fb00643b43e47ffd617739e58f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index b6ee83b81d32..065d626d5905 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3286,7 +3286,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3312,7 +3312,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From 853b56c8e14ae8b458d9c6a259fa9e5064b1a0da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 58203c41d652..7e3a85769932 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -294,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From 8d5b9478433e160f631f9e9b5aa8ccd9d061bc79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index b399b9c14717..9a789a419166 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -392,7 +392,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -427,6 +427,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From ee3d71097181fbbc34e1a70898e6fc3a9858f264 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 35908a8c640a..85353042a790 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2269,6 +2269,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 9e070ab3ed76..eabaca6c240a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -93,6 +93,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From 02605f6ea8eca26eacb67684e50efa9e503a760b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Nov 2021 13:40:34 -0700
+Subject: mm/page_alloc: detect allocation forbidden by cpuset and bail out
+ early
+
+From: Feng Tang <feng.tang@intel.com>
+
+[ Upstream commit 8ca1b5a49885f0c0c486544da46a9e0ac790831d ]
+
+There was a report that starting an Ubuntu in docker while using cpuset
+to bind it to movable nodes (a node only has movable zone, like a node
+for hotplug or a Persistent Memory node in normal usage) will fail due
+to memory allocation failure, and then OOM is involved and many other
+innocent processes got killed.
+
+It can be reproduced with command:
+
+ $ docker run -it --rm --cpuset-mems 4 ubuntu:latest bash -c "grep Mems_allowed /proc/self/status"
+
+(where node 4 is a movable node)
+
+ runc:[2:INIT] invoked oom-killer: gfp_mask=0x500cc2(GFP_HIGHUSER|__GFP_ACCOUNT), order=0, oom_score_adj=0
+ CPU: 8 PID: 8291 Comm: runc:[2:INIT] Tainted: G W I E 5.8.2-0.g71b519a-default #1 openSUSE Tumbleweed (unreleased)
+ Hardware name: Dell Inc. PowerEdge R640/0PHYDR, BIOS 2.6.4 04/09/2020
+ Call Trace:
+ dump_stack+0x6b/0x88
+ dump_header+0x4a/0x1e2
+ oom_kill_process.cold+0xb/0x10
+ out_of_memory.part.0+0xaf/0x230
+ out_of_memory+0x3d/0x80
+ __alloc_pages_slowpath.constprop.0+0x954/0xa20
+ __alloc_pages_nodemask+0x2d3/0x300
+ pipe_write+0x322/0x590
+ new_sync_write+0x196/0x1b0
+ vfs_write+0x1c3/0x1f0
+ ksys_write+0xa7/0xe0
+ do_syscall_64+0x52/0xd0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+ Mem-Info:
+ active_anon:392832 inactive_anon:182 isolated_anon:0
+ active_file:68130 inactive_file:151527 isolated_file:0
+ unevictable:2701 dirty:0 writeback:7
+ slab_reclaimable:51418 slab_unreclaimable:116300
+ mapped:45825 shmem:735 pagetables:2540 bounce:0
+ free:159849484 free_pcp:73 free_cma:0
+ Node 4 active_anon:1448kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB isolated(anon):0kB isolated(file):0kB mapped:0kB dirty:0kB writeback:0kB shmem:0kB shmem_thp: 0kB shmem_pmdmapped: 0kB anon_thp: 0kB writeback_tmp:0kB all_unreclaimable? no
+ Node 4 Movable free:130021408kB min:9140kB low:139160kB high:269180kB reserved_highatomic:0KB active_anon:1448kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:0kB writepending:0kB present:130023424kB managed:130023424kB mlocked:0kB kernel_stack:0kB pagetables:0kB bounce:0kB free_pcp:292kB local_pcp:84kB free_cma:0kB
+ lowmem_reserve[]: 0 0 0 0 0
+ Node 4 Movable: 1*4kB (M) 0*8kB 0*16kB 1*32kB (M) 0*64kB 0*128kB 1*256kB (M) 1*512kB (M) 1*1024kB (M) 0*2048kB 31743*4096kB (M) = 130021156kB
+
+ oom-kill:constraint=CONSTRAINT_CPUSET,nodemask=(null),cpuset=docker-9976a269caec812c134fa317f27487ee36e1129beba7278a463dd53e5fb9997b.scope,mems_allowed=4,global_oom,task_memcg=/system.slice/containerd.service,task=containerd,pid=4100,uid=0
+ Out of memory: Killed process 4100 (containerd) total-vm:4077036kB, anon-rss:51184kB, file-rss:26016kB, shmem-rss:0kB, UID:0 pgtables:676kB oom_score_adj:0
+ oom_reaper: reaped process 8248 (docker), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 2054 (node_exporter), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 1452 (systemd-journal), now anon-rss:0kB, file-rss:8564kB, shmem-rss:4kB
+ oom_reaper: reaped process 2146 (munin-node), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+ oom_reaper: reaped process 8291 (runc:[2:INIT]), now anon-rss:0kB, file-rss:0kB, shmem-rss:0kB
+
+The reason is that in this case, the target cpuset nodes only have
+movable zone, while the creation of an OS in docker sometimes needs to
+allocate memory in non-movable zones (dma/dma32/normal) like
+GFP_HIGHUSER, and the cpuset limit forbids the allocation, then
+out-of-memory killing is involved even when normal nodes and movable
+nodes both have many free memory.
+
+The OOM killer cannot help to resolve the situation as there is no
+usable memory for the request in the cpuset scope. The only reasonable
+measure to take is to fail the allocation right away and have the caller
+to deal with it.
+
+So add a check for cases like this in the slowpath of allocation, and
+bail out early returning NULL for the allocation.
+
+As page allocation is one of the hottest path in kernel, this check will
+hurt all users with sane cpuset configuration, add a static branch check
+and detect the abnormal config in cpuset memory binding setup so that
+the extra check cost in page allocation is not paid by everyone.
+
+[thanks to Michal Hocko and David Rientjes for suggesting not handling
+ it inside OOM code, adding cpuset check, refining comments]
+
+Link: https://lkml.kernel.org/r/1632481657-68112-1-git-send-email-feng.tang@intel.com
+Signed-off-by: Feng Tang <feng.tang@intel.com>
+Suggested-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: David Rientjes <rientjes@google.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Zefan Li <lizefan.x@bytedance.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Stable-dep-of: 65f97cc81b0a ("cgroup/cpuset: Use static_branch_enable_cpuslocked() on cpusets_insane_config_key")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/cpuset.h | 17 +++++++++++++++++
+ include/linux/mmzone.h | 22 ++++++++++++++++++++++
+ kernel/cgroup/cpuset.c | 23 +++++++++++++++++++++++
+ mm/page_alloc.c | 13 +++++++++++++
+ 4 files changed, 75 insertions(+)
+
+diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
+index 82fb7e24d1cb..0348dba5680e 100644
+--- a/include/linux/cpuset.h
++++ b/include/linux/cpuset.h
+@@ -34,6 +34,8 @@
+ */
+ extern struct static_key_false cpusets_pre_enable_key;
+ extern struct static_key_false cpusets_enabled_key;
++extern struct static_key_false cpusets_insane_config_key;
++
+ static inline bool cpusets_enabled(void)
+ {
+ return static_branch_unlikely(&cpusets_enabled_key);
+@@ -51,6 +53,19 @@ static inline void cpuset_dec(void)
+ static_branch_dec_cpuslocked(&cpusets_pre_enable_key);
+ }
+
++/*
++ * This will get enabled whenever a cpuset configuration is considered
++ * unsupportable in general. E.g. movable only node which cannot satisfy
++ * any non movable allocations (see update_nodemask). Page allocator
++ * needs to make additional checks for those configurations and this
++ * check is meant to guard those checks without any overhead for sane
++ * configurations.
++ */
++static inline bool cpusets_insane_config(void)
++{
++ return static_branch_unlikely(&cpusets_insane_config_key);
++}
++
+ extern int cpuset_init(void);
+ extern void cpuset_init_smp(void);
+ extern void cpuset_force_rebuild(void);
+@@ -169,6 +184,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
+
+ static inline bool cpusets_enabled(void) { return false; }
+
++static inline bool cpusets_insane_config(void) { return false; }
++
+ static inline int cpuset_init(void) { return 0; }
+ static inline void cpuset_init_smp(void) {}
+
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 998f10249f13..34d02383023a 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -1229,6 +1229,28 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+ #define for_each_zone_zonelist(zone, z, zlist, highidx) \
+ for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
+
++/* Whether the 'nodes' are all movable nodes */
++static inline bool movable_only_nodes(nodemask_t *nodes)
++{
++ struct zonelist *zonelist;
++ struct zoneref *z;
++ int nid;
++
++ if (nodes_empty(*nodes))
++ return false;
++
++ /*
++ * We can chose arbitrary node from the nodemask to get a
++ * zonelist as they are interlinked. We just need to find
++ * at least one zone that can satisfy kernel allocations.
++ */
++ nid = first_node(*nodes);
++ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
++ z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes);
++ return (!z->zone) ? true : false;
++}
++
++
+ #ifdef CONFIG_SPARSEMEM
+ #include <asm/sparsemem.h>
+ #endif
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index d6e70aa7e151..b40f86a3f605 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -71,6 +71,13 @@
+ DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
+ DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
+
++/*
++ * There could be abnormal cpuset configurations for cpu or memory
++ * node binding, add this key to provide a quick low-cost judgement
++ * of the situation.
++ */
++DEFINE_STATIC_KEY_FALSE(cpusets_insane_config_key);
++
+ /* See "Frequency meter" comments, below. */
+
+ struct fmeter {
+@@ -397,6 +404,17 @@ static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);
+
+ static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);
+
++static inline void check_insane_mems_config(nodemask_t *nodes)
++{
++ if (!cpusets_insane_config() &&
++ movable_only_nodes(nodes)) {
++ static_branch_enable(&cpusets_insane_config_key);
++ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
++ "Cpuset allocations might fail even with a lot of memory available.\n",
++ nodemask_pr_args(nodes));
++ }
++}
++
+ /*
+ * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
+ * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
+@@ -1915,6 +1933,8 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
+ if (retval < 0)
+ goto done;
+
++ check_insane_mems_config(&trialcs->mems_allowed);
++
+ spin_lock_irq(&callback_lock);
+ cs->mems_allowed = trialcs->mems_allowed;
+ spin_unlock_irq(&callback_lock);
+@@ -3262,6 +3282,9 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
+ mems_updated = !nodes_equal(new_mems, cs->effective_mems);
+
++ if (mems_updated)
++ check_insane_mems_config(&new_mems);
++
+ if (is_in_v2_mode())
+ hotplug_update_tasks(cs, &new_cpus, &new_mems,
+ cpus_updated, mems_updated);
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8a1d1c1ca445..4279ece7eade 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4990,6 +4990,19 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ if (!ac->preferred_zoneref->zone)
+ goto nopage;
+
++ /*
++ * Check for insane configurations where the cpuset doesn't contain
++ * any suitable zone to satisfy the request - e.g. non-movable
++ * GFP_HIGHUSER allocations from MOVABLE nodes only.
++ */
++ if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) {
++ struct zoneref *z = first_zones_zonelist(ac->zonelist,
++ ac->highest_zoneidx,
++ &cpuset_current_mems_allowed);
++ if (!z->zone)
++ goto nopage;
++ }
++
+ if (alloc_flags & ALLOC_KSWAPD)
+ wake_all_kswapds(order, gfp_mask, ac);
+
+--
+2.50.1
+
--- /dev/null
+From 25a4629ce9288e3dc8ea18815c07086b0cf0a40b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 10:10:54 +0800
+Subject: net: bridge: fix soft lockup in br_multicast_query_expired()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d1547bf460baec718b3398365f8de33d25c5f36f ]
+
+When setting multicast_query_interval to a large value, the local variable
+'time' in br_multicast_send_query() may overflow. If the time is smaller
+than jiffies, the timer will expire immediately, and then call mod_timer()
+again, which creates a loop and may trigger the following soft lockup
+issue.
+
+ watchdog: BUG: soft lockup - CPU#1 stuck for 221s! [rb_consumer:66]
+ CPU: 1 UID: 0 PID: 66 Comm: rb_consumer Not tainted 6.16.0+ #259 PREEMPT(none)
+ Call Trace:
+ <IRQ>
+ __netdev_alloc_skb+0x2e/0x3a0
+ br_ip6_multicast_alloc_query+0x212/0x1b70
+ __br_multicast_send_query+0x376/0xac0
+ br_multicast_send_query+0x299/0x510
+ br_multicast_query_expired.constprop.0+0x16d/0x1b0
+ call_timer_fn+0x3b/0x2a0
+ __run_timers+0x619/0x950
+ run_timer_softirq+0x11c/0x220
+ handle_softirqs+0x18e/0x560
+ __irq_exit_rcu+0x158/0x1a0
+ sysvec_apic_timer_interrupt+0x76/0x90
+ </IRQ>
+
+This issue can be reproduced with:
+ ip link add br0 type bridge
+ echo 1 > /sys/class/net/br0/bridge/multicast_querier
+ echo 0xffffffffffffffff >
+ /sys/class/net/br0/bridge/multicast_query_interval
+ ip link set dev br0 up
+
+The multicast_startup_query_interval can also cause this issue. Similar to
+the commit 99b40610956a ("net: bridge: mcast: add and enforce query
+interval minimum"), add check for the query interval maximum to fix this
+issue.
+
+Link: https://lore.kernel.org/netdev/20250806094941.1285944-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250812091818.542238-1-wangliang74@huawei.com/
+Fixes: d902eee43f19 ("bridge: Add multicast count/interval sysfs entries")
+Suggested-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250813021054.1643649-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_multicast.c | 16 ++++++++++++++++
+ net/bridge/br_private.h | 2 ++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 085c9e706bc4..b8fb1e23b107 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4608,6 +4608,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_query_interval = intvl_jiffies;
+ }
+
+@@ -4624,6 +4632,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 1718168bd927..8acb427ae6de 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -30,6 +30,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+
+ #define BR_HWDOM_MAX BITS_PER_LONG
+
+--
+2.50.1
+
--- /dev/null
+From b62d06b4b3fb3e05cc846359f7e2bfbc7d005f96 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 5 Mar 2022 23:12:45 +0100
+Subject: net: phy: Use netif_rx().
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit a3d73e15909bdcf25f341e6623cc165ba7eb5968 ]
+
+Since commit
+ baebdf48c3600 ("net: dev: Makes sure netif_rx() can be invoked in any context.")
+
+the function netif_rx() can be used in preemptible/thread context as
+well as in interrupt context.
+
+Use netif_rx().
+
+Cc: Andrew Lunn <andrew@lunn.ch>
+Cc: Heiner Kallweit <hkallweit1@gmail.com>
+Cc: Radu Pirea <radu-nicolae.pirea@oss.nxp.com>
+Cc: Richard Cochran <richardcochran@gmail.com>
+Cc: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: bc1a59cff9f7 ("phy: mscc: Fix timestamping for vsc8584")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/dp83640.c | 6 +++---
+ drivers/net/phy/mscc/mscc_ptp.c | 2 +-
+ drivers/net/phy/nxp-c45-tja11xx.c | 2 +-
+ 3 files changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
+index 705c16675b80..88d23890f3be 100644
+--- a/drivers/net/phy/dp83640.c
++++ b/drivers/net/phy/dp83640.c
+@@ -886,7 +886,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
+ spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+
+ if (shhwtstamps)
+- netif_rx_ni(skb);
++ netif_rx(skb);
+ }
+
+ static void decode_txts(struct dp83640_private *dp83640,
+@@ -1332,7 +1332,7 @@ static void rx_timestamp_work(struct work_struct *work)
+ break;
+ }
+
+- netif_rx_ni(skb);
++ netif_rx(skb);
+ }
+
+ if (!skb_queue_empty(&dp83640->rx_queue))
+@@ -1383,7 +1383,7 @@ static bool dp83640_rxtstamp(struct mii_timestamper *mii_ts,
+ skb_queue_tail(&dp83640->rx_queue, skb);
+ schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
+ } else {
+- netif_rx_ni(skb);
++ netif_rx(skb);
+ }
+
+ return true;
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 92f59c964409..cf61990ccd37 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1218,7 +1218,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ ts.tv_sec--;
+
+ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx_ni(skb);
++ netif_rx(skb);
+
+ return true;
+ }
+diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
+index 13269c4e87dd..e6ac851b217a 100644
+--- a/drivers/net/phy/nxp-c45-tja11xx.c
++++ b/drivers/net/phy/nxp-c45-tja11xx.c
+@@ -436,7 +436,7 @@ static long nxp_c45_do_aux_work(struct ptp_clock_info *ptp)
+ shhwtstamps_rx = skb_hwtstamps(skb);
+ shhwtstamps_rx->hwtstamp = ns_to_ktime(timespec64_to_ns(&ts));
+ NXP_C45_SKB_CB(skb)->header->reserved2 = 0;
+- netif_rx_ni(skb);
++ netif_rx(skb);
+ }
+
+ return reschedule ? 1 : -1;
+--
+2.50.1
+
--- /dev/null
+From 76e1990a2508db1aa380636ef69cbd74ca6261a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 8429d7f8aba4..73b840762afb 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1761,7 +1761,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1771,6 +1771,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1938,13 +1939,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From 64aba03fd1c365228ce65f3402cd92920f501fbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index e9f349cb6446..6c4de685a8e4 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -589,7 +589,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From 5e99541f06e653ac5d6620f9ab7ec2c2db771ce6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:45:07 +0900
+Subject: net: usb: asix_devices: Fix PHY address mask in MDIO bus
+ initialization
+
+From: Yuichiro Tsuji <yuichtsu@amazon.com>
+
+[ Upstream commit 24ef2f53c07f273bad99173e27ee88d44d135b1c ]
+
+Syzbot reported shift-out-of-bounds exception on MDIO bus initialization.
+
+The PHY address should be masked to 5 bits (0-31). Without this
+mask, invalid PHY addresses could be used, potentially causing issues
+with MDIO bus operations.
+
+Fix this by masking the PHY address with 0x1f (31 decimal) to ensure
+it stays within the valid range.
+
+Fixes: 4faff70959d5 ("net: usb: asix_devices: add phy_mask for ax88772 mdio bus")
+Reported-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=20537064367a0f98d597
+Tested-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Signed-off-by: Yuichiro Tsuji <yuichtsu@amazon.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250818084541.1958-1-yuichtsu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/asix_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 928516222959..97d2037e7fee 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -669,7 +669,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+- priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+--
+2.50.1
+
--- /dev/null
+From 922d6e343a93012d44042ca05180fedb3aa14349 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at PRE_ROUTING stage.
+
+Instead of checking hook just check if the skb already has a route
+attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 350aaca12618..c1f5ca847c8a 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -317,8 +316,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) || !nf_reject_verify_csum(proto)) {
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 8208490e05a3..ca39b83c2a5d 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -295,7 +295,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -394,8 +394,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From e3e078b31aa04bf0947a6e7fb7e65b981edd079f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 10:10:29 +0200
+Subject: phy: mscc: Fix timestamping for vsc8584
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit bc1a59cff9f797bfbf8f3104507584d89e9ecf2e ]
+
+There was a problem when we received frames and the frames were
+timestamped. The driver is configured to store the nanosecond part of
+the timestmap in the ptp reserved bits and it would take the second part
+by reading the LTC. The problem is that when reading the LTC we are in
+atomic context and to read the second part will go over mdio bus which
+might sleep, so we get an error.
+The fix consists in actually put all the frames in a queue and start the
+aux work and in that work to read the LTC and then calculate the full
+received time.
+
+Fixes: 7d272e63e0979d ("net: phy: mscc: timestamping and PHC support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250818081029.1300780-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc.h | 12 ++++++++
+ drivers/net/phy/mscc/mscc_main.c | 12 ++++++++
+ drivers/net/phy/mscc/mscc_ptp.c | 49 ++++++++++++++++++++++++--------
+ 3 files changed, 61 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 055e4ca5b3b5..878298304430 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -360,6 +360,13 @@ struct vsc85xx_hw_stat {
+ u16 mask;
+ };
+
++struct vsc8531_skb_cb {
++ u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++ ((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+@@ -408,6 +415,11 @@ struct vsc8531_private {
+ */
+ struct mutex ts_lock;
+ struct mutex phc_lock;
++
++ /* list of skbs that were received and need timestamp information but it
++ * didn't received it yet
++ */
++ struct sk_buff_head rx_skbs_list;
+ };
+
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index b349c359089e..03aa85ec60df 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2324,6 +2324,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++ struct vsc8531_private *priv = phydev->priv;
++
++ skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2554,6 +2561,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2579,6 +2587,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2604,6 +2613,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2627,6 +2637,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2650,6 +2661,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index cf61990ccd37..f77bfbee5f20 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1190,9 +1190,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+- struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct vsc85xx_ptphdr *ptphdr;
+- struct timespec64 ts;
+ unsigned long ns;
+
+ if (!vsc8531->ptp->configured)
+@@ -1202,27 +1200,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ type == PTP_CLASS_NONE)
+ return false;
+
+- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ if (!ptphdr)
+ return false;
+
+- shhwtstamps = skb_hwtstamps(skb);
+- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ ns = ntohl(ptphdr->rsrvd2);
+
+- /* nsec is in reserved field */
+- if (ts.tv_nsec < ns)
+- ts.tv_sec--;
++ VSC8531_SKB_CB(skb)->ns = ns;
++ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+
+- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx(skb);
++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+
+ return true;
+ }
+
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++ struct skb_shared_hwtstamps *shhwtstamps = NULL;
++ struct phy_device *phydev = ptp->phydev;
++ struct vsc8531_private *priv = phydev->priv;
++ struct sk_buff_head received;
++ struct sk_buff *rx_skb;
++ struct timespec64 ts;
++ unsigned long flags;
++
++ __skb_queue_head_init(&received);
++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++ vsc85xx_gettime(info, &ts);
++ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++ shhwtstamps = skb_hwtstamps(rx_skb);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++ ts.tv_sec--;
++
++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++ VSC8531_SKB_CB(rx_skb)->ns);
++ netif_rx(rx_skb);
++ }
++
++ return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .owner = THIS_MODULE,
+ .name = "VSC85xx timer",
+@@ -1236,6 +1259,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .adjfine = &vsc85xx_adjfine,
+ .gettime64 = &vsc85xx_gettime,
+ .settime64 = &vsc85xx_settime,
++ .do_aux_work = &vsc85xx_do_aux_work,
+ };
+
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1563,6 +1587,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+
+ mutex_init(&vsc8531->phc_lock);
+ mutex_init(&vsc8531->ts_lock);
++ skb_queue_head_init(&vsc8531->rx_skbs_list);
+
+ /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ * the same GPIO can be requested by all the PHYs of the same package.
+--
+2.50.1
+
--- /dev/null
+From fff921d11d899057b970b5918e31986b4f073f34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:58 +0800
+Subject: ppp: fix race conditions in ppp_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 0417adf367a0af11adf7ace849af4638cfb573f7 ]
+
+ppp_fill_forward_path() has two race conditions:
+
+1. The ppp->channels list can change between list_empty() and
+ list_first_entry(), as ppp_lock() is not held. If the only channel
+ is deleted in ppp_disconnect_channel(), list_first_entry() may
+ access an empty head or a freed entry, and trigger a panic.
+
+2. pch->chan can be NULL. When ppp_unregister_channel() is called,
+ pch->chan is set to NULL before pch is removed from ppp->channels.
+
+Fix these by using a lockless RCU approach:
+- Use list_first_or_null_rcu() to safely test and access the first list
+ entry.
+- Convert list modifications on ppp->channels to their RCU variants and
+ add synchronize_net() after removal.
+- Check for a NULL pch->chan before dereferencing it.
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-2-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/ppp_generic.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 3e804800c509..5cb06e04293e 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1612,11 +1613,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+- if (list_empty(&ppp->channels))
++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++ if (!pch)
++ return -ENODEV;
++
++ chan = READ_ONCE(pch->chan);
++ if (!chan)
+ return -ENODEV;
+
+- pch = list_first_entry(&ppp->channels, struct channel, clist);
+- chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+@@ -2999,7 +3003,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+- pch->chan = NULL;
++ WRITE_ONCE(pch->chan, NULL);
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+@@ -3505,7 +3509,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+- list_add_tail(&pch->clist, &ppp->channels);
++ list_add_tail_rcu(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ refcount_inc(&ppp->file.refcnt);
+@@ -3535,10 +3539,11 @@ ppp_disconnect_channel(struct channel *pch)
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+- list_del(&pch->clist);
++ list_del_rcu(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
++ synchronize_net();
+ if (refcount_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+--
+2.50.1
+
--- /dev/null
+From 8ad3069554006e6a229e5fab37ce07a92bda6fc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 401cb3e22f31..7585d5a55db2 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From 862b7e03f6b7eef70dc7de0295fa698c72b17dab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index f4c7dbfaf8ee..c5f53dc3dbbc 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -64,24 +64,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .llseek = no_llseek,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From ce52e81ee72aa5c7ca18eb5861d5e34b268f9c76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index c5f53dc3dbbc..5848f2e374a6 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -83,7 +84,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From a6015ad3f6fdf7e42081b0a59995349805d48b32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index ab89f3171a09..da2ed81673c4 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6607,6 +6607,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
iio-light-as73211-ensure-buffer-holes-are-zeroed.patch
mm-memory-failure-fix-infinite-uce-for-vm_pfnmap-pfn.patch
x86-cpu-hygon-add-missing-resctrl_cpu_detect-in-bsp_init-helper.patch
+mm-page_alloc-detect-allocation-forbidden-by-cpuset-.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+net-bridge-fix-soft-lockup-in-br_multicast_query_exp.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+drm-hisilicon-hibmc-fix-the-hibmc-loaded-failed-bug.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+drm-amd-display-add-null-pointer-check-in-mod_hdcp_h.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+ppp-fix-race-conditions-in-ppp_fill_forward_path.patch
+net-phy-use-netif_rx.patch
+phy-mscc-fix-timestamping-for-vsc8584.patch
+net-usb-asix_devices-fix-phy-address-mask-in-mdio-bu.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+igc-fix-disabling-l1.2-pci-e-link-substate-on-i226-o.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+bonding-update-lacp-activity-flag-after-setting-lacp.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
--- /dev/null
+From e00953bf0d1fed3ef80df58ac05af7f0d1d797a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 47cfaf29fdd7..bb919f1d4043 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -350,7 +350,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From d825c2f6c9afdd182d71b6f836eae03caed3117b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4ee2a328cb5b..46cf36ab0acd 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 5c8c4f5c8e9ee01d78f0c1b435a38d84e25e452e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index b3b2aa92e60d..292a36576115 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -295,6 +295,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From b3d7ad8250afb9d204e00217590738b59ffaac6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 921a2ddb497e..c68eb5dddf5b 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -582,7 +582,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -619,6 +619,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From 3c8c3d852de72fe43de2659db61a7611c5d30070 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index fc96ec46e6f6..738691b5697b 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1725,7 +1725,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1735,6 +1735,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1904,13 +1905,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From 756b87b202912ce5690d9e1ac8f5289c06f2a581 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 2562219ccca4..dd5088f7bffb 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -558,7 +558,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From c8c58163811a7860e0a7a77208ccd3bc9590ba7f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index f4c7dbfaf8ee..c5f53dc3dbbc 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -64,24 +64,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .llseek = no_llseek,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 63e18b243e77be85998cfef91cf066e0924485b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index c5f53dc3dbbc..5848f2e374a6 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -83,7 +84,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 6f4ced428f58717099241abdf05a19a433abc283 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index af1c45dd2f38..4957e50b7b5d 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6593,6 +6593,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
nfs-fix-up-commit-deadlocks.patch
nfs-fix-uaf-in-direct-writes.patch
usb-xhci-fix-slot_id-resource-race-conflict.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
--- /dev/null
+From 4f76d1838af53f8d488888b9bc1edfaaf72b9b29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f5a6e990d07a..12a5e053ec54 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From d8b7cbfaf91c557fcb297e6eb82d85834176419b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From cd3de9b8e5fb713744f1cc3ead5e024029473cf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 22:14:51 +0300
+Subject: Bluetooth: hci_conn: do return error from hci_enhanced_setup_sync()
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+[ Upstream commit 0eaf7c7e85da7495c0e03a99375707fc954f5e7b ]
+
+The commit e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to
+hci_sync") missed to update the *return* statement under the *case* of
+BT_CODEC_TRANSPARENT in hci_enhanced_setup_sync(), which led to returning
+success (0) instead of the negative error code (-EINVAL). However, the
+result of hci_enhanced_setup_sync() seems to be ignored anyway, since NULL
+gets passed to hci_cmd_sync_queue() as the last argument in that case and
+the only function interested in that result is specified by that argument.
+
+Fixes: e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to hci_sync")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 49b9dd21b73e..5f6785fd6af5 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -439,7 +439,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ case BT_CODEC_TRANSPARENT:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+- return false;
++ return -EINVAL;
++
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x03;
+ cp.rx_coding_format.id = 0x03;
+--
+2.50.1
+
--- /dev/null
+From e8dc8819fd0bd3991e8669e9ead0ac394e7491a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Feb 2024 17:58:58 +0000
+Subject: bonding: Add independent control state machine
+
+From: Aahil Awatramani <aahila@google.com>
+
+[ Upstream commit 240fd405528bbf7fafa0559202ca7aa524c9cd96 ]
+
+Add support for the independent control state machine per IEEE
+802.1AX-2008 5.4.15 in addition to the existing implementation of the
+coupled control state machine.
+
+Introduces two new states, AD_MUX_COLLECTING and AD_MUX_DISTRIBUTING in
+the LACP MUX state machine for separated handling of an initial
+Collecting state before the Collecting and Distributing state. This
+enables a port to be in a state where it can receive incoming packets
+while not still distributing. This is useful for reducing packet loss when
+a port begins distributing before its partner is able to collect.
+
+Added new functions such as bond_set_slave_tx_disabled_flags and
+bond_set_slave_rx_enabled_flags to precisely manage the port's collecting
+and distributing states. Previously, there was no dedicated method to
+disable TX while keeping RX enabled, which this patch addresses.
+
+Note that the regular flow process in the kernel's bonding driver remains
+unaffected by this patch. The extension requires explicit opt-in by the
+user (in order to ensure no disruptions for existing setups) via netlink
+support using the new bonding parameter coupled_control. The default value
+for coupled_control is set to 1 so as to preserve existing behaviour.
+
+Signed-off-by: Aahil Awatramani <aahila@google.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://lore.kernel.org/r/20240202175858.1573852-1-aahila@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 0599640a21e9 ("bonding: send LACPDUs periodically in passive mode after receiving partner's LACPDU")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/bonding.rst | 12 ++
+ drivers/net/bonding/bond_3ad.c | 157 +++++++++++++++++++++++++--
+ drivers/net/bonding/bond_main.c | 1 +
+ drivers/net/bonding/bond_netlink.c | 16 +++
+ drivers/net/bonding/bond_options.c | 28 ++++-
+ include/net/bond_3ad.h | 2 +
+ include/net/bond_options.h | 1 +
+ include/net/bonding.h | 23 ++++
+ include/uapi/linux/if_link.h | 1 +
+ tools/include/uapi/linux/if_link.h | 1 +
+ 10 files changed, 234 insertions(+), 8 deletions(-)
+
+diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
+index 96cd7a26f3d9..870b4e134318 100644
+--- a/Documentation/networking/bonding.rst
++++ b/Documentation/networking/bonding.rst
+@@ -444,6 +444,18 @@ arp_missed_max
+
+ The default value is 2, and the allowable range is 1 - 255.
+
++coupled_control
++
++ Specifies whether the LACP state machine's MUX in the 802.3ad mode
++ should have separate Collecting and Distributing states.
++
++ This is by implementing the independent control state machine per
++ IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled control
++ state machine.
++
++ The default value is 1. This setting does not separate the Collecting
++ and Distributing states, maintaining the bond in coupled control.
++
+ downdelay
+
+ Specifies the time, in milliseconds, to wait before disabling
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 7557c525615e..c64b87ca067b 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -105,6 +105,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+ static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_enable_collecting(struct port *port);
++static void ad_disable_distributing(struct port *port,
++ bool *update_slave_arr);
+ static void ad_enable_collecting_distributing(struct port *port,
+ bool *update_slave_arr);
+ static void ad_disable_collecting_distributing(struct port *port,
+@@ -170,9 +173,38 @@ static inline int __agg_has_partner(struct aggregator *agg)
+ return !is_zero_ether_addr(agg->partner_system.mac_addr_value);
+ }
+
++/**
++ * __disable_distributing_port - disable the port's slave for distributing.
++ * Port will still be able to collect.
++ * @port: the port we're looking at
++ *
++ * This will disable only distributing on the port's slave.
++ */
++static void __disable_distributing_port(struct port *port)
++{
++ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
++}
++
++/**
++ * __enable_collecting_port - enable the port's slave for collecting,
++ * if it's up
++ * @port: the port we're looking at
++ *
++ * This will enable only collecting on the port's slave.
++ */
++static void __enable_collecting_port(struct port *port)
++{
++ struct slave *slave = port->slave;
++
++ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
++ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
++}
++
+ /**
+ * __disable_port - disable the port's slave
+ * @port: the port we're looking at
++ *
++ * This will disable both collecting and distributing on the port's slave.
+ */
+ static inline void __disable_port(struct port *port)
+ {
+@@ -182,6 +214,8 @@ static inline void __disable_port(struct port *port)
+ /**
+ * __enable_port - enable the port's slave, if it's up
+ * @port: the port we're looking at
++ *
++ * This will enable both collecting and distributing on the port's slave.
+ */
+ static inline void __enable_port(struct port *port)
+ {
+@@ -192,10 +226,27 @@ static inline void __enable_port(struct port *port)
+ }
+
+ /**
+- * __port_is_enabled - check if the port's slave is in active state
++ * __port_move_to_attached_state - check if port should transition back to attached
++ * state.
++ * @port: the port we're looking at
++ */
++static bool __port_move_to_attached_state(struct port *port)
++{
++ if (!(port->sm_vars & AD_PORT_SELECTED) ||
++ (port->sm_vars & AD_PORT_STANDBY) ||
++ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
++ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
++ port->sm_mux_state = AD_MUX_ATTACHED;
++
++ return port->sm_mux_state == AD_MUX_ATTACHED;
++}
++
++/**
++ * __port_is_collecting_distributing - check if the port's slave is in the
++ * combined collecting/distributing state
+ * @port: the port we're looking at
+ */
+-static inline int __port_is_enabled(struct port *port)
++static int __port_is_collecting_distributing(struct port *port)
+ {
+ return bond_is_active_slave(port->slave);
+ }
+@@ -933,6 +984,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
+ */
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ {
++ struct bonding *bond = __get_bond_by_port(port);
+ mux_states_t last_state;
+
+ /* keep current State Machine state to compare later if it was
+@@ -990,9 +1042,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ !__check_agg_selection_timer(port)) {
+- if (port->aggregator->is_active)
+- port->sm_mux_state =
+- AD_MUX_COLLECTING_DISTRIBUTING;
++ if (port->aggregator->is_active) {
++ int state = AD_MUX_COLLECTING_DISTRIBUTING;
++
++ if (!bond->params.coupled_control)
++ state = AD_MUX_COLLECTING;
++ port->sm_mux_state = state;
++ }
+ } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY)) {
+ /* if UNSELECTED or STANDBY */
+@@ -1010,11 +1066,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ }
+ break;
+ case AD_MUX_COLLECTING_DISTRIBUTING:
++ if (!__port_move_to_attached_state(port)) {
++ /* if port state hasn't changed make
++ * sure that a collecting distributing
++ * port in an active aggregator is enabled
++ */
++ if (port->aggregator->is_active &&
++ !__port_is_collecting_distributing(port)) {
++ __enable_port(port);
++ *update_slave_arr = true;
++ }
++ }
++ break;
++ case AD_MUX_COLLECTING:
++ if (!__port_move_to_attached_state(port)) {
++ if ((port->sm_vars & AD_PORT_SELECTED) &&
++ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
++ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
++ port->sm_mux_state = AD_MUX_DISTRIBUTING;
++ } else {
++ /* If port state hasn't changed, make sure that a collecting
++ * port is enabled for an active aggregator.
++ */
++ struct slave *slave = port->slave;
++
++ if (port->aggregator->is_active &&
++ bond_is_slave_rx_disabled(slave)) {
++ ad_enable_collecting(port);
++ *update_slave_arr = true;
++ }
++ }
++ }
++ break;
++ case AD_MUX_DISTRIBUTING:
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
++ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
+- port->sm_mux_state = AD_MUX_ATTACHED;
++ port->sm_mux_state = AD_MUX_COLLECTING;
+ } else {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+@@ -1022,7 +1112,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ */
+ if (port->aggregator &&
+ port->aggregator->is_active &&
+- !__port_is_enabled(port)) {
++ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+@@ -1073,6 +1163,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ update_slave_arr);
+ port->ntt = true;
+ break;
++ case AD_MUX_COLLECTING:
++ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
++ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
++ ad_enable_collecting(port);
++ ad_disable_distributing(port, update_slave_arr);
++ port->ntt = true;
++ break;
++ case AD_MUX_DISTRIBUTING:
++ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
++ ad_enable_collecting_distributing(port,
++ update_slave_arr);
++ break;
+ default:
+ break;
+ }
+@@ -1897,6 +2001,45 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ }
+ }
+
++/**
++ * ad_enable_collecting - enable a port's receive
++ * @port: the port we're looking at
++ *
++ * Enable @port if it's in an active aggregator
++ */
++static void ad_enable_collecting(struct port *port)
++{
++ if (port->aggregator->is_active) {
++ struct slave *slave = port->slave;
++
++ slave_dbg(slave->bond->dev, slave->dev,
++ "Enabling collecting on port %d (LAG %d)\n",
++ port->actor_port_number,
++ port->aggregator->aggregator_identifier);
++ __enable_collecting_port(port);
++ }
++}
++
++/**
++ * ad_disable_distributing - disable a port's transmit
++ * @port: the port we're looking at
++ * @update_slave_arr: Does slave array need update?
++ */
++static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
++{
++ if (port->aggregator &&
++ !MAC_ADDRESS_EQUAL(&port->aggregator->partner_system,
++ &(null_mac_addr))) {
++ slave_dbg(port->slave->bond->dev, port->slave->dev,
++ "Disabling distributing on port %d (LAG %d)\n",
++ port->actor_port_number,
++ port->aggregator->aggregator_identifier);
++ __disable_distributing_port(port);
++ /* Slave array needs an update */
++ *update_slave_arr = true;
++ }
++}
++
+ /**
+ * ad_enable_collecting_distributing - enable a port's transmit/receive
+ * @port: the port we're looking at
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 3cedadef9c8a..11c58b88f9ce 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -6310,6 +6310,7 @@ static int bond_check_params(struct bond_params *params)
+ params->ad_actor_sys_prio = ad_actor_sys_prio;
+ eth_zero_addr(params->ad_actor_system);
+ params->ad_user_port_key = ad_user_port_key;
++ params->coupled_control = 1;
+ if (packets_per_slave > 0) {
+ params->reciprocal_packets_per_slave =
+ reciprocal_value(packets_per_slave);
+diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
+index 27cbe148f0db..aebc814ad495 100644
+--- a/drivers/net/bonding/bond_netlink.c
++++ b/drivers/net/bonding/bond_netlink.c
+@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
+ [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
++ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ };
+
+ static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
+@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
+ return err;
+ }
+
++ if (data[IFLA_BOND_COUPLED_CONTROL]) {
++ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
++
++ bond_opt_initval(&newval, coupled_control);
++ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
++ data[IFLA_BOND_COUPLED_CONTROL], extack);
++ if (err)
++ return err;
++ }
++
+ return 0;
+ }
+
+@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
++ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ 0;
+ }
+
+@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
+ bond->params.missed_max))
+ goto nla_put_failure;
+
++ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
++ bond->params.coupled_control))
++ goto nla_put_failure;
++
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ struct ad_info info;
+
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 8a24c016f667..1235878d8715 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -85,7 +85,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+ static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+-
++static int bond_option_coupled_control_set(struct bonding *bond,
++ const struct bond_opt_value *newval);
+
+ static const struct bond_opt_value bond_mode_tbl[] = {
+ { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
+@@ -233,6 +234,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
+ { NULL, -1, 0},
+ };
+
++static const struct bond_opt_value bond_coupled_control_tbl[] = {
++ { "on", 1, BOND_VALFLAG_DEFAULT},
++ { "off", 0, 0},
++ { NULL, -1, 0},
++};
++
+ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
+ [BOND_OPT_MODE] = {
+ .id = BOND_OPT_MODE,
+@@ -497,6 +504,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
+ .desc = "Delay between each peer notification on failover event, in milliseconds",
+ .values = bond_peer_notif_delay_tbl,
+ .set = bond_option_peer_notif_delay_set
++ },
++ [BOND_OPT_COUPLED_CONTROL] = {
++ .id = BOND_OPT_COUPLED_CONTROL,
++ .name = "coupled_control",
++ .desc = "Opt into using coupled control MUX for LACP states",
++ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
++ .flags = BOND_OPTFLAG_IFDOWN,
++ .values = bond_coupled_control_tbl,
++ .set = bond_option_coupled_control_set,
+ }
+ };
+
+@@ -1828,3 +1844,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ bond->params.ad_user_port_key = newval->value;
+ return 0;
+ }
++
++static int bond_option_coupled_control_set(struct bonding *bond,
++ const struct bond_opt_value *newval)
++{
++ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
++ newval->string, newval->value);
++
++ bond->params.coupled_control = newval->value;
++ return 0;
++}
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 2d9596dba84d..5047711944df 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -54,6 +54,8 @@ typedef enum {
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
++ AD_MUX_COLLECTING, /* mux machine */
++ AD_MUX_DISTRIBUTING, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
+ } mux_states_t;
+
+diff --git a/include/net/bond_options.h b/include/net/bond_options.h
+index f631d9f09941..18687ccf0638 100644
+--- a/include/net/bond_options.h
++++ b/include/net/bond_options.h
+@@ -76,6 +76,7 @@ enum {
+ BOND_OPT_MISSED_MAX,
+ BOND_OPT_NS_TARGETS,
+ BOND_OPT_PRIO,
++ BOND_OPT_COUPLED_CONTROL,
+ BOND_OPT_LAST
+ };
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 9a3ac960dfe1..bfd3e4e58f86 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -152,6 +152,7 @@ struct bond_params {
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+ #endif
++ int coupled_control;
+
+ /* 2 bytes of padding : see ether_addr_equal_64bits() */
+ u8 ad_actor_system[ETH_ALEN + 2];
+@@ -171,6 +172,7 @@ struct slave {
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1, /* indicates inactive slave */
++ rx_disabled:1, /* indicates whether slave's Rx is disabled */
+ should_notify:1, /* indicates whether the state changed */
+ should_notify_link:1; /* indicates whether the link changed */
+ u8 duplex;
+@@ -574,6 +576,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ if (!slave->bond->params.all_slaves_active)
+ slave->inactive = 1;
++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
++ slave->rx_disabled = 1;
++}
++
++static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
++ bool notify)
++{
++ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ }
+
+ static inline void bond_set_slave_active_flags(struct slave *slave,
+@@ -581,6 +591,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
+ {
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
+ slave->inactive = 0;
++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
++ slave->rx_disabled = 0;
++}
++
++static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
++ bool notify)
++{
++ slave->rx_disabled = 0;
+ }
+
+ static inline bool bond_is_slave_inactive(struct slave *slave)
+@@ -588,6 +606,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
+ return slave->inactive;
+ }
+
++static inline bool bond_is_slave_rx_disabled(struct slave *slave)
++{
++ return slave->rx_disabled;
++}
++
+ static inline void bond_propose_link_state(struct slave *slave, int state)
+ {
+ slave->link_new_state = state;
+diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
+index 5e7a1041df3a..feebb4509abd 100644
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -938,6 +938,7 @@ enum {
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
++ IFLA_BOND_COUPLED_CONTROL,
+ __IFLA_BOND_MAX,
+ };
+
+diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
+index 0242f31e339c..0d2eabfac956 100644
+--- a/tools/include/uapi/linux/if_link.h
++++ b/tools/include/uapi/linux/if_link.h
+@@ -863,6 +863,7 @@ enum {
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
++ IFLA_BOND_COUPLED_CONTROL,
+ __IFLA_BOND_MAX,
+ };
+
+--
+2.50.1
+
--- /dev/null
+From 8b7707cab02ec906ece852c52ca5ecb8e69a77ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:59 +0000
+Subject: bonding: send LACPDUs periodically in passive mode after receiving
+ partner's LACPDU
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 0599640a21e98f0d6a3e9ff85c0a687c90a8103b ]
+
+When `lacp_active` is set to `off`, the bond operates in passive mode, meaning
+it only "speaks when spoken to." However, the current kernel implementation
+only sends an LACPDU in response when the partner's state changes.
+
+As a result, once LACP negotiation succeeds, the actor stops sending LACPDUs
+until the partner times out and sends an "expired" LACPDU. This causes
+continuous LACP state flapping.
+
+According to IEEE 802.1AX-2014, 6.4.13 Periodic Transmission machine. The
+values of Partner_Oper_Port_State.LACP_Activity and
+Actor_Oper_Port_State.LACP_Activity determine whether periodic transmissions
+take place. If either or both parameters are set to Active LACP, then periodic
+transmissions occur; if both are set to Passive LACP, then periodic
+transmissions do not occur.
+
+To comply with this, we remove the `!bond->params.lacp_active` check in
+`ad_periodic_machine()`. Instead, we initialize the actor's port's
+`LACP_STATE_LACP_ACTIVITY` state based on `lacp_active` setting.
+
+Additionally, we avoid setting the partner's state to
+`LACP_STATE_LACP_ACTIVITY` in the EXPIRED state, since we should not assume
+the partner is active by default.
+
+This ensures that in passive mode, the bond starts sending periodic LACPDUs
+after receiving one from the partner, and avoids flapping due to inactivity.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 42 +++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index c64b87ca067b..37364bbfdbdc 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -98,13 +98,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr);
+ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+ static void ad_tx_machine(struct port *port);
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
++static void ad_periodic_machine(struct port *port);
+ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
+ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ bool *update_slave_arr);
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+-static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+ static void ad_enable_collecting(struct port *port);
+ static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
+@@ -1291,10 +1291,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
+- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->sm_vars &= ~AD_PORT_MATCHED;
++ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
++ * machine state diagram, the statue should be
++ * Partner_Oper_Port_State.Synchronization = FALSE;
++ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
++ * start current_while_timer(Short Timeout);
++ * Actor_Oper_Port_State.Expired = TRUE;
++ */
++ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
+ port->sm_vars |= AD_PORT_CHURNED;
+@@ -1400,11 +1406,10 @@ static void ad_tx_machine(struct port *port)
+ /**
+ * ad_periodic_machine - handle a port's periodic state machine
+ * @port: the port we're looking at
+- * @bond_params: bond parameters we will use
+ *
+ * Turn ntt flag on priodically to perform periodic transmission of lacpdu's.
+ */
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
++static void ad_periodic_machine(struct port *port)
+ {
+ periodic_states_t last_state;
+
+@@ -1413,8 +1418,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
+
+ /* check if port was reinitialized */
+ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+- !bond_params->lacp_active) {
++ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
+ port->sm_periodic_state = AD_NO_PERIODIC;
+ }
+ /* check if state machine should change state */
+@@ -1938,16 +1942,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
+ /**
+ * ad_initialize_port - initialize a given port's parameters
+ * @port: the port we're looking at
+- * @lacp_fast: boolean. whether fast periodic should be used
++ * @bond_params: bond parameters we will use
+ */
+-static void ad_initialize_port(struct port *port, int lacp_fast)
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
+ {
+ static const struct port_params tmpl = {
+ .system_priority = 0xffff,
+ .key = 1,
+ .port_number = 1,
+ .port_priority = 0xff,
+- .port_state = 1,
++ .port_state = 0,
+ };
+ static const struct lacpdu lacpdu = {
+ .subtype = 0x01,
+@@ -1965,12 +1969,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ port->actor_port_priority = 0xff;
+ port->actor_port_aggregator_identifier = 0;
+ port->ntt = false;
+- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
+- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
++ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
++ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
++ if (bond_params->lacp_active) {
++ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ }
+
+- if (lacp_fast)
++ if (bond_params->lacp_fast)
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
+
+ memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
+@@ -2186,7 +2192,7 @@ void bond_3ad_bind_slave(struct slave *slave)
+ /* port initialization */
+ port = &(SLAVE_AD_INFO(slave)->port);
+
+- ad_initialize_port(port, bond->params.lacp_fast);
++ ad_initialize_port(port, &bond->params);
+
+ port->slave = slave;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
+@@ -2498,7 +2504,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ }
+
+ ad_rx_machine(NULL, port);
+- ad_periodic_machine(port, &bond->params);
++ ad_periodic_machine(port);
+ ad_port_selection_logic(port, &update_slave_arr);
+ ad_mux_machine(port, &update_slave_arr);
+ ad_tx_machine(port);
+--
+2.50.1
+
--- /dev/null
+From 06bcae0551a2e06d86eac4d89f4e396d4f40be75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:58 +0000
+Subject: bonding: update LACP activity flag after setting lacp_active
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit b64d035f77b1f02ab449393342264b44950a75ae ]
+
+The port's actor_oper_port_state activity flag should be updated immediately
+after changing the lacp_active option to reflect the current mode correctly.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 25 +++++++++++++++++++++++++
+ drivers/net/bonding/bond_options.c | 1 +
+ include/net/bond_3ad.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 9270977e6c7f..7557c525615e 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2725,6 +2725,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_3ad_update_lacp_active - change the lacp active
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++ struct port *port = NULL;
++ struct list_head *iter;
++ struct slave *slave;
++ int lacp_active;
++
++ lacp_active = bond->params.lacp_active;
++ spin_lock_bh(&bond->mode_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ port = &(SLAVE_AD_INFO(slave)->port);
++ if (lacp_active)
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ else
++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++ }
++ spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 21ca95cdef42..8a24c016f667 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1634,6 +1634,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
++ bond_3ad_update_lacp_active(bond);
+
+ return 0;
+ }
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index a016f275cb01..2d9596dba84d 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -303,6 +303,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+--
+2.50.1
+
--- /dev/null
+From 7b59be3c5ad1f0d6ae85f0dad90393a63bc784a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 370a6bce20a8..216bdebd9426 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -455,7 +455,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From c75519a5b2ba3a487360d064eeaa0e95aa05ba78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:36:41 -0500
+Subject: drm/amd/display: Add null pointer check in
+ mod_hdcp_hdcp1_create_session()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 7a2ca2ea64b1b63c8baa94a8f5deb70b2248d119 ]
+
+The function mod_hdcp_hdcp1_create_session() calls the function
+get_first_active_display(), but does not check its return value.
+The return value is a null pointer if the display list is empty.
+This will lead to a null pointer dereference.
+
+Add a null pointer check for get_first_active_display() and return
+MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function return null.
+
+This is similar to the commit c3e9826a2202
+("drm/amd/display: Add null pointer check for get_first_active_display()").
+
+Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 5e43eb3cd731649c4f8b9134f857be62a416c893)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 7f8f127e7722..ab6964ca1c2b 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
+--
+2.50.1
+
--- /dev/null
+From 84d986bb26f109bd9e34169149f8232b3f8e3319 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:31 +0800
+Subject: drm/hisilicon/hibmc: fix the hibmc loaded failed bug
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 93a08f856fcc5aaeeecad01f71bef3088588216a ]
+
+When hibmc fails to load, the driver uses hibmc_unload to free the
+resources, but the mutexes in mode.config are not initialized, which
+leads to a NULL pointer access. Just change the goto statements to
+returns, because hibmc_hw_init() doesn't need to free anything.
+
+Fixes: b3df5e65cc03 ("drm/hibmc: Drop drm_vblank_cleanup")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index fe4269c5aa0a..20c2af66ee53 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -269,12 +269,12 @@ static int hibmc_load(struct drm_device *dev)
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+- goto err;
++ return ret;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+- goto err;
++ return ret;
+ }
+
+ ret = hibmc_kms_init(priv);
+--
+2.50.1
+
--- /dev/null
+From ee33705d9f32b43c17a1c3afc6eb2087d990e87e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 14:12:45 -0700
+Subject: gve: prevent ethtool ops after shutdown
+
+From: Jordan Rhee <jordanrhee@google.com>
+
+[ Upstream commit 75a9a46d67f46d608205888f9b34e315c1786345 ]
+
+A crash can occur if an ethtool operation is invoked
+after shutdown() is called.
+
+shutdown() is invoked during system shutdown to stop DMA operations
+without performing expensive deallocations. It is discouraged to
+unregister the netdev in this path, so the device may still be visible
+to userspace and kernel helpers.
+
+In gve, shutdown() tears down most internal data structures. If an
+ethtool operation is dispatched after shutdown(), it will dereference
+freed or NULL pointers, leading to a kernel panic. While graceful
+shutdown normally quiesces userspace before invoking the reboot
+syscall, forced shutdowns (as observed on GCP VMs) can still trigger
+this path.
+
+Fix by calling netif_device_detach() in shutdown().
+This marks the device as detached so the ethtool ioctl handler
+will skip dispatching operations to the driver.
+
+Fixes: 974365e51861 ("gve: Implement suspend/resume/shutdown")
+Signed-off-by: Jordan Rhee <jordanrhee@google.com>
+Signed-off-by: Jeroen de Borst <jeroendb@google.com>
+Link: https://patch.msgid.link/20250818211245.1156919-1-jeroendb@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 4fee466a8e90..2e8b01b3ee44 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1683,6 +1683,8 @@ static void gve_shutdown(struct pci_dev *pdev)
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
++ netif_device_detach(netdev);
++
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close, if close fails, reset */
+--
+2.50.1
+
--- /dev/null
+From 754799b4e820c0bc48dcf2099696fc9329740aeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:59 -0700
+Subject: igc: fix disabling L1.2 PCI-E link substate on I226 on init
+
+From: ValdikSS <iam@valdikss.org.ru>
+
+[ Upstream commit 1468c1f97cf32418e34dbb40b784ed9333b9e123 ]
+
+Device ID comparison in igc_is_device_id_i226 is performed before
+the ID is set, resulting in always failing check on init.
+
+Before the patch:
+* L1.2 is not disabled on init
+* L1.2 is properly disabled after suspend-resume cycle
+
+With the patch:
+* L1.2 is properly disabled both on init and after suspend-resume
+
+How to test:
+Connect to the 1G link with 300+ mbit/s Internet speed, and run
+the download speed test, such as:
+
+ curl -o /dev/null http://speedtest.selectel.ru/1GB
+
+Without L1.2 disabled, the speed would be no more than ~200 mbit/s.
+With L1.2 disabled, the speed would reach 1 gbit/s.
+Note: it's required that the latency between your host and the remote
+be around 3-5 ms, the test inside LAN (<1 ms latency) won't trigger the
+issue.
+
+Link: https://lore.kernel.org/intel-wired-lan/15248b4f-3271-42dd-8e35-02bfc92b25e1@intel.com
+Fixes: 0325143b59c6 ("igc: disable L1.2 PCI-E link substate to avoid performance issue")
+Signed-off-by: ValdikSS <iam@valdikss.org.ru>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-6-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index ca3fd0270810..5bcdb1b7da29 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6553,6 +6553,13 @@ static int igc_probe(struct pci_dev *pdev,
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -6577,13 +6584,6 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+--
+2.50.1
+
--- /dev/null
+From 9ce3adb2878fcef77afe322f5cf2e4f8ec7ad214 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index bc78e8665551..23804270eda1 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3553,7 +3553,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3579,7 +3579,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From 7a10db9091b0c8c7a2b26258cf9a02cb06119460 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:20:51 +0530
+Subject: iosys-map: Fix undefined behavior in iosys_map_clear()
+
+From: Nitin Gote <nitin.r.gote@intel.com>
+
+[ Upstream commit 5634c8cb298a7146b4e38873473e280b50e27a2c ]
+
+The current iosys_map_clear() implementation reads the potentially
+uninitialized 'is_iomem' boolean field to decide which union member
+to clear. This causes undefined behavior when called on uninitialized
+structures, as 'is_iomem' may contain garbage values like 0xFF.
+
+UBSAN detects this as:
+ UBSAN: invalid-load in include/linux/iosys-map.h:267
+ load of value 255 is not a valid value for type '_Bool'
+
+Fix by unconditionally clearing the entire structure with memset(),
+eliminating the need to read uninitialized data and ensuring all
+fields are set to known good values.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14639
+Fixes: 01fd30da0474 ("dma-buf: Add struct dma-buf-map for storing struct dma_buf.vaddr_ptr")
+Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250718105051.2709487-1-nitin.r.gote@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/iosys-map.h | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
+index cb71aa616bd3..631d58d0b838 100644
+--- a/include/linux/iosys-map.h
++++ b/include/linux/iosys-map.h
+@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
+ */
+ static inline void iosys_map_clear(struct iosys_map *map)
+ {
+- if (map->is_iomem) {
+- map->vaddr_iomem = NULL;
+- map->is_iomem = false;
+- } else {
+- map->vaddr = NULL;
+- }
++ memset(map, 0, sizeof(*map));
+ }
+
+ /**
+--
+2.50.1
+
--- /dev/null
+From 59ec427a657e3717b5ac77251ee88a6ae5171e67 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index dd7406a9380f..b90c286d77ed 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -294,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From 0572c63db66b74107fbdcd1bc940747f3053f863 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 1703c640a434..7ef82c30e857 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -403,7 +403,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -438,6 +438,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From e7a431b5fbe02b2592b8310e63d27679858cc276 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 67ecdb9e708f..2aec55dd07c6 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2522,6 +2522,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 8da169663bda..f44c8548c7e3 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -93,6 +93,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From ae16badcf7be182ba1fefe772add989bf8980eed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 10:10:54 +0800
+Subject: net: bridge: fix soft lockup in br_multicast_query_expired()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d1547bf460baec718b3398365f8de33d25c5f36f ]
+
+When set multicast_query_interval to a large value, the local variable
+'time' in br_multicast_send_query() may overflow. If the time is smaller
+than jiffies, the timer will expire immediately, and then call mod_timer()
+again, which creates a loop and may trigger the following soft lockup
+issue.
+
+ watchdog: BUG: soft lockup - CPU#1 stuck for 221s! [rb_consumer:66]
+ CPU: 1 UID: 0 PID: 66 Comm: rb_consumer Not tainted 6.16.0+ #259 PREEMPT(none)
+ Call Trace:
+ <IRQ>
+ __netdev_alloc_skb+0x2e/0x3a0
+ br_ip6_multicast_alloc_query+0x212/0x1b70
+ __br_multicast_send_query+0x376/0xac0
+ br_multicast_send_query+0x299/0x510
+ br_multicast_query_expired.constprop.0+0x16d/0x1b0
+ call_timer_fn+0x3b/0x2a0
+ __run_timers+0x619/0x950
+ run_timer_softirq+0x11c/0x220
+ handle_softirqs+0x18e/0x560
+ __irq_exit_rcu+0x158/0x1a0
+ sysvec_apic_timer_interrupt+0x76/0x90
+ </IRQ>
+
+This issue can be reproduced with:
+ ip link add br0 type bridge
+ echo 1 > /sys/class/net/br0/bridge/multicast_querier
+ echo 0xffffffffffffffff >
+ /sys/class/net/br0/bridge/multicast_query_interval
+ ip link set dev br0 up
+
+The multicast_startup_query_interval can also cause this issue. Similar to
+the commit 99b40610956a ("net: bridge: mcast: add and enforce query
+interval minimum"), add check for the query interval maximum to fix this
+issue.
+
+Link: https://lore.kernel.org/netdev/20250806094941.1285944-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250812091818.542238-1-wangliang74@huawei.com/
+Fixes: d902eee43f19 ("bridge: Add multicast count/interval sysfs entries")
+Suggested-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250813021054.1643649-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_multicast.c | 16 ++++++++++++++++
+ net/bridge/br_private.h | 2 ++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index e28c9db0c4db..140dbcfc8b94 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4634,6 +4634,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_query_interval = intvl_jiffies;
+ }
+
+@@ -4650,6 +4658,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 767f0e81dd26..20c96cb406d5 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -30,6 +30,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+
+ #define BR_HWDOM_MAX BITS_PER_LONG
+
+--
+2.50.1
+
--- /dev/null
+From 0158d3deed81c30ca2264cfc5e6f13dd70b3dc81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:57 +0800
+Subject: net: ethernet: mtk_ppe: add RCU lock around dev_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 62c30c544359aa18b8fb2734166467a07d435c2d ]
+
+Ensure ndo_fill_forward_path() is called with RCU lock held.
+
+Fixes: 2830e314778d ("net: ethernet: mtk-ppe: fix traffic offload with bridged wlan")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-1-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index 8cb8d47227f5..cc8f4f5decaf 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
++ rcu_read_lock();
+ err = dev_fill_forward_path(dev, addr, &stack);
++ rcu_read_unlock();
+ if (err)
+ return err;
+
+--
+2.50.1
+
--- /dev/null
+From 37eb907ae5089ef6dc84694554139f0315b86336 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 12:51:19 +0200
+Subject: net: gso: Forbid IPv6 TSO with extensions on devices with only
+ IPV6_CSUM
+
+From: Jakub Ramaseuski <jramaseu@redhat.com>
+
+[ Upstream commit 864e3396976ef41de6cc7bc366276bf4e084fff2 ]
+
+When performing Generic Segmentation Offload (GSO) on an IPv6 packet that
+contains extension headers, the kernel incorrectly requests checksum offload
+if the egress device only advertises NETIF_F_IPV6_CSUM feature, which has
+a strict contract: it supports checksum offload only for plain TCP or UDP
+over IPv6 and explicitly does not support packets with extension headers.
+The current GSO logic violates this contract by failing to disable the feature
+for packets with extension headers, such as those used in GREoIPv6 tunnels.
+
+This violation results in the device being asked to perform an operation
+it cannot support, leading to a `skb_warn_bad_offload` warning and a collapse
+of network throughput. While device TSO/USO is correctly bypassed in favor
+of software GSO for these packets, the GSO stack must be explicitly told not
+to request checksum offload.
+
+Mask NETIF_F_IPV6_CSUM, NETIF_F_TSO6 and NETIF_F_GSO_UDP_L4
+in gso_features_check if the IPv6 header contains extension headers to compute
+checksum in software.
+
+The exception is a BIG TCP extension, which, as stated in commit
+68e068cabd2c6c53 ("net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets"):
+"The feature is only enabled on devices that support BIG TCP TSO.
+The header is only present for PF_PACKET taps like tcpdump,
+and not transmitted by physical devices."
+
+kernel log output (truncated):
+WARNING: CPU: 1 PID: 5273 at net/core/dev.c:3535 skb_warn_bad_offload+0x81/0x140
+...
+Call Trace:
+ <TASK>
+ skb_checksum_help+0x12a/0x1f0
+ validate_xmit_skb+0x1a3/0x2d0
+ validate_xmit_skb_list+0x4f/0x80
+ sch_direct_xmit+0x1a2/0x380
+ __dev_xmit_skb+0x242/0x670
+ __dev_queue_xmit+0x3fc/0x7f0
+ ip6_finish_output2+0x25e/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_tnl_xmit+0x608/0xc00 [ip6_tunnel]
+ ip6gre_tunnel_xmit+0x1c0/0x390 [ip6_gre]
+ dev_hard_start_xmit+0x63/0x1c0
+ __dev_queue_xmit+0x6d0/0x7f0
+ ip6_finish_output2+0x214/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ inet6_csk_xmit+0xeb/0x150
+ __tcp_transmit_skb+0x555/0xa80
+ tcp_write_xmit+0x32a/0xe90
+ tcp_sendmsg_locked+0x437/0x1110
+ tcp_sendmsg+0x2f/0x50
+...
+skb linear: 00000000: e4 3d 1a 7d ec 30 e4 3d 1a 7e 5d 90 86 dd 60 0e
+skb linear: 00000010: 00 0a 1b 34 3c 40 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000020: 00 00 00 00 00 12 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000030: 00 00 00 00 00 11 2f 00 04 01 04 01 01 00 00 00
+skb linear: 00000040: 86 dd 60 0e 00 0a 1b 00 06 40 20 23 00 00 00 00
+skb linear: 00000050: 00 00 00 00 00 00 00 00 00 12 20 23 00 00 00 00
+skb linear: 00000060: 00 00 00 00 00 00 00 00 00 11 bf 96 14 51 13 f9
+skb linear: 00000070: ae 27 a0 a8 2b e3 80 18 00 40 5b 6f 00 00 01 01
+skb linear: 00000080: 08 0a 42 d4 50 d5 4b 70 f8 1a
+
+Fixes: 04c20a9356f283da ("net: skip offload for NETIF_F_IPV6_CSUM if ipv6 header contains extension")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Suggested-by: Michal Schmidt <mschmidt@redhat.com>
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Jakub Ramaseuski <jramaseu@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250814105119.1525687-1-jramaseu@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 212a909b4840..114fc8bc37f8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3610,6 +3610,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ features &= ~NETIF_F_TSO_MANGLEID;
+ }
+
++ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
++ * so neither does TSO that depends on it.
++ */
++ if (features & NETIF_F_IPV6_CSUM &&
++ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
++ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
++
+ return features;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From d3a9ea4f131b75ecf62e1ba8c6bd548493761445 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 12dd4d41605c..d99e1603c32a 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1761,7 +1761,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1771,6 +1771,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1938,13 +1939,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From 9eb387fe068ff784630a536a82edfec407629dd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 1e19d3ffbf21..7aac0916205b 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -589,7 +589,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From 6c9000bc5c078277614e953a0f2eca943ce4e998 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:45:07 +0900
+Subject: net: usb: asix_devices: Fix PHY address mask in MDIO bus
+ initialization
+
+From: Yuichiro Tsuji <yuichtsu@amazon.com>
+
+[ Upstream commit 24ef2f53c07f273bad99173e27ee88d44d135b1c ]
+
+Syzbot reported shift-out-of-bounds exception on MDIO bus initialization.
+
+The PHY address should be masked to 5 bits (0-31). Without this
+mask, invalid PHY addresses could be used, potentially causing issues
+with MDIO bus operations.
+
+Fix this by masking the PHY address with 0x1f (31 decimal) to ensure
+it stays within the valid range.
+
+Fixes: 4faff70959d5 ("net: usb: asix_devices: add phy_mask for ax88772 mdio bus")
+Reported-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=20537064367a0f98d597
+Tested-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Signed-off-by: Yuichiro Tsuji <yuichtsu@amazon.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250818084541.1958-1-yuichtsu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/asix_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index d1813a2495bc..021f38c25be8 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,7 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+- priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+--
+2.50.1
+
--- /dev/null
+From aae7f7c626a566cf8534ba3cfef43f6436f47909 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at PRE_ROUTING stage.
+
+Instead of checking hook just check if the skb already has a route
+attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 675b5bbed638..2d663fe50f87 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) ||
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index e4776bd2ed89..f3579bccf0a5 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From 4f26d68fb7ffd04acfa3a4ccaae42d4e14673742 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 10:10:29 +0200
+Subject: phy: mscc: Fix timestamping for vsc8584
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit bc1a59cff9f797bfbf8f3104507584d89e9ecf2e ]
+
+There was a problem when we received frames and the frames were
+timestamped. The driver is configured to store the nanosecond part of
+the timestmap in the ptp reserved bits and it would take the second part
+by reading the LTC. The problem is that when reading the LTC we are in
+atomic context and to read the second part will go over mdio bus which
+might sleep, so we get an error.
+The fix consists in actually putting all the frames in a queue, starting
+the aux work, and in that work reading the LTC and then calculating the
+full received time.
+
+Fixes: 7d272e63e0979d ("net: phy: mscc: timestamping and PHC support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250818081029.1300780-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc.h | 12 ++++++++
+ drivers/net/phy/mscc/mscc_main.c | 12 ++++++++
+ drivers/net/phy/mscc/mscc_ptp.c | 49 ++++++++++++++++++++++++--------
+ 3 files changed, 61 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 055e4ca5b3b5..878298304430 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -360,6 +360,13 @@ struct vsc85xx_hw_stat {
+ u16 mask;
+ };
+
++struct vsc8531_skb_cb {
++ u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++ ((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+@@ -408,6 +415,11 @@ struct vsc8531_private {
+ */
+ struct mutex ts_lock;
+ struct mutex phc_lock;
++
++ /* list of skbs that were received and need timestamp information but it
++ * didn't received it yet
++ */
++ struct sk_buff_head rx_skbs_list;
+ };
+
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 7bd940baec59..36734bb217e4 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2324,6 +2324,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++ struct vsc8531_private *priv = phydev->priv;
++
++ skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2554,6 +2561,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2579,6 +2587,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2604,6 +2613,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2627,6 +2637,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2650,6 +2661,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index d0bd6ab45ebe..add1a9ee721a 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1193,9 +1193,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+- struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct vsc85xx_ptphdr *ptphdr;
+- struct timespec64 ts;
+ unsigned long ns;
+
+ if (!vsc8531->ptp->configured)
+@@ -1205,27 +1203,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ type == PTP_CLASS_NONE)
+ return false;
+
+- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ if (!ptphdr)
+ return false;
+
+- shhwtstamps = skb_hwtstamps(skb);
+- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ ns = ntohl(ptphdr->rsrvd2);
+
+- /* nsec is in reserved field */
+- if (ts.tv_nsec < ns)
+- ts.tv_sec--;
++ VSC8531_SKB_CB(skb)->ns = ns;
++ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+
+- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx(skb);
++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+
+ return true;
+ }
+
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++ struct skb_shared_hwtstamps *shhwtstamps = NULL;
++ struct phy_device *phydev = ptp->phydev;
++ struct vsc8531_private *priv = phydev->priv;
++ struct sk_buff_head received;
++ struct sk_buff *rx_skb;
++ struct timespec64 ts;
++ unsigned long flags;
++
++ __skb_queue_head_init(&received);
++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++ vsc85xx_gettime(info, &ts);
++ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++ shhwtstamps = skb_hwtstamps(rx_skb);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++ ts.tv_sec--;
++
++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++ VSC8531_SKB_CB(rx_skb)->ns);
++ netif_rx(rx_skb);
++ }
++
++ return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .owner = THIS_MODULE,
+ .name = "VSC85xx timer",
+@@ -1239,6 +1262,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .adjfine = &vsc85xx_adjfine,
+ .gettime64 = &vsc85xx_gettime,
+ .settime64 = &vsc85xx_settime,
++ .do_aux_work = &vsc85xx_do_aux_work,
+ };
+
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1566,6 +1590,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+
+ mutex_init(&vsc8531->phc_lock);
+ mutex_init(&vsc8531->ts_lock);
++ skb_queue_head_init(&vsc8531->rx_skbs_list);
+
+ /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ * the same GPIO can be requested by all the PHYs of the same package.
+--
+2.50.1
+
--- /dev/null
+From 8c9fb06cd9edbadd4897ce2212d5cc489f7d2332 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:58 +0800
+Subject: ppp: fix race conditions in ppp_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 0417adf367a0af11adf7ace849af4638cfb573f7 ]
+
+ppp_fill_forward_path() has two race conditions:
+
+1. The ppp->channels list can change between list_empty() and
+ list_first_entry(), as ppp_lock() is not held. If the only channel
+ is deleted in ppp_disconnect_channel(), list_first_entry() may
+ access an empty head or a freed entry, and trigger a panic.
+
+2. pch->chan can be NULL. When ppp_unregister_channel() is called,
+ pch->chan is set to NULL before pch is removed from ppp->channels.
+
+Fix these by using a lockless RCU approach:
+- Use list_first_or_null_rcu() to safely test and access the first list
+ entry.
+- Convert list modifications on ppp->channels to their RCU variants and
+ add synchronize_net() after removal.
+- Check for a NULL pch->chan before dereferencing it.
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-2-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/ppp_generic.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 67d9efb05443..cbf1c1f23281 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+- if (list_empty(&ppp->channels))
++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++ if (!pch)
++ return -ENODEV;
++
++ chan = READ_ONCE(pch->chan);
++ if (!chan)
+ return -ENODEV;
+
+- pch = list_first_entry(&ppp->channels, struct channel, clist);
+- chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+- pch->chan = NULL;
++ WRITE_ONCE(pch->chan, NULL);
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+@@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+- list_add_tail(&pch->clist, &ppp->channels);
++ list_add_tail_rcu(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ refcount_inc(&ppp->file.refcnt);
+@@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch)
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+- list_del(&pch->clist);
++ list_del_rcu(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
++ synchronize_net();
+ if (refcount_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+--
+2.50.1
+
--- /dev/null
+From 58e445b77ee9971dfd48735c5eb9a3fd739ae708 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 203350c6e00f..4962d68bf217 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc(pages * sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc(pages * sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From f09bcf0098dca54e84f449a42e6d92bf843ec7dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:53:55 +0800
+Subject: RDMA/erdma: Fix ignored return value of init_kernel_qp
+
+From: Boshi Yu <boshiyu@linux.alibaba.com>
+
+[ Upstream commit d5c74713f0117d07f91eb48b10bc2ad44e23c9b9 ]
+
+The init_kernel_qp interface may fail. Check its return value and free
+related resources properly when it does.
+
+Fixes: 155055771704 ("RDMA/erdma: Add verbs implementation")
+Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
+Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
+Link: https://patch.msgid.link/20250725055410.67520-3-boshiyu@linux.alibaba.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/erdma/erdma_verbs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 2edf0d882c6a..cc2b20c8b050 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -727,7 +727,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ if (ret)
+ goto err_out_cmd;
+ } else {
+- init_kernel_qp(dev, qp, attrs);
++ ret = init_kernel_qp(dev, qp, attrs);
++ if (ret)
++ goto err_out_xa;
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+--
+2.50.1
+
--- /dev/null
+From d6394560508ba783afbb7297c7ca5cb67038cf36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index f4c7dbfaf8ee..c5f53dc3dbbc 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -64,24 +64,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .llseek = no_llseek,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 4ccd2e8dac0336a98f05e0b0fe4dfb3718f46e03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index c5f53dc3dbbc..5848f2e374a6 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -83,7 +84,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 0a6438dd26ce87f83a3476881b4e2397109cbd93 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 2925823a494a..837ea487cc82 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
iio-temperature-maxim_thermocouple-use-dma-safe-buffer-for-spi_read.patch
compiler-remove-__addressable_asm-_str-again.patch
x86-cpu-hygon-add-missing-resctrl_cpu_detect-in-bsp_init-helper.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+iosys-map-fix-undefined-behavior-in-iosys_map_clear.patch
+rdma-erdma-fix-ignored-return-value-of-init_kernel_q.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+net-bridge-fix-soft-lockup-in-br_multicast_query_exp.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+bluetooth-hci_conn-do-return-error-from-hci_enhanced.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+drm-hisilicon-hibmc-fix-the-hibmc-loaded-failed-bug.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+drm-amd-display-add-null-pointer-check-in-mod_hdcp_h.patch
+net-gso-forbid-ipv6-tso-with-extensions-on-devices-w.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+net-ethernet-mtk_ppe-add-rcu-lock-around-dev_fill_fo.patch
+ppp-fix-race-conditions-in-ppp_fill_forward_path.patch
+phy-mscc-fix-timestamping-for-vsc8584.patch
+net-usb-asix_devices-fix-phy-address-mask-in-mdio-bu.patch
+gve-prevent-ethtool-ops-after-shutdown.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+igc-fix-disabling-l1.2-pci-e-link-substate-on-i226-o.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+bonding-update-lacp-activity-flag-after-setting-lacp.patch
+bonding-add-independent-control-state-machine.patch
+bonding-send-lacpdus-periodically-in-passive-mode-af.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
--- /dev/null
+From c4d2199d4bdb865bcb57e2a97be9607908de8894 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 09:43:17 +0800
+Subject: ALSA: timer: fix ida_free call while not allocated
+
+From: Dewei Meng <mengdewei@cqsoftware.com.cn>
+
+[ Upstream commit 5003a65790ed66be882d1987cc2ca86af0de3db1 ]
+
+In the snd_utimer_create() function, if the kasprintf() function returns
+NULL, snd_utimer_put_id() will be called, which finally uses ida_free()
+to free the unallocated id 0.
+
+the syzkaller reported the following information:
+ ------------[ cut here ]------------
+ ida_free called for id=0 which is not allocated.
+ WARNING: CPU: 1 PID: 1286 at lib/idr.c:592 ida_free+0x1fd/0x2f0 lib/idr.c:592
+ Modules linked in:
+ CPU: 1 UID: 0 PID: 1286 Comm: syz-executor164 Not tainted 6.15.8 #3 PREEMPT(lazy)
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-4.fc42 04/01/2014
+ RIP: 0010:ida_free+0x1fd/0x2f0 lib/idr.c:592
+ Code: f8 fc 41 83 fc 3e 76 69 e8 70 b2 f8 (...)
+ RSP: 0018:ffffc900007f79c8 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: 1ffff920000fef3b RCX: ffffffff872176a5
+ RDX: ffff88800369d200 RSI: 0000000000000000 RDI: ffff88800369d200
+ RBP: 0000000000000000 R08: ffffffff87ba60a5 R09: 0000000000000000
+ R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
+ R13: 0000000000000002 R14: 0000000000000000 R15: 0000000000000000
+ FS: 00007f6f1abc1740(0000) GS:ffff8880d76a0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f6f1ad7a784 CR3: 000000007a6e2000 CR4: 00000000000006f0
+ Call Trace:
+ <TASK>
+ snd_utimer_put_id sound/core/timer.c:2043 [inline] [snd_timer]
+ snd_utimer_create+0x59b/0x6a0 sound/core/timer.c:2184 [snd_timer]
+ snd_utimer_ioctl_create sound/core/timer.c:2202 [inline] [snd_timer]
+ __snd_timer_user_ioctl.isra.0+0x724/0x1340 sound/core/timer.c:2287 [snd_timer]
+ snd_timer_user_ioctl+0x75/0xc0 sound/core/timer.c:2298 [snd_timer]
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl fs/ioctl.c:893 [inline]
+ __x64_sys_ioctl+0x198/0x200 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x7b/0x160 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+
+The utimer->id should be set properly before the kasprintf() function,
+which ensures the snd_utimer_put_id() function will free the allocated id.
+
+Fixes: 37745918e0e75 ("ALSA: timer: Introduce virtual userspace-driven timers")
+Signed-off-by: Dewei Meng <mengdewei@cqsoftware.com.cn>
+Link: https://patch.msgid.link/20250821014317.40786-1-mengdewei@cqsoftware.com.cn
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/timer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index d774b9b71ce2..a0dcb4ebb059 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -2139,14 +2139,14 @@ static int snd_utimer_create(struct snd_timer_uinfo *utimer_info,
+ goto err_take_id;
+ }
+
++ utimer->id = utimer_id;
++
+ utimer->name = kasprintf(GFP_KERNEL, "snd-utimer%d", utimer_id);
+ if (!utimer->name) {
+ err = -ENOMEM;
+ goto err_get_name;
+ }
+
+- utimer->id = utimer_id;
+-
+ tid.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
+ tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
+ tid.card = -1;
+--
+2.50.1
+
--- /dev/null
+From e1da8e09ab49cb8f63b30cdabc2e8863667eadaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 1cb52373e70f..db2c9bac00ad 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From 95eace3963a867a95b09b716bc92716cf9972cdd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 49caa25734ec3a86a55d43f965fbeac465d7d4ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 16:51:17 +0800
+Subject: Bluetooth: btmtk: Fix wait_on_bit_timeout interruption during
+ shutdown
+
+From: Jiande Lu <jiande.lu@mediatek.com>
+
+[ Upstream commit 099799fa9b76c5c02b49e07005a85117a25b01ea ]
+
+During the shutdown process, an interrupt occurs that
+prematurely terminates the wait for the expected event.
+This change replaces TASK_INTERRUPTIBLE with
+TASK_UNINTERRUPTIBLE in the wait_on_bit_timeout call to ensure
+the shutdown process completes as intended without being
+interrupted by signals.
+
+Fixes: d019930b0049 ("Bluetooth: btmtk: move btusb_mtk_hci_wmt_sync to btmtk.c")
+Signed-off-by: Jiande Lu <jiande.lu@mediatek.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btmtk.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 05de2e6f563d..07979d47eb76 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
+- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+- if (err == -EINTR) {
+- bt_dev_err(hdev, "Execution of wmt command interrupted");
+- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+- goto err_free_wc;
+- }
++ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+--
+2.50.1
+
--- /dev/null
+From 0d2ec876831783930f97528ae00e76362b7c8cb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 22:14:51 +0300
+Subject: Bluetooth: hci_conn: do return error from hci_enhanced_setup_sync()
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+[ Upstream commit 0eaf7c7e85da7495c0e03a99375707fc954f5e7b ]
+
+The commit e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to
+hci_sync") missed to update the *return* statement under the *case* of
+BT_CODEC_TRANSPARENT in hci_enhanced_setup_sync(), which led to returning
+success (0) instead of the negative error code (-EINVAL). However, the
+result of hci_enhanced_setup_sync() seems to be ignored anyway, since NULL
+gets passed to hci_cmd_sync_queue() as the last argument in that case and
+the only function interested in that result is specified by that argument.
+
+Fixes: e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to hci_sync")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index c6c1232db4e2..dad902047414 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -338,7 +338,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ case BT_CODEC_TRANSPARENT:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+- return false;
++ return -EINVAL;
++
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x03;
+ cp.rx_coding_format.id = 0x03;
+--
+2.50.1
+
--- /dev/null
+From b79257cf9c1f5111b5431f635502b2af17bcf6fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 Aug 2025 11:36:20 +0300
+Subject: Bluetooth: hci_event: fix MTU for BN == 0 in CIS Established
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 0b3725dbf61b51e7c663834811b3691157ae17d6 ]
+
+BN == 0x00 in CIS Established means no isochronous data for the
+corresponding direction (Core v6.1 pp. 2394). In this case SDU MTU
+should be 0.
+
+However, the specification does not say the Max_PDU_C_To_P or P_To_C are
+then zero. Intel AX210 in Framed CIS mode sets nonzero Max_PDU for
+direction with zero BN. This causes failure later when we try to LE
+Setup ISO Data Path for disabled direction, which is disallowed (Core
+v6.1 pp. 2750).
+
+Fix by setting SDU MTU to 0 if BN == 0.
+
+Fixes: 2be22f1941d5f ("Bluetooth: hci_event: Fix parsing of CIS Established Event")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_event.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 38643ffa65a9..768bd5fd808f 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6725,8 +6725,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.in.phy = ev->c_phy;
+ qos->ucast.out.phy = ev->p_phy;
+ break;
+@@ -6740,8 +6740,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.out.phy = ev->c_phy;
+ qos->ucast.in.phy = ev->p_phy;
+ break;
+--
+2.50.1
+
--- /dev/null
+From 8d1f68c5d5b8788b575f04f76fc027040fbd86f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 16:43:18 -0400
+Subject: Bluetooth: hci_sync: Fix scan state after PA Sync has been
+ established
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit ca88be1a2725a42f8dbad579181611d9dcca8e88 ]
+
+Passive scanning is used to program the address of the peer to be
+synchronized, so once HCI_EV_LE_PA_SYNC_ESTABLISHED is received it
+needs to be updated after clearing HCI_PA_SYNC then call
+hci_update_passive_scan_sync to return it to its original state.
+
+Fixes: 6d0417e4e1cf ("Bluetooth: hci_conn: Fix not setting conn_timeout for Broadcast Receiver")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index bbd809414b2f..c7fd657c86ff 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -6960,8 +6960,6 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+
+ hci_dev_lock(hdev);
+
+- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-
+ if (!hci_conn_valid(hdev, conn))
+ clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+@@ -7055,6 +7053,11 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
++ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
++
++ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
++ hci_update_passive_scan_sync(hdev);
++
+ return err;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From bde14866575f66e600474c3f6e806616620ec221 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 15:56:03 +0800
+Subject: Bluetooth: hci_sync: Prevent unintended PA sync when SID is 0xFF
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit 4d19cd228bbe8ff84a63fe7b11bc756b4b4370c7 ]
+
+After LE Extended Scan times out, conn->sid remains 0xFF,
+so the PA sync creation process should be aborted.
+
+Btmon snippet from PA sync with SID=0xFF:
+
+< HCI Command: LE Set Extended.. (0x08|0x0042) plen 6 #74726 [hci0] 863.107927
+ Extended scan: Enabled (0x01)
+ Filter duplicates: Enabled (0x01)
+ Duration: 0 msec (0x0000)
+ Period: 0.00 sec (0x0000)
+> HCI Event: Command Complete (0x0e) plen 4 #74727 [hci0] 863.109389
+ LE Set Extended Scan Enable (0x08|0x0042) ncmd 1
+ Status: Success (0x00)
+< HCI Command: LE Periodic Ad.. (0x08|0x0044) plen 14 #74728 [hci0] 865.141168
+ Options: 0x0000
+ Use advertising SID, Advertiser Address Type and address
+ Reporting initially enabled
+ SID: 0xff
+ Adv address type: Random (0x01)
+ Adv address: 0D:D7:2C:E7:42:46 (Non-Resolvable)
+ Skip: 0x0000
+ Sync timeout: 20000 msec (0x07d0)
+ Sync CTE type: 0x0000
+> HCI Event: Command Status (0x0f) plen 4 #74729 [hci0] 865.143223
+ LE Periodic Advertising Create Sync (0x08|0x0044) ncmd 1
+ Status: Success (0x00)
+
+Fixes: e2d471b7806b ("Bluetooth: ISO: Fix not using SID from adv report")
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index c7fd657c86ff..af86df9de941 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -7020,10 +7020,13 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update
+ * it.
+ */
+- if (conn->sid == HCI_SID_INVALID)
+- __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+- HCI_EV_LE_EXT_ADV_REPORT,
+- conn->conn_timeout, NULL);
++ if (conn->sid == HCI_SID_INVALID) {
++ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
++ HCI_EV_LE_EXT_ADV_REPORT,
++ conn->conn_timeout, NULL);
++ if (err == -ETIMEDOUT)
++ goto done;
++ }
+
+ memset(&cp, 0, sizeof(cp));
+ cp.options = qos->bcast.options;
+@@ -7053,6 +7056,7 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
++done:
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
+--
+2.50.1
+
--- /dev/null
+From 275df869ecb38860a0590bf6642a89a74bcac8ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:59 +0000
+Subject: bonding: send LACPDUs periodically in passive mode after receiving
+ partner's LACPDU
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 0599640a21e98f0d6a3e9ff85c0a687c90a8103b ]
+
+When `lacp_active` is set to `off`, the bond operates in passive mode, meaning
+it only "speaks when spoken to." However, the current kernel implementation
+only sends an LACPDU in response when the partner's state changes.
+
+As a result, once LACP negotiation succeeds, the actor stops sending LACPDUs
+until the partner times out and sends an "expired" LACPDU. This causes
+continuous LACP state flapping.
+
+According to IEEE 802.1AX-2014, 6.4.13 Periodic Transmission machine. The
+values of Partner_Oper_Port_State.LACP_Activity and
+Actor_Oper_Port_State.LACP_Activity determine whether periodic transmissions
+take place. If either or both parameters are set to Active LACP, then periodic
+transmissions occur; if both are set to Passive LACP, then periodic
+transmissions do not occur.
+
+To comply with this, we remove the `!bond->params.lacp_active` check in
+`ad_periodic_machine()`. Instead, we initialize the actor's port's
+`LACP_STATE_LACP_ACTIVITY` state based on `lacp_active` setting.
+
+Additionally, we avoid setting the partner's state to
+`LACP_STATE_LACP_ACTIVITY` in the EXPIRED state, since we should not assume
+the partner is active by default.
+
+This ensures that in passive mode, the bond starts sending periodic LACPDUs
+after receiving one from the partner, and avoids flapping due to inactivity.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 42 +++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index a51305423d28..4c2560ae8866 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr);
+ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+ static void ad_tx_machine(struct port *port);
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
++static void ad_periodic_machine(struct port *port);
+ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
+ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ bool *update_slave_arr);
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+-static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+ static void ad_enable_collecting(struct port *port);
+ static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
+@@ -1296,10 +1296,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
+- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->sm_vars &= ~AD_PORT_MATCHED;
++ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
++ * machine state diagram, the statue should be
++ * Partner_Oper_Port_State.Synchronization = FALSE;
++ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
++ * start current_while_timer(Short Timeout);
++ * Actor_Oper_Port_State.Expired = TRUE;
++ */
++ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
+ port->sm_vars |= AD_PORT_CHURNED;
+@@ -1405,11 +1411,10 @@ static void ad_tx_machine(struct port *port)
+ /**
+ * ad_periodic_machine - handle a port's periodic state machine
+ * @port: the port we're looking at
+- * @bond_params: bond parameters we will use
+ *
+ * Turn ntt flag on priodically to perform periodic transmission of lacpdu's.
+ */
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
++static void ad_periodic_machine(struct port *port)
+ {
+ periodic_states_t last_state;
+
+@@ -1418,8 +1423,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
+
+ /* check if port was reinitialized */
+ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+- !bond_params->lacp_active) {
++ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
+ port->sm_periodic_state = AD_NO_PERIODIC;
+ }
+ /* check if state machine should change state */
+@@ -1943,16 +1947,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
+ /**
+ * ad_initialize_port - initialize a given port's parameters
+ * @port: the port we're looking at
+- * @lacp_fast: boolean. whether fast periodic should be used
++ * @bond_params: bond parameters we will use
+ */
+-static void ad_initialize_port(struct port *port, int lacp_fast)
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
+ {
+ static const struct port_params tmpl = {
+ .system_priority = 0xffff,
+ .key = 1,
+ .port_number = 1,
+ .port_priority = 0xff,
+- .port_state = 1,
++ .port_state = 0,
+ };
+ static const struct lacpdu lacpdu = {
+ .subtype = 0x01,
+@@ -1970,12 +1974,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ port->actor_port_priority = 0xff;
+ port->actor_port_aggregator_identifier = 0;
+ port->ntt = false;
+- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
+- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
++ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
++ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
++ if (bond_params->lacp_active) {
++ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ }
+
+- if (lacp_fast)
++ if (bond_params->lacp_fast)
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
+
+ memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
+@@ -2187,7 +2193,7 @@ void bond_3ad_bind_slave(struct slave *slave)
+ /* port initialization */
+ port = &(SLAVE_AD_INFO(slave)->port);
+
+- ad_initialize_port(port, bond->params.lacp_fast);
++ ad_initialize_port(port, &bond->params);
+
+ port->slave = slave;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
+@@ -2499,7 +2505,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ }
+
+ ad_rx_machine(NULL, port);
+- ad_periodic_machine(port, &bond->params);
++ ad_periodic_machine(port);
+ ad_port_selection_logic(port, &update_slave_arr);
+ ad_mux_machine(port, &update_slave_arr);
+ ad_tx_machine(port);
+--
+2.50.1
+
--- /dev/null
+From 3675e8eda95e27e5cb1df8ddeb09064447459612 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:58 +0000
+Subject: bonding: update LACP activity flag after setting lacp_active
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit b64d035f77b1f02ab449393342264b44950a75ae ]
+
+The port's actor_oper_port_state activity flag should be updated immediately
+after changing the lacp_active option to reflect the current mode correctly.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 25 +++++++++++++++++++++++++
+ drivers/net/bonding/bond_options.c | 1 +
+ include/net/bond_3ad.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index c6807e473ab7..a51305423d28 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2869,6 +2869,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_3ad_update_lacp_active - change the lacp active
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++ struct port *port = NULL;
++ struct list_head *iter;
++ struct slave *slave;
++ int lacp_active;
++
++ lacp_active = bond->params.lacp_active;
++ spin_lock_bh(&bond->mode_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ port = &(SLAVE_AD_INFO(slave)->port);
++ if (lacp_active)
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ else
++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++ }
++ spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index d1b095af253b..e27d913b487b 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1634,6 +1634,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
++ bond_3ad_update_lacp_active(bond);
+
+ return 0;
+ }
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 2053cd8e788a..dba369a2cf27 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -307,6 +307,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+--
+2.50.1
+
--- /dev/null
+From 24ccb45dff7e9e513d47dcac80eb07888d90f50a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:29 -0400
+Subject: cgroup/cpuset: Fix a partition error with CPU hotplug
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 150e298ae0ccbecff2357a72fbabd80f8849ea6e ]
+
+It was found during testing that an invalid leaf partition with an
+empty effective exclusive CPU list can become a valid empty partition
+with no CPU afer an offline/online operation of an unrelated CPU. An
+empty partition root is allowed in the special case that it has no
+task in its cgroup and has distributed out all its CPUs to its child
+partitions. That is certainly not the case here.
+
+The problem is in the cpumask_subset() test in the hotplug case
+(update with no new mask) of update_parent_effective_cpumask() as it
+also returns true if the effective exclusive CPU list is empty. Fix that
+by adding the cpumask_empty() test to root out this exception case.
+Also add the cpumask_empty() test in cpuset_hotplug_update_tasks()
+to avoid calling update_parent_effective_cpumask() for this special case.
+
+Fixes: 0c7f293efc87 ("cgroup/cpuset: Add cpuset.cpus.exclusive.effective for v2")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index af5dc30bfe4b..25f9565f798d 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1771,7 +1771,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+ if (is_partition_valid(cs))
+ adding = cpumask_and(tmp->addmask,
+ xcpus, parent->effective_xcpus);
+- } else if (is_partition_invalid(cs) &&
++ } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
+ cpumask_subset(xcpus, parent->effective_xcpus)) {
+ struct cgroup_subsys_state *css;
+ struct cpuset *child;
+@@ -3792,9 +3792,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ partcmd = partcmd_invalidate;
+ /*
+ * On the other hand, an invalid partition root may be transitioned
+- * back to a regular one.
++ * back to a regular one with a non-empty effective xcpus.
+ */
+- else if (is_partition_valid(parent) && is_partition_invalid(cs))
++ else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
++ !cpumask_empty(cs->effective_xcpus))
+ partcmd = partcmd_update;
+
+ if (partcmd >= 0) {
+--
+2.50.1
+
--- /dev/null
+From 7205fccb1e3fbc91c973e58596d972267f0e4670 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index d1fb4bfbbd4c..af5dc30bfe4b 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -267,7 +267,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From 1575253e355435cf261bc0671beddccf6d9ac4d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 16:27:36 +0100
+Subject: cifs: Fix oops due to uninitialised variable
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 453a6d2a68e54a483d67233c6e1e24c4095ee4be ]
+
+Fix smb3_init_transform_rq() to initialise buffer to NULL before calling
+netfs_alloc_folioq_buffer() as netfs assumes it can append to the buffer it
+is given. Setting it to NULL means it should start a fresh buffer, but the
+value is currently undefined.
+
+Fixes: a2906d3316fc ("cifs: Switch crypto buffer to use a folio_queue rather than an xarray")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.org>
+cc: linux-cifs@vger.kernel.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4bababee965a..ab911a967246 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4522,7 +4522,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ for (int i = 1; i < num_rqst; i++) {
+ struct smb_rqst *old = &old_rq[i - 1];
+ struct smb_rqst *new = &new_rq[i];
+- struct folio_queue *buffer;
++ struct folio_queue *buffer = NULL;
+ size_t size = iov_iter_count(&old->rq_iter);
+
+ orig_len += smb_rqst_len(server, old);
+--
+2.50.1
+
--- /dev/null
+From 4645fd24c8724252a11c8a915339274cc1a15b33 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:36:41 -0500
+Subject: drm/amd/display: Add null pointer check in
+ mod_hdcp_hdcp1_create_session()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 7a2ca2ea64b1b63c8baa94a8f5deb70b2248d119 ]
+
+The function mod_hdcp_hdcp1_create_session() calls the function
+get_first_active_display(), but does not check its return value.
+The return value is a null pointer if the display list is empty.
+This will lead to a null pointer dereference.
+
+Add a null pointer check for get_first_active_display() and return
+MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function return null.
+
+This is similar to the commit c3e9826a2202
+("drm/amd/display: Add null pointer check for get_first_active_display()").
+
+Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 5e43eb3cd731649c4f8b9134f857be62a416c893)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index e58e7b93810b..6b7db8ec9a53 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
+--
+2.50.1
+
--- /dev/null
+From cf04db9e03032ea5f1f8842667bd0804c557093f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:51 +0200
+Subject: drm/amd/display: Don't print errors for nonexistent connectors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit f14ee2e7a86c5e57295b48b8e198cae7189b3b93 ]
+
+When getting the number of connectors, the VBIOS reports
+the number of valid indices, but it doesn't say which indices
+are valid, and not every valid index has an actual connector.
+If we don't find a connector on an index, that is not an error.
+
+Considering these are not actual errors, don't litter the logs.
+
+Fixes: 60df5628144b ("drm/amd/display: handle invalid connector indices")
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 249d4bc5f1935f04bb45b3b63c0f8922565124f7)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 5 +----
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 15 ++++++++++++++-
+ 2 files changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index 3bacf470f7c5..a523c5cfcd24 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
+ return object_id;
+ }
+
+- if (tbl->ucNumberOfObjects <= i) {
+- dm_error("Can't find connector id %d in connector table of size %d.\n",
+- i, tbl->ucNumberOfObjects);
++ if (tbl->ucNumberOfObjects <= i)
+ return object_id;
+- }
+
+ id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ object_id = object_id_from_bios_object_id(id);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index b089db2b3d87..84e377113e58 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -215,11 +215,24 @@ static bool create_links(
+ connectors_num,
+ num_virtual_links);
+
+- // condition loop on link_count to allow skipping invalid indices
++ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
++ * but it doesn't say which indices are valid, and not every index has an actual connector.
++ * So, if we don't find a connector on an index, that is not an error.
++ *
++ * - There is no guarantee that the first N indices will be valid
++ * - VBIOS may report a higher amount of valid indices than there are actual connectors
++ * - Some VBIOS have valid configurations for more connectors than there actually are
++ * on the card. This may be because the manufacturer used the same VBIOS for different
++ * variants of the same card.
++ */
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
++ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
++ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
++ continue;
++
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
+ link_init_params.ctx = dc->ctx;
+--
+2.50.1
+
--- /dev/null
+From 76510146b3e80e58ef29e3529b1de9f40a8ad28d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Feb 2025 16:03:47 +0000
+Subject: drm/format-helper: Add conversion from XRGB8888 to BGR888
+
+From: Kerem Karabay <kekrby@gmail.com>
+
+[ Upstream commit c9043706cb11b8005e145debe0a3211acd08e2c1 ]
+
+Add XRGB8888 emulation helper for devices that only support BGR888.
+
+Signed-off-by: Kerem Karabay <kekrby@gmail.com>
+Signed-off-by: Aditya Garg <gargaditya08@live.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/9A67EA95-9BC7-4D56-8F87-05EAC1C166AD@live.com
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_format_helper.c | 54 +++++++++++++
+ .../gpu/drm/tests/drm_format_helper_test.c | 81 +++++++++++++++++++
+ include/drm/drm_format_helper.h | 3 +
+ 3 files changed, 138 insertions(+)
+
+diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
+index b1be458ed4dd..4f60c8d8f63e 100644
+--- a/drivers/gpu/drm/drm_format_helper.c
++++ b/drivers/gpu/drm/drm_format_helper.c
+@@ -702,6 +702,57 @@ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pi
+ }
+ EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+
++static void drm_fb_xrgb8888_to_bgr888_line(void *dbuf, const void *sbuf, unsigned int pixels)
++{
++ u8 *dbuf8 = dbuf;
++ const __le32 *sbuf32 = sbuf;
++ unsigned int x;
++ u32 pix;
++
++ for (x = 0; x < pixels; x++) {
++ pix = le32_to_cpu(sbuf32[x]);
++ /* write red-green-blue to output in little endianness */
++ *dbuf8++ = (pix & 0x00ff0000) >> 16;
++ *dbuf8++ = (pix & 0x0000ff00) >> 8;
++ *dbuf8++ = (pix & 0x000000ff) >> 0;
++ }
++}
++
++/**
++ * drm_fb_xrgb8888_to_bgr888 - Convert XRGB8888 to BGR888 clip buffer
++ * @dst: Array of BGR888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffers
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. Destination and framebuffer formats must match. The
++ * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at
++ * least as many entries as there are planes in @fb's format. Each entry stores the
++ * value for the format's respective color plane at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for BGR888 devices that don't natively
++ * support XRGB8888.
++ */
++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state)
++{
++ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
++ 3,
++ };
++
++ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
++ drm_fb_xrgb8888_to_bgr888_line);
++}
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
++
+ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+ __le32 *dbuf32 = dbuf;
+@@ -1035,6 +1086,9 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
+ } else if (dst_format == DRM_FORMAT_RGB888) {
+ drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip, state);
+ return 0;
++ } else if (dst_format == DRM_FORMAT_BGR888) {
++ drm_fb_xrgb8888_to_bgr888(dst, dst_pitch, src, fb, clip, state);
++ return 0;
+ } else if (dst_format == DRM_FORMAT_ARGB8888) {
+ drm_fb_xrgb8888_to_argb8888(dst, dst_pitch, src, fb, clip, state);
+ return 0;
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index b4d62fb1d909..2a3d80b27cae 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -60,6 +60,11 @@ struct convert_to_rgb888_result {
+ const u8 expected[TEST_BUF_SIZE];
+ };
+
++struct convert_to_bgr888_result {
++ unsigned int dst_pitch;
++ const u8 expected[TEST_BUF_SIZE];
++};
++
+ struct convert_to_argb8888_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+@@ -107,6 +112,7 @@ struct convert_xrgb8888_case {
+ struct convert_to_argb1555_result argb1555_result;
+ struct convert_to_rgba5551_result rgba5551_result;
+ struct convert_to_rgb888_result rgb888_result;
++ struct convert_to_bgr888_result bgr888_result;
+ struct convert_to_argb8888_result argb8888_result;
+ struct convert_to_xrgb2101010_result xrgb2101010_result;
+ struct convert_to_argb2101010_result argb2101010_result;
+@@ -151,6 +157,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
++ .bgr888_result = {
++ .dst_pitch = TEST_USE_DEFAULT_PITCH,
++ .expected = { 0xFF, 0x00, 0x00 },
++ },
+ .argb8888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFFFF0000 },
+@@ -217,6 +227,10 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
++ .bgr888_result = {
++ .dst_pitch = TEST_USE_DEFAULT_PITCH,
++ .expected = { 0xFF, 0x00, 0x00 },
++ },
+ .argb8888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = { 0xFFFF0000 },
+@@ -330,6 +344,15 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ },
+ },
++ .bgr888_result = {
++ .dst_pitch = TEST_USE_DEFAULT_PITCH,
++ .expected = {
++ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
++ 0xFF, 0x00, 0x00, 0x00, 0xFF, 0x00,
++ 0x00, 0x00, 0xFF, 0xFF, 0x00, 0xFF,
++ 0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF,
++ },
++ },
+ .argb8888_result = {
+ .dst_pitch = TEST_USE_DEFAULT_PITCH,
+ .expected = {
+@@ -468,6 +491,17 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
++ .bgr888_result = {
++ .dst_pitch = 15,
++ .expected = {
++ 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05, 0xA8, 0xF3, 0x03,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C, 0x11, 0x4D, 0x05,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ 0xA8, 0x03, 0x03, 0x6C, 0xF0, 0x73, 0x0E, 0x44, 0x9C,
++ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
++ },
++ },
+ .argb8888_result = {
+ .dst_pitch = 20,
+ .expected = {
+@@ -914,6 +948,52 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
++static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
++{
++ const struct convert_xrgb8888_case *params = test->param_value;
++ const struct convert_to_bgr888_result *result = ¶ms->bgr888_result;
++ size_t dst_size;
++ u8 *buf = NULL;
++ __le32 *xrgb8888 = NULL;
++ struct iosys_map dst, src;
++
++ struct drm_framebuffer fb = {
++ .format = drm_format_info(DRM_FORMAT_XRGB8888),
++ .pitches = { params->pitch, 0, 0 },
++ };
++
++ dst_size = conversion_buf_size(DRM_FORMAT_BGR888, result->dst_pitch,
++ ¶ms->clip, 0);
++ KUNIT_ASSERT_GT(test, dst_size, 0);
++
++ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
++ iosys_map_set_vaddr(&dst, buf);
++
++ xrgb8888 = cpubuf_to_le32(test, params->xrgb8888, TEST_BUF_SIZE);
++ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
++ iosys_map_set_vaddr(&src, xrgb8888);
++
++ /*
++ * BGR888 expected results are already in little-endian
++ * order, so there's no need to convert the test output.
++ */
++ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip,
++ &fmtcnv_state);
++ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
++
++ buf = dst.vaddr; /* restore original value of buf */
++ memset(buf, 0, dst_size);
++
++ int blit_result = 0;
++
++ blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, ¶ms->clip,
++ &fmtcnv_state);
++
++ KUNIT_EXPECT_FALSE(test, blit_result);
++ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
++}
++
+ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+ {
+ const struct convert_xrgb8888_case *params = test->param_value;
+@@ -1851,6 +1931,7 @@ static struct kunit_case drm_format_helper_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb1555, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgba5551, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
++ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_bgr888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb8888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_argb2101010, convert_xrgb8888_gen_params),
+diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
+index 428d81afe215..aa1604d92c1a 100644
+--- a/include/drm/drm_format_helper.h
++++ b/include/drm/drm_format_helper.h
+@@ -96,6 +96,9 @@ void drm_fb_xrgb8888_to_rgba5551(struct iosys_map *dst, const unsigned int *dst_
+ void drm_fb_xrgb8888_to_rgb888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
+ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+--
+2.50.1
+
--- /dev/null
+From 535ffc2209c4ec62ea806b1ca37daf5a31badbfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Mar 2025 15:14:58 +0100
+Subject: drm/format-helper: Add generic conversion to 32-bit formats
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit d55d0b066f4eedf030c9c1a67a2a0abffece3abc ]
+
+Add drm_fb_xfrm_line_32to32() to implement conversion from 32-bit
+pixels to 32-bit pixels. The pixel-conversion is specified by the
+given callback parameter. Mark the helper as always_inline to avoid
+overhead from function calls.
+
+Then implement all existing line-conversion functions with the new
+generic call and the respective pixel-conversion helper.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
+Link: https://lore.kernel.org/r/20250328141709.217283-3-tzimmermann@suse.de
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_format_helper.c | 84 +++++++----------------------
+ 1 file changed, 19 insertions(+), 65 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
+index 4f60c8d8f63e..4dcb78895581 100644
+--- a/drivers/gpu/drm/drm_format_helper.c
++++ b/drivers/gpu/drm/drm_format_helper.c
+@@ -20,6 +20,8 @@
+ #include <drm/drm_print.h>
+ #include <drm/drm_rect.h>
+
++#include "drm_format_internal.h"
++
+ /**
+ * drm_format_conv_state_init - Initialize format-conversion state
+ * @state: The state to initialize
+@@ -244,6 +246,18 @@ static int drm_fb_xfrm(struct iosys_map *dst,
+ xfrm_line);
+ }
+
++static __always_inline void drm_fb_xfrm_line_32to32(void *dbuf, const void *sbuf,
++ unsigned int pixels,
++ u32 (*xfrm_pixel)(u32))
++{
++ __le32 *dbuf32 = dbuf;
++ const __le32 *sbuf32 = sbuf;
++ const __le32 *send32 = sbuf32 + pixels;
++
++ while (sbuf32 < send32)
++ *dbuf32++ = cpu_to_le32(xfrm_pixel(le32_to_cpup(sbuf32++)));
++}
++
+ /**
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Array of destination buffers
+@@ -755,16 +769,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgr888);
+
+ static void drm_fb_xrgb8888_to_argb8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+- __le32 *dbuf32 = dbuf;
+- const __le32 *sbuf32 = sbuf;
+- unsigned int x;
+- u32 pix;
+-
+- for (x = 0; x < pixels; x++) {
+- pix = le32_to_cpu(sbuf32[x]);
+- pix |= GENMASK(31, 24); /* fill alpha bits */
+- dbuf32[x] = cpu_to_le32(pix);
+- }
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb8888);
+ }
+
+ /**
+@@ -804,19 +809,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_argb8888);
+
+ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+- __le32 *dbuf32 = dbuf;
+- const __le32 *sbuf32 = sbuf;
+- unsigned int x;
+- u32 pix;
+-
+- for (x = 0; x < pixels; x++) {
+- pix = le32_to_cpu(sbuf32[x]);
+- pix = ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- GENMASK(31, 24); /* fill alpha bits */
+- *dbuf32++ = cpu_to_le32(pix);
+- }
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
+ }
+
+ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+@@ -835,19 +828,7 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
+
+ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+- __le32 *dbuf32 = dbuf;
+- const __le32 *sbuf32 = sbuf;
+- unsigned int x;
+- u32 pix;
+-
+- for (x = 0; x < pixels; x++) {
+- pix = le32_to_cpu(sbuf32[x]);
+- pix = ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- ((pix & 0xff000000) >> 24) << 24;
+- *dbuf32++ = cpu_to_le32(pix);
+- }
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
+ }
+
+ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+@@ -866,20 +847,7 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
+
+ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+- __le32 *dbuf32 = dbuf;
+- const __le32 *sbuf32 = sbuf;
+- unsigned int x;
+- u32 val32;
+- u32 pix;
+-
+- for (x = 0; x < pixels; x++) {
+- pix = le32_to_cpu(sbuf32[x]);
+- val32 = ((pix & 0x000000FF) << 2) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x00FF0000) << 6);
+- pix = val32 | ((val32 >> 8) & 0x00300C03);
+- *dbuf32++ = cpu_to_le32(pix);
+- }
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xrgb2101010);
+ }
+
+ /**
+@@ -920,21 +888,7 @@ EXPORT_SYMBOL(drm_fb_xrgb8888_to_xrgb2101010);
+
+ static void drm_fb_xrgb8888_to_argb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+- __le32 *dbuf32 = dbuf;
+- const __le32 *sbuf32 = sbuf;
+- unsigned int x;
+- u32 val32;
+- u32 pix;
+-
+- for (x = 0; x < pixels; x++) {
+- pix = le32_to_cpu(sbuf32[x]);
+- val32 = ((pix & 0x000000ff) << 2) |
+- ((pix & 0x0000ff00) << 4) |
+- ((pix & 0x00ff0000) << 6);
+- pix = GENMASK(31, 30) | /* set alpha bits */
+- val32 | ((val32 >> 8) & 0x00300c03);
+- *dbuf32++ = cpu_to_le32(pix);
+- }
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_argb2101010);
+ }
+
+ /**
+--
+2.50.1
+
--- /dev/null
+From dc8823cb9306c08238eb4a555b4cfb5b2c35ac12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Mar 2025 15:14:57 +0100
+Subject: drm/format-helper: Move helpers for pixel conversion to header file
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit c46d18f98261d99711003517c444417a303c7fae ]
+
+The DRM draw helpers contain format-conversion helpers that operate
+on individual pixels. Move them into an internal header file and adopt
+them as individual API. Update the draw code accordingly. The pixel
+helpers will also be useful for other format conversion helpers.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
+Link: https://lore.kernel.org/r/20250328141709.217283-2-tzimmermann@suse.de
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_draw.c | 100 +++-------------------
+ drivers/gpu/drm/drm_format_internal.h | 119 ++++++++++++++++++++++++++
+ 2 files changed, 130 insertions(+), 89 deletions(-)
+ create mode 100644 drivers/gpu/drm/drm_format_internal.h
+
+diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
+index cb2ad12bce57..d41f8ae1c148 100644
+--- a/drivers/gpu/drm/drm_draw.c
++++ b/drivers/gpu/drm/drm_draw.c
+@@ -11,85 +11,7 @@
+ #include <drm/drm_fourcc.h>
+
+ #include "drm_draw_internal.h"
+-
+-/*
+- * Conversions from xrgb8888
+- */
+-
+-static u16 convert_xrgb8888_to_rgb565(u32 pix)
+-{
+- return ((pix & 0x00F80000) >> 8) |
+- ((pix & 0x0000FC00) >> 5) |
+- ((pix & 0x000000F8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+-{
+- return ((pix & 0x00f80000) >> 8) |
+- ((pix & 0x0000f800) >> 5) |
+- ((pix & 0x000000f8) >> 2) |
+- BIT(0); /* set alpha bit */
+-}
+-
+-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+-{
+- return ((pix & 0x00f80000) >> 9) |
+- ((pix & 0x0000f800) >> 6) |
+- ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_argb1555(u32 pix)
+-{
+- return BIT(15) | /* set alpha bit */
+- ((pix & 0x00f80000) >> 9) |
+- ((pix & 0x0000f800) >> 6) |
+- ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u32 convert_xrgb8888_to_argb8888(u32 pix)
+-{
+- return pix | GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+-{
+- return ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- ((pix & 0xff000000) >> 24) << 24;
+-}
+-
+-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+-{
+- return ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+-{
+- pix = ((pix & 0x000000FF) << 2) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x00FF0000) << 6);
+- return pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+-{
+- pix = ((pix & 0x000000FF) << 2) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x00FF0000) << 6);
+- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+-{
+- pix = ((pix & 0x00FF0000) >> 14) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x000000FF) << 22);
+- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
++#include "drm_format_internal.h"
+
+ /**
+ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+@@ -104,28 +26,28 @@ u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
+ {
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+- return convert_xrgb8888_to_rgb565(color);
++ return drm_pixel_xrgb8888_to_rgb565(color);
+ case DRM_FORMAT_RGBA5551:
+- return convert_xrgb8888_to_rgba5551(color);
++ return drm_pixel_xrgb8888_to_rgba5551(color);
+ case DRM_FORMAT_XRGB1555:
+- return convert_xrgb8888_to_xrgb1555(color);
++ return drm_pixel_xrgb8888_to_xrgb1555(color);
+ case DRM_FORMAT_ARGB1555:
+- return convert_xrgb8888_to_argb1555(color);
++ return drm_pixel_xrgb8888_to_argb1555(color);
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_XRGB8888:
+ return color;
+ case DRM_FORMAT_ARGB8888:
+- return convert_xrgb8888_to_argb8888(color);
++ return drm_pixel_xrgb8888_to_argb8888(color);
+ case DRM_FORMAT_XBGR8888:
+- return convert_xrgb8888_to_xbgr8888(color);
++ return drm_pixel_xrgb8888_to_xbgr8888(color);
+ case DRM_FORMAT_ABGR8888:
+- return convert_xrgb8888_to_abgr8888(color);
++ return drm_pixel_xrgb8888_to_abgr8888(color);
+ case DRM_FORMAT_XRGB2101010:
+- return convert_xrgb8888_to_xrgb2101010(color);
++ return drm_pixel_xrgb8888_to_xrgb2101010(color);
+ case DRM_FORMAT_ARGB2101010:
+- return convert_xrgb8888_to_argb2101010(color);
++ return drm_pixel_xrgb8888_to_argb2101010(color);
+ case DRM_FORMAT_ABGR2101010:
+- return convert_xrgb8888_to_abgr2101010(color);
++ return drm_pixel_xrgb8888_to_abgr2101010(color);
+ default:
+ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+ return 0;
+diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
+new file mode 100644
+index 000000000000..5f82f0b9c8e8
+--- /dev/null
++++ b/drivers/gpu/drm/drm_format_internal.h
+@@ -0,0 +1,119 @@
++/* SPDX-License-Identifier: GPL-2.0 or MIT */
++
++#ifndef DRM_FORMAT_INTERNAL_H
++#define DRM_FORMAT_INTERNAL_H
++
++#include <linux/bits.h>
++#include <linux/types.h>
++
++/*
++ * Each pixel-format conversion helper takes a raw pixel in a
++ * specific input format and returns a raw pixel in a specific
++ * output format. All pixels are in little-endian byte order.
++ *
++ * Function names are
++ *
++ * drm_pixel_<input>_to_<output>_<algorithm>()
++ *
++ * where <input> and <output> refer to pixel formats. The
++ * <algorithm> is optional and hints to the method used for the
++ * conversion. Helpers with no algorithm given apply pixel-bit
++ * shifting.
++ *
++ * The argument type is u32. We expect this to be wide enough to
++ * hold all conversion input from 32-bit RGB to any output format.
++ * The Linux kernel should avoid format conversion for anything
++ * but XRGB8888 input data. Converting from other format can still
++ * be acceptable in some cases.
++ *
++ * The return type is u32. It is wide enough to hold all conversion
++ * output from XRGB8888. For output formats wider than 32 bit, a
++ * return type of u64 would be acceptable.
++ */
++
++/*
++ * Conversions from XRGB8888
++ */
++
++static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix)
++{
++ return ((pix & 0x00f80000) >> 8) |
++ ((pix & 0x0000fc00) >> 5) |
++ ((pix & 0x000000f8) >> 3);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix)
++{
++ return ((pix & 0x00f80000) >> 8) |
++ ((pix & 0x0000f800) >> 5) |
++ ((pix & 0x000000f8) >> 2);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_rgba5551(u32 pix)
++{
++ return drm_pixel_xrgb8888_to_rgbx5551(pix) |
++ BIT(0); /* set alpha bit */
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xrgb1555(u32 pix)
++{
++ return ((pix & 0x00f80000) >> 9) |
++ ((pix & 0x0000f800) >> 6) |
++ ((pix & 0x000000f8) >> 3);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_argb1555(u32 pix)
++{
++ return BIT(15) | /* set alpha bit */
++ drm_pixel_xrgb8888_to_xrgb1555(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_argb8888(u32 pix)
++{
++ return GENMASK(31, 24) | /* fill alpha bits */
++ pix;
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
++{
++ return ((pix & 0xff000000)) | /* also copy filler bits */
++ ((pix & 0x00ff0000) >> 16) |
++ ((pix & 0x0000ff00)) |
++ ((pix & 0x000000ff) << 16);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
++{
++ return GENMASK(31, 24) | /* fill alpha bits */
++ drm_pixel_xrgb8888_to_xbgr8888(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xrgb2101010(u32 pix)
++{
++ pix = ((pix & 0x000000ff) << 2) |
++ ((pix & 0x0000ff00) << 4) |
++ ((pix & 0x00ff0000) << 6);
++ return pix | ((pix >> 8) & 0x00300c03);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_argb2101010(u32 pix)
++{
++ return GENMASK(31, 30) | /* set alpha bits */
++ drm_pixel_xrgb8888_to_xrgb2101010(pix);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_xbgr2101010(u32 pix)
++{
++ pix = ((pix & 0x00ff0000) >> 14) |
++ ((pix & 0x0000ff00) << 4) |
++ ((pix & 0x000000ff) << 22);
++ return pix | ((pix >> 8) & 0x00300c03);
++}
++
++static inline u32 drm_pixel_xrgb8888_to_abgr2101010(u32 pix)
++{
++ return GENMASK(31, 30) | /* set alpha bits */
++ drm_pixel_xrgb8888_to_xbgr2101010(pix);
++}
++
++#endif
+--
+2.50.1
+
--- /dev/null
+From 274ce93b4246a06ef4e8a5cd6460ca7e2d84cb49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:31 +0800
+Subject: drm/hisilicon/hibmc: fix the hibmc loaded failed bug
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 93a08f856fcc5aaeeecad01f71bef3088588216a ]
+
+When hibmc loading fails, the driver uses hibmc_unload to free the
+resources, but the mutexes in mode.config are not initialized, which
+leads to accessing a NULL pointer. Just change the goto statements to
+returns, because hibmc_hw_init() doesn't need to free anything.
+
+Fixes: b3df5e65cc03 ("drm/hibmc: Drop drm_vblank_cleanup")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 9f9b19ea0587..1640609cdbc0 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -258,13 +258,13 @@ static int hibmc_load(struct drm_device *dev)
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+- goto err;
++ return ret;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+- goto err;
++ return ret;
+ }
+
+ ret = hibmc_kms_init(priv);
+--
+2.50.1
+
--- /dev/null
+From 990e7c3cb4fe3e96ed5e2ef85566dbc52c2e0ea7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:28 +0800
+Subject: drm/hisilicon/hibmc: fix the i2c device resource leak when vdac init
+ failed
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit e5f48bfa2ae0806d5f51fb8061afc619a73599a7 ]
+
+Currently the driver misses cleaning up the i2c adapter when vdac init
+fails. This may cause a resource leak.
+
+Fixes: a0d078d06e516 ("drm/hisilicon: Features to support reading resolutions from EDID")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-2-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 1 +
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 5 +++++
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 11 ++++++++---
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+index 42f0ab8f9b5a..6eb0d41a0f68 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+@@ -58,5 +58,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
+ int hibmc_vdac_init(struct hibmc_drm_private *priv);
+
+ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
++void hibmc_ddc_del(struct hibmc_vdac *vdac);
+
+ #endif
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+index 99b3b77b5445..44860011855e 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
+
+ return i2c_bit_add_bus(&vdac->adapter);
+ }
++
++void hibmc_ddc_del(struct hibmc_vdac *vdac)
++{
++ i2c_del_adapter(&vdac->adapter);
++}
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+index 05e19ea4c9f9..9e29386700c8 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
+ {
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+
+- i2c_del_adapter(&vdac->adapter);
++ hibmc_ddc_del(vdac);
+ drm_connector_cleanup(connector);
+ }
+
+@@ -109,7 +109,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "failed to init encoder: %d\n", ret);
+- return ret;
++ goto err;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+@@ -120,7 +120,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ &vdac->adapter);
+ if (ret) {
+ drm_err(dev, "failed to init connector: %d\n", ret);
+- return ret;
++ goto err;
+ }
+
+ drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
+@@ -128,4 +128,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
++
++err:
++ hibmc_ddc_del(vdac);
++
++ return ret;
+ }
+--
+2.50.1
+
--- /dev/null
+From fd78d2712ae17ea3b38027af8b7d93a7dae71d99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Jan 2025 17:38:23 +0800
+Subject: drm/hisilicon/hibmc: refactored struct hibmc_drm_private
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 587013d72c1a217ced9f42a9a08c8013052cabfc ]
+
+Refactored struct hibmc_drm_private to separate VGA module from
+generic struct.
+
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Reviewed-by: Tian Tao <tiantao6@hisilicon.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250103093824.1963816-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
+Stable-dep-of: e5f48bfa2ae0 ("drm/hisilicon/hibmc: fix the i2c device resource leak when vdac init failed")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 16 ++++----
+ .../gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 41 +++++++++----------
+ .../gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 20 ++++-----
+ 3 files changed, 38 insertions(+), 39 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+index 6b566f3aeecb..42f0ab8f9b5a 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+@@ -20,9 +20,10 @@
+
+ #include <drm/drm_framebuffer.h>
+
+-struct hibmc_connector {
+- struct drm_connector base;
+-
++struct hibmc_vdac {
++ struct drm_device *dev;
++ struct drm_encoder encoder;
++ struct drm_connector connector;
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data bit_data;
+ };
+@@ -35,13 +36,12 @@ struct hibmc_drm_private {
+ struct drm_device dev;
+ struct drm_plane primary_plane;
+ struct drm_crtc crtc;
+- struct drm_encoder encoder;
+- struct hibmc_connector connector;
++ struct hibmc_vdac vdac;
+ };
+
+-static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
++static inline struct hibmc_vdac *to_hibmc_vdac(struct drm_connector *connector)
+ {
+- return container_of(connector, struct hibmc_connector, base);
++ return container_of(connector, struct hibmc_vdac, connector);
+ }
+
+ static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+@@ -57,6 +57,6 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv,
+ int hibmc_de_init(struct hibmc_drm_private *priv);
+ int hibmc_vdac_init(struct hibmc_drm_private *priv);
+
+-int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
++int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
+
+ #endif
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+index e6e48651c15c..99b3b77b5445 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+@@ -25,8 +25,8 @@
+
+ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+ {
+- struct hibmc_connector *hibmc_connector = data;
+- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
++ struct hibmc_vdac *vdac = data;
++ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if (value) {
+@@ -45,8 +45,8 @@ static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+
+ static int hibmc_get_i2c_signal(void *data, u32 mask)
+ {
+- struct hibmc_connector *hibmc_connector = data;
+- struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
++ struct hibmc_vdac *vdac = data;
++ struct hibmc_drm_private *priv = to_hibmc_drm_private(vdac->connector.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if ((tmp_dir & mask) != mask) {
+@@ -77,22 +77,21 @@ static int hibmc_ddc_getscl(void *data)
+ return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+ }
+
+-int hibmc_ddc_create(struct drm_device *drm_dev,
+- struct hibmc_connector *connector)
++int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
+ {
+- connector->adapter.owner = THIS_MODULE;
+- snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+- connector->adapter.dev.parent = drm_dev->dev;
+- i2c_set_adapdata(&connector->adapter, connector);
+- connector->adapter.algo_data = &connector->bit_data;
+-
+- connector->bit_data.udelay = 20;
+- connector->bit_data.timeout = usecs_to_jiffies(2000);
+- connector->bit_data.data = connector;
+- connector->bit_data.setsda = hibmc_ddc_setsda;
+- connector->bit_data.setscl = hibmc_ddc_setscl;
+- connector->bit_data.getsda = hibmc_ddc_getsda;
+- connector->bit_data.getscl = hibmc_ddc_getscl;
+-
+- return i2c_bit_add_bus(&connector->adapter);
++ vdac->adapter.owner = THIS_MODULE;
++ snprintf(vdac->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
++ vdac->adapter.dev.parent = drm_dev->dev;
++ i2c_set_adapdata(&vdac->adapter, vdac);
++ vdac->adapter.algo_data = &vdac->bit_data;
++
++ vdac->bit_data.udelay = 20;
++ vdac->bit_data.timeout = usecs_to_jiffies(2000);
++ vdac->bit_data.data = vdac;
++ vdac->bit_data.setsda = hibmc_ddc_setsda;
++ vdac->bit_data.setscl = hibmc_ddc_setscl;
++ vdac->bit_data.getsda = hibmc_ddc_getsda;
++ vdac->bit_data.getscl = hibmc_ddc_getscl;
++
++ return i2c_bit_add_bus(&vdac->adapter);
+ }
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+index 409c551c92af..05e19ea4c9f9 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+@@ -24,11 +24,11 @@
+
+ static int hibmc_connector_get_modes(struct drm_connector *connector)
+ {
+- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
++ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+ const struct drm_edid *drm_edid;
+ int count;
+
+- drm_edid = drm_edid_read_ddc(connector, &hibmc_connector->adapter);
++ drm_edid = drm_edid_read_ddc(connector, &vdac->adapter);
+
+ drm_edid_connector_update(connector, drm_edid);
+
+@@ -51,9 +51,9 @@ static int hibmc_connector_get_modes(struct drm_connector *connector)
+
+ static void hibmc_connector_destroy(struct drm_connector *connector)
+ {
+- struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
++ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+
+- i2c_del_adapter(&hibmc_connector->adapter);
++ i2c_del_adapter(&vdac->adapter);
+ drm_connector_cleanup(connector);
+ }
+
+@@ -93,20 +93,20 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = {
+ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ {
+ struct drm_device *dev = &priv->dev;
+- struct hibmc_connector *hibmc_connector = &priv->connector;
+- struct drm_encoder *encoder = &priv->encoder;
++ struct hibmc_vdac *vdac = &priv->vdac;
++ struct drm_encoder *encoder = &vdac->encoder;
+ struct drm_crtc *crtc = &priv->crtc;
+- struct drm_connector *connector = &hibmc_connector->base;
++ struct drm_connector *connector = &vdac->connector;
+ int ret;
+
+- ret = hibmc_ddc_create(dev, hibmc_connector);
++ ret = hibmc_ddc_create(dev, vdac);
+ if (ret) {
+ drm_err(dev, "failed to create ddc: %d\n", ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+- ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
++ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "failed to init encoder: %d\n", ret);
+ return ret;
+@@ -117,7 +117,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+- &hibmc_connector->adapter);
++ &vdac->adapter);
+ if (ret) {
+ drm_err(dev, "failed to init connector: %d\n", ret);
+ return ret;
+--
+2.50.1
+
--- /dev/null
+From 956104f97e401b03816ad847448027292fcf5194 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 19:50:27 +0800
+Subject: drm/nouveau/nvif: Fix potential memory leak in nvif_vmm_ctor().
+
+From: Fanhua Li <lifanhua5@huawei.com>
+
+[ Upstream commit bb8aeaa3191b617c6faf8ae937252e059673b7ea ]
+
+When the nvif_vmm_type is invalid, we will return error directly
+without freeing the args in nvif_vmm_ctor(), which leading a memory
+leak. Fix it by setting the ret -EINVAL and goto done.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/all/202312040659.4pJpMafN-lkp@intel.com/
+Fixes: 6b252cf42281 ("drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm")
+Signed-off-by: Fanhua Li <lifanhua5@huawei.com>
+Link: https://lore.kernel.org/r/20250728115027.50878-1-lifanhua5@huawei.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nvif/vmm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
+index 99296f03371a..07c1ebc2a941 100644
+--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
++++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
+@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ default:
+ WARN_ON(1);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto done;
+ }
+
+ memcpy(args->data, argv, argc);
+--
+2.50.1
+
--- /dev/null
+From 12d7c4932113b40111d7355d626fbc10ceb90f08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2024 16:45:00 +0100
+Subject: drm/panic: Move drawing functions to drm_draw
+
+From: Jocelyn Falempe <jfalempe@redhat.com>
+
+[ Upstream commit 31fa2c1ca0b239f64eaf682f1685bbbd74fc0181 ]
+
+Move the color conversions, blit and fill functions to drm_draw.c,
+so that they can be re-used by drm_log.
+drm_draw is internal to the drm subsystem, and shouldn't be used by
+gpu drivers.
+
+Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20241204160014.1171469-2-jfalempe@redhat.com
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/Kconfig | 5 +
+ drivers/gpu/drm/Makefile | 1 +
+ drivers/gpu/drm/drm_draw.c | 233 ++++++++++++++++++++++++
+ drivers/gpu/drm/drm_draw_internal.h | 56 ++++++
+ drivers/gpu/drm/drm_panic.c | 269 +++-------------------------
+ 5 files changed, 324 insertions(+), 240 deletions(-)
+ create mode 100644 drivers/gpu/drm/drm_draw.c
+ create mode 100644 drivers/gpu/drm/drm_draw_internal.h
+
+diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
+index 1160a439e92a..0dd0d996e53e 100644
+--- a/drivers/gpu/drm/Kconfig
++++ b/drivers/gpu/drm/Kconfig
+@@ -105,10 +105,15 @@ config DRM_KMS_HELPER
+ help
+ CRTC helpers for KMS drivers.
+
++config DRM_DRAW
++ bool
++ depends on DRM
++
+ config DRM_PANIC
+ bool "Display a user-friendly message when a kernel panic occurs"
+ depends on DRM
+ select FONT_SUPPORT
++ select DRM_DRAW
+ help
+ Enable a drm panic handler, which will display a user-friendly message
+ when a kernel panic occurs. It's useful when using a user-space
+diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
+index 1ec44529447a..f4a5edf746d2 100644
+--- a/drivers/gpu/drm/Makefile
++++ b/drivers/gpu/drm/Makefile
+@@ -89,6 +89,7 @@ drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
+ drm_privacy_screen_x86.o
+ drm-$(CONFIG_DRM_ACCEL) += ../../accel/drm_accel.o
+ drm-$(CONFIG_DRM_PANIC) += drm_panic.o
++drm-$(CONFIG_DRM_DRAW) += drm_draw.o
+ drm-$(CONFIG_DRM_PANIC_SCREEN_QR_CODE) += drm_panic_qr.o
+ obj-$(CONFIG_DRM) += drm.o
+
+diff --git a/drivers/gpu/drm/drm_draw.c b/drivers/gpu/drm/drm_draw.c
+new file mode 100644
+index 000000000000..cb2ad12bce57
+--- /dev/null
++++ b/drivers/gpu/drm/drm_draw.c
+@@ -0,0 +1,233 @@
++// SPDX-License-Identifier: GPL-2.0 or MIT
++/*
++ * Copyright (c) 2023 Red Hat.
++ * Author: Jocelyn Falempe <jfalempe@redhat.com>
++ */
++
++#include <linux/bits.h>
++#include <linux/iosys-map.h>
++#include <linux/types.h>
++
++#include <drm/drm_fourcc.h>
++
++#include "drm_draw_internal.h"
++
++/*
++ * Conversions from xrgb8888
++ */
++
++static u16 convert_xrgb8888_to_rgb565(u32 pix)
++{
++ return ((pix & 0x00F80000) >> 8) |
++ ((pix & 0x0000FC00) >> 5) |
++ ((pix & 0x000000F8) >> 3);
++}
++
++static u16 convert_xrgb8888_to_rgba5551(u32 pix)
++{
++ return ((pix & 0x00f80000) >> 8) |
++ ((pix & 0x0000f800) >> 5) |
++ ((pix & 0x000000f8) >> 2) |
++ BIT(0); /* set alpha bit */
++}
++
++static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
++{
++ return ((pix & 0x00f80000) >> 9) |
++ ((pix & 0x0000f800) >> 6) |
++ ((pix & 0x000000f8) >> 3);
++}
++
++static u16 convert_xrgb8888_to_argb1555(u32 pix)
++{
++ return BIT(15) | /* set alpha bit */
++ ((pix & 0x00f80000) >> 9) |
++ ((pix & 0x0000f800) >> 6) |
++ ((pix & 0x000000f8) >> 3);
++}
++
++static u32 convert_xrgb8888_to_argb8888(u32 pix)
++{
++ return pix | GENMASK(31, 24); /* fill alpha bits */
++}
++
++static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
++{
++ return ((pix & 0x00ff0000) >> 16) << 0 |
++ ((pix & 0x0000ff00) >> 8) << 8 |
++ ((pix & 0x000000ff) >> 0) << 16 |
++ ((pix & 0xff000000) >> 24) << 24;
++}
++
++static u32 convert_xrgb8888_to_abgr8888(u32 pix)
++{
++ return ((pix & 0x00ff0000) >> 16) << 0 |
++ ((pix & 0x0000ff00) >> 8) << 8 |
++ ((pix & 0x000000ff) >> 0) << 16 |
++ GENMASK(31, 24); /* fill alpha bits */
++}
++
++static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
++{
++ pix = ((pix & 0x000000FF) << 2) |
++ ((pix & 0x0000FF00) << 4) |
++ ((pix & 0x00FF0000) << 6);
++ return pix | ((pix >> 8) & 0x00300C03);
++}
++
++static u32 convert_xrgb8888_to_argb2101010(u32 pix)
++{
++ pix = ((pix & 0x000000FF) << 2) |
++ ((pix & 0x0000FF00) << 4) |
++ ((pix & 0x00FF0000) << 6);
++ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
++}
++
++static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
++{
++ pix = ((pix & 0x00FF0000) >> 14) |
++ ((pix & 0x0000FF00) << 4) |
++ ((pix & 0x000000FF) << 22);
++ return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
++}
++
++/**
++ * drm_draw_color_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
++ * @color: input color, in xrgb8888 format
++ * @format: output format
++ *
++ * Returns:
++ * Color in the format specified, casted to u32.
++ * Or 0 if the format is not supported.
++ */
++u32 drm_draw_color_from_xrgb8888(u32 color, u32 format)
++{
++ switch (format) {
++ case DRM_FORMAT_RGB565:
++ return convert_xrgb8888_to_rgb565(color);
++ case DRM_FORMAT_RGBA5551:
++ return convert_xrgb8888_to_rgba5551(color);
++ case DRM_FORMAT_XRGB1555:
++ return convert_xrgb8888_to_xrgb1555(color);
++ case DRM_FORMAT_ARGB1555:
++ return convert_xrgb8888_to_argb1555(color);
++ case DRM_FORMAT_RGB888:
++ case DRM_FORMAT_XRGB8888:
++ return color;
++ case DRM_FORMAT_ARGB8888:
++ return convert_xrgb8888_to_argb8888(color);
++ case DRM_FORMAT_XBGR8888:
++ return convert_xrgb8888_to_xbgr8888(color);
++ case DRM_FORMAT_ABGR8888:
++ return convert_xrgb8888_to_abgr8888(color);
++ case DRM_FORMAT_XRGB2101010:
++ return convert_xrgb8888_to_xrgb2101010(color);
++ case DRM_FORMAT_ARGB2101010:
++ return convert_xrgb8888_to_argb2101010(color);
++ case DRM_FORMAT_ABGR2101010:
++ return convert_xrgb8888_to_abgr2101010(color);
++ default:
++ WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
++ return 0;
++ }
++}
++EXPORT_SYMBOL(drm_draw_color_from_xrgb8888);
++
++/*
++ * Blit functions
++ */
++void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u16 fg16)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++)
++ for (x = 0; x < width; x++)
++ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
++}
++EXPORT_SYMBOL(drm_draw_blit16);
++
++void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u32 fg32)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++) {
++ for (x = 0; x < width; x++) {
++ u32 off = y * dpitch + x * 3;
++
++ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
++ /* write blue-green-red to output in little endianness */
++ iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
++ iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
++ iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
++ }
++ }
++ }
++}
++EXPORT_SYMBOL(drm_draw_blit24);
++
++void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u32 fg32)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++)
++ for (x = 0; x < width; x++)
++ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
++}
++EXPORT_SYMBOL(drm_draw_blit32);
++
++/*
++ * Fill functions
++ */
++void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u16 color)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++)
++ for (x = 0; x < width; x++)
++ iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
++}
++EXPORT_SYMBOL(drm_draw_fill16);
++
++void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u16 color)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++) {
++ for (x = 0; x < width; x++) {
++ unsigned int off = y * dpitch + x * 3;
++
++ /* write blue-green-red to output in little endianness */
++ iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
++ iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
++ iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
++ }
++ }
++}
++EXPORT_SYMBOL(drm_draw_fill24);
++
++void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u32 color)
++{
++ unsigned int y, x;
++
++ for (y = 0; y < height; y++)
++ for (x = 0; x < width; x++)
++ iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
++}
++EXPORT_SYMBOL(drm_draw_fill32);
+diff --git a/drivers/gpu/drm/drm_draw_internal.h b/drivers/gpu/drm/drm_draw_internal.h
+new file mode 100644
+index 000000000000..f121ee7339dc
+--- /dev/null
++++ b/drivers/gpu/drm/drm_draw_internal.h
+@@ -0,0 +1,56 @@
++/* SPDX-License-Identifier: GPL-2.0 or MIT */
++/*
++ * Copyright (c) 2023 Red Hat.
++ * Author: Jocelyn Falempe <jfalempe@redhat.com>
++ */
++
++#ifndef __DRM_DRAW_INTERNAL_H__
++#define __DRM_DRAW_INTERNAL_H__
++
++#include <linux/font.h>
++#include <linux/types.h>
++
++struct iosys_map;
++
++/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
++static inline bool drm_draw_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
++{
++ return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
++}
++
++static inline const u8 *drm_draw_get_char_bitmap(const struct font_desc *font,
++ char c, size_t font_pitch)
++{
++ return font->data + (c * font->height) * font_pitch;
++}
++
++u32 drm_draw_color_from_xrgb8888(u32 color, u32 format);
++
++void drm_draw_blit16(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u16 fg16);
++
++void drm_draw_blit24(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u32 fg32);
++
++void drm_draw_blit32(struct iosys_map *dmap, unsigned int dpitch,
++ const u8 *sbuf8, unsigned int spitch,
++ unsigned int height, unsigned int width,
++ unsigned int scale, u32 fg32);
++
++void drm_draw_fill16(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u16 color);
++
++void drm_draw_fill24(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u16 color);
++
++void drm_draw_fill32(struct iosys_map *dmap, unsigned int dpitch,
++ unsigned int height, unsigned int width,
++ u32 color);
++
++#endif /* __DRM_DRAW_INTERNAL_H__ */
+diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
+index 0a9ecc1380d2..f128d345b16d 100644
+--- a/drivers/gpu/drm/drm_panic.c
++++ b/drivers/gpu/drm/drm_panic.c
+@@ -31,6 +31,7 @@
+ #include <drm/drm_rect.h>
+
+ #include "drm_crtc_internal.h"
++#include "drm_draw_internal.h"
+
+ MODULE_AUTHOR("Jocelyn Falempe");
+ MODULE_DESCRIPTION("DRM panic handler");
+@@ -139,181 +140,8 @@ device_initcall(drm_panic_setup_logo);
+ #endif
+
+ /*
+- * Color conversion
++ * Blit & Fill functions
+ */
+-
+-static u16 convert_xrgb8888_to_rgb565(u32 pix)
+-{
+- return ((pix & 0x00F80000) >> 8) |
+- ((pix & 0x0000FC00) >> 5) |
+- ((pix & 0x000000F8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_rgba5551(u32 pix)
+-{
+- return ((pix & 0x00f80000) >> 8) |
+- ((pix & 0x0000f800) >> 5) |
+- ((pix & 0x000000f8) >> 2) |
+- BIT(0); /* set alpha bit */
+-}
+-
+-static u16 convert_xrgb8888_to_xrgb1555(u32 pix)
+-{
+- return ((pix & 0x00f80000) >> 9) |
+- ((pix & 0x0000f800) >> 6) |
+- ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u16 convert_xrgb8888_to_argb1555(u32 pix)
+-{
+- return BIT(15) | /* set alpha bit */
+- ((pix & 0x00f80000) >> 9) |
+- ((pix & 0x0000f800) >> 6) |
+- ((pix & 0x000000f8) >> 3);
+-}
+-
+-static u32 convert_xrgb8888_to_argb8888(u32 pix)
+-{
+- return pix | GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xbgr8888(u32 pix)
+-{
+- return ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- ((pix & 0xff000000) >> 24) << 24;
+-}
+-
+-static u32 convert_xrgb8888_to_abgr8888(u32 pix)
+-{
+- return ((pix & 0x00ff0000) >> 16) << 0 |
+- ((pix & 0x0000ff00) >> 8) << 8 |
+- ((pix & 0x000000ff) >> 0) << 16 |
+- GENMASK(31, 24); /* fill alpha bits */
+-}
+-
+-static u32 convert_xrgb8888_to_xrgb2101010(u32 pix)
+-{
+- pix = ((pix & 0x000000FF) << 2) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x00FF0000) << 6);
+- return pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_argb2101010(u32 pix)
+-{
+- pix = ((pix & 0x000000FF) << 2) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x00FF0000) << 6);
+- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-static u32 convert_xrgb8888_to_abgr2101010(u32 pix)
+-{
+- pix = ((pix & 0x00FF0000) >> 14) |
+- ((pix & 0x0000FF00) << 4) |
+- ((pix & 0x000000FF) << 22);
+- return GENMASK(31, 30) /* set alpha bits */ | pix | ((pix >> 8) & 0x00300C03);
+-}
+-
+-/*
+- * convert_from_xrgb8888 - convert one pixel from xrgb8888 to the desired format
+- * @color: input color, in xrgb8888 format
+- * @format: output format
+- *
+- * Returns:
+- * Color in the format specified, casted to u32.
+- * Or 0 if the format is not supported.
+- */
+-static u32 convert_from_xrgb8888(u32 color, u32 format)
+-{
+- switch (format) {
+- case DRM_FORMAT_RGB565:
+- return convert_xrgb8888_to_rgb565(color);
+- case DRM_FORMAT_RGBA5551:
+- return convert_xrgb8888_to_rgba5551(color);
+- case DRM_FORMAT_XRGB1555:
+- return convert_xrgb8888_to_xrgb1555(color);
+- case DRM_FORMAT_ARGB1555:
+- return convert_xrgb8888_to_argb1555(color);
+- case DRM_FORMAT_RGB888:
+- case DRM_FORMAT_XRGB8888:
+- return color;
+- case DRM_FORMAT_ARGB8888:
+- return convert_xrgb8888_to_argb8888(color);
+- case DRM_FORMAT_XBGR8888:
+- return convert_xrgb8888_to_xbgr8888(color);
+- case DRM_FORMAT_ABGR8888:
+- return convert_xrgb8888_to_abgr8888(color);
+- case DRM_FORMAT_XRGB2101010:
+- return convert_xrgb8888_to_xrgb2101010(color);
+- case DRM_FORMAT_ARGB2101010:
+- return convert_xrgb8888_to_argb2101010(color);
+- case DRM_FORMAT_ABGR2101010:
+- return convert_xrgb8888_to_abgr2101010(color);
+- default:
+- WARN_ONCE(1, "Can't convert to %p4cc\n", &format);
+- return 0;
+- }
+-}
+-
+-/*
+- * Blit & Fill
+- */
+-/* check if the pixel at coord x,y is 1 (foreground) or 0 (background) */
+-static bool drm_panic_is_pixel_fg(const u8 *sbuf8, unsigned int spitch, int x, int y)
+-{
+- return (sbuf8[(y * spitch) + x / 8] & (0x80 >> (x % 8))) != 0;
+-}
+-
+-static void drm_panic_blit16(struct iosys_map *dmap, unsigned int dpitch,
+- const u8 *sbuf8, unsigned int spitch,
+- unsigned int height, unsigned int width,
+- unsigned int scale, u16 fg16)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++)
+- for (x = 0; x < width; x++)
+- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, fg16);
+-}
+-
+-static void drm_panic_blit24(struct iosys_map *dmap, unsigned int dpitch,
+- const u8 *sbuf8, unsigned int spitch,
+- unsigned int height, unsigned int width,
+- unsigned int scale, u32 fg32)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++) {
+- for (x = 0; x < width; x++) {
+- u32 off = y * dpitch + x * 3;
+-
+- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale)) {
+- /* write blue-green-red to output in little endianness */
+- iosys_map_wr(dmap, off, u8, (fg32 & 0x000000FF) >> 0);
+- iosys_map_wr(dmap, off + 1, u8, (fg32 & 0x0000FF00) >> 8);
+- iosys_map_wr(dmap, off + 2, u8, (fg32 & 0x00FF0000) >> 16);
+- }
+- }
+- }
+-}
+-
+-static void drm_panic_blit32(struct iosys_map *dmap, unsigned int dpitch,
+- const u8 *sbuf8, unsigned int spitch,
+- unsigned int height, unsigned int width,
+- unsigned int scale, u32 fg32)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++)
+- for (x = 0; x < width; x++)
+- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, fg32);
+-}
+-
+ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+ const u8 *sbuf8, unsigned int spitch, unsigned int scale,
+ u32 fg_color)
+@@ -322,7 +150,7 @@ static void drm_panic_blit_pixel(struct drm_scanout_buffer *sb, struct drm_rect
+
+ for (y = 0; y < drm_rect_height(clip); y++)
+ for (x = 0; x < drm_rect_width(clip); x++)
+- if (drm_panic_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
++ if (drm_draw_is_pixel_fg(sbuf8, spitch, x / scale, y / scale))
+ sb->set_pixel(sb, clip->x1 + x, clip->y1 + y, fg_color);
+ }
+
+@@ -354,62 +182,22 @@ static void drm_panic_blit(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+
+ switch (sb->format->cpp[0]) {
+ case 2:
+- drm_panic_blit16(&map, sb->pitch[0], sbuf8, spitch,
+- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++ drm_draw_blit16(&map, sb->pitch[0], sbuf8, spitch,
++ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ break;
+ case 3:
+- drm_panic_blit24(&map, sb->pitch[0], sbuf8, spitch,
+- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++ drm_draw_blit24(&map, sb->pitch[0], sbuf8, spitch,
++ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ break;
+ case 4:
+- drm_panic_blit32(&map, sb->pitch[0], sbuf8, spitch,
+- drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
++ drm_draw_blit32(&map, sb->pitch[0], sbuf8, spitch,
++ drm_rect_height(clip), drm_rect_width(clip), scale, fg_color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't blit with pixel width %d\n", sb->format->cpp[0]);
+ }
+ }
+
+-static void drm_panic_fill16(struct iosys_map *dmap, unsigned int dpitch,
+- unsigned int height, unsigned int width,
+- u16 color)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++)
+- for (x = 0; x < width; x++)
+- iosys_map_wr(dmap, y * dpitch + x * sizeof(u16), u16, color);
+-}
+-
+-static void drm_panic_fill24(struct iosys_map *dmap, unsigned int dpitch,
+- unsigned int height, unsigned int width,
+- u32 color)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++) {
+- for (x = 0; x < width; x++) {
+- unsigned int off = y * dpitch + x * 3;
+-
+- /* write blue-green-red to output in little endianness */
+- iosys_map_wr(dmap, off, u8, (color & 0x000000FF) >> 0);
+- iosys_map_wr(dmap, off + 1, u8, (color & 0x0000FF00) >> 8);
+- iosys_map_wr(dmap, off + 2, u8, (color & 0x00FF0000) >> 16);
+- }
+- }
+-}
+-
+-static void drm_panic_fill32(struct iosys_map *dmap, unsigned int dpitch,
+- unsigned int height, unsigned int width,
+- u32 color)
+-{
+- unsigned int y, x;
+-
+- for (y = 0; y < height; y++)
+- for (x = 0; x < width; x++)
+- iosys_map_wr(dmap, y * dpitch + x * sizeof(u32), u32, color);
+-}
+-
+ static void drm_panic_fill_pixel(struct drm_scanout_buffer *sb,
+ struct drm_rect *clip,
+ u32 color)
+@@ -442,27 +230,22 @@ static void drm_panic_fill(struct drm_scanout_buffer *sb, struct drm_rect *clip,
+
+ switch (sb->format->cpp[0]) {
+ case 2:
+- drm_panic_fill16(&map, sb->pitch[0], drm_rect_height(clip),
+- drm_rect_width(clip), color);
++ drm_draw_fill16(&map, sb->pitch[0], drm_rect_height(clip),
++ drm_rect_width(clip), color);
+ break;
+ case 3:
+- drm_panic_fill24(&map, sb->pitch[0], drm_rect_height(clip),
+- drm_rect_width(clip), color);
++ drm_draw_fill24(&map, sb->pitch[0], drm_rect_height(clip),
++ drm_rect_width(clip), color);
+ break;
+ case 4:
+- drm_panic_fill32(&map, sb->pitch[0], drm_rect_height(clip),
+- drm_rect_width(clip), color);
++ drm_draw_fill32(&map, sb->pitch[0], drm_rect_height(clip),
++ drm_rect_width(clip), color);
+ break;
+ default:
+ WARN_ONCE(1, "Can't fill with pixel width %d\n", sb->format->cpp[0]);
+ }
+ }
+
+-static const u8 *get_char_bitmap(const struct font_desc *font, char c, size_t font_pitch)
+-{
+- return font->data + (c * font->height) * font_pitch;
+-}
+-
+ static unsigned int get_max_line_len(const struct drm_panic_line *lines, int len)
+ {
+ int i;
+@@ -501,7 +284,7 @@ static void draw_txt_rectangle(struct drm_scanout_buffer *sb,
+ rec.x1 += (drm_rect_width(clip) - (line_len * font->width)) / 2;
+
+ for (j = 0; j < line_len; j++) {
+- src = get_char_bitmap(font, msg[i].txt[j], font_pitch);
++ src = drm_draw_get_char_bitmap(font, msg[i].txt[j], font_pitch);
+ rec.x2 = rec.x1 + font->width;
+ drm_panic_blit(sb, &rec, src, font_pitch, 1, color);
+ rec.x1 += font->width;
+@@ -533,8 +316,10 @@ static void drm_panic_logo_draw(struct drm_scanout_buffer *sb, struct drm_rect *
+
+ static void draw_panic_static_user(struct drm_scanout_buffer *sb)
+ {
+- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++ sb->format->format);
++ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++ sb->format->format);
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_screen, r_logo, r_msg;
+ unsigned int msg_width, msg_height;
+@@ -600,8 +385,10 @@ static int draw_line_with_wrap(struct drm_scanout_buffer *sb, const struct font_
+ */
+ static void draw_panic_static_kmsg(struct drm_scanout_buffer *sb)
+ {
+- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++ sb->format->format);
++ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++ sb->format->format);
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_screen = DRM_RECT_INIT(0, 0, sb->width, sb->height);
+ struct kmsg_dump_iter iter;
+@@ -791,8 +578,10 @@ static int drm_panic_get_qr_code(u8 **qr_image)
+ */
+ static int _draw_panic_static_qr_code(struct drm_scanout_buffer *sb)
+ {
+- u32 fg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR, sb->format->format);
+- u32 bg_color = convert_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR, sb->format->format);
++ u32 fg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_FOREGROUND_COLOR,
++ sb->format->format);
++ u32 bg_color = drm_draw_color_from_xrgb8888(CONFIG_DRM_PANIC_BACKGROUND_COLOR,
++ sb->format->format);
+ const struct font_desc *font = get_default_font(sb->width, sb->height, NULL, NULL);
+ struct drm_rect r_screen, r_logo, r_msg, r_qr, r_qr_canvas;
+ unsigned int max_qr_size, scale;
+@@ -878,7 +667,7 @@ static bool drm_panic_is_format_supported(const struct drm_format_info *format)
+ {
+ if (format->num_planes != 1)
+ return false;
+- return convert_from_xrgb8888(0xffffff, format->format) != 0;
++ return drm_draw_color_from_xrgb8888(0xffffff, format->format) != 0;
+ }
+
+ static void draw_panic_dispatch(struct drm_scanout_buffer *sb)
+--
+2.50.1
+
--- /dev/null
+From cf244310c67c6d8ed3ffffa33d25f710be846443 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jun 2025 10:37:04 +0200
+Subject: drm/tests: Do not use drm_fb_blit() in format-helper tests
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 5a4856e0e38109ba994f369962f054ecb445c098 ]
+
+Export additional helpers from the format-helper library and open-code
+drm_fb_blit() in tests. Prepares for the removal of drm_fb_blit(). Only
+sysfb drivers use drm_fb_blit(). The function will soon be removed from
+format helpers and be refactored within sysfb helpers.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: José Expósito <jose.exposito89@gmail.com>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Link: https://lore.kernel.org/r/20250616083846.221396-2-tzimmermann@suse.de
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_format_helper.c | 108 ++++++++++++++++--
+ drivers/gpu/drm/drm_format_internal.h | 8 ++
+ .../gpu/drm/tests/drm_format_helper_test.c | 108 +++---------------
+ include/drm/drm_format_helper.h | 9 ++
+ 4 files changed, 131 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
+index 4dcb78895581..3769760b15cd 100644
+--- a/drivers/gpu/drm/drm_format_helper.c
++++ b/drivers/gpu/drm/drm_format_helper.c
+@@ -812,11 +812,33 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
+ }
+
+-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+- const struct iosys_map *src,
+- const struct drm_framebuffer *fb,
+- const struct drm_rect *clip,
+- struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
++ * @dst: Array of ABGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
++ * natively. It sets an opaque alpha channel as part of the conversion.
++ */
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
+ {
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+@@ -825,17 +847,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_abgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
+
+ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
+ }
+
+-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+- const struct iosys_map *src,
+- const struct drm_framebuffer *fb,
+- const struct drm_rect *clip,
+- struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
++ * @dst: Array of XBGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
+ {
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+@@ -844,6 +889,49 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_xbgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
++
++static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
++{
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
++}
++
++/**
++ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
++ * @dst: Array of BGRX8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
++{
++ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
++ 4,
++ };
++
++ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
++ drm_fb_xrgb8888_to_bgrx8888_line);
++}
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
+
+ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
+index 5f82f0b9c8e8..f06f09989ddc 100644
+--- a/drivers/gpu/drm/drm_format_internal.h
++++ b/drivers/gpu/drm/drm_format_internal.h
+@@ -82,6 +82,14 @@ static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+ ((pix & 0x000000ff) << 16);
+ }
+
++static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
++{
++ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
++ ((pix & 0x00ff0000) >> 8) |
++ ((pix & 0x0000ff00) << 8) |
++ ((pix & 0x000000ff) << 24);
++}
++
+ static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+ {
+ return GENMASK(31, 24) | /* fill alpha bits */
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 2a3d80b27cae..8b62adbd4dfa 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -748,14 +748,9 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+ buf = dst.vaddr;
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
++ &fmtcnv_state, false);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
++ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
++ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
+- &params->clip, &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
++ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
+- &params->clip, &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
+- &src, &fb, &params->clip, &fmtcnv_state);
++ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr;
+ memset(buf, 0, dst_size);
+
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
+- &fmtcnv_state);
++ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr;
+@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
+ mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
+ fb.format = &mock_format;
+
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
+- &fmtcnv_state);
++ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
+ const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ NULL : &result->dst_pitch;
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
+ const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ NULL : &result->dst_pitch;
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1910,12 +1838,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
+ memset(buf[i], 0, dst_size[i]);
+ }
+
+- int blit_result;
+-
+- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
+- &fmtcnv_state);
++ drm_fb_memcpy(dst, dst_pitches, src, &fb, ¶ms->clip);
+
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ for (size_t i = 0; i < fb.format->num_planes; i++) {
+ expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
+ KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
+diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
+index aa1604d92c1a..2de9974992c3 100644
+--- a/include/drm/drm_format_helper.h
++++ b/include/drm/drm_format_helper.h
+@@ -102,6 +102,15 @@ void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pi
+ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
+ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+--
+2.50.1
+
--- /dev/null
+From 3f774fe4699f428952b60de10c8414389c58577f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Jun 2025 11:00:54 +0200
+Subject: drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit 05663d88fd0b8ee1c54ab2d5fb36f9b6a3ed37f7 ]
+
+Fix failures on big-endian architectures on tests cases
+single_pixel_source_buffer, single_pixel_clip_rectangle,
+well_known_colors and destination_pitch.
+
+Fixes: 15bda1f8de5d ("drm/tests: Add calls to drm_fb_blit() on supported format conversion tests")
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250630090054.353246-2-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_format_helper_test.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 8b62adbd4dfa..e17643c408bf 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -1040,6 +1040,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ memset(buf, 0, dst_size);
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
++ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 47b658ee11fa746df8b0364b49a4a024a66062c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Jun 2025 11:00:53 +0200
+Subject: drm/tests: Fix endian warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit d28b9d2925b4f773adb21b1fc20260ddc370fb13 ]
+
+When compiling with sparse enabled, this warning is thrown:
+
+ warning: incorrect type in argument 2 (different base types)
+ expected restricted __le32 const [usertype] *buf
+ got unsigned int [usertype] *[assigned] buf
+
+Add a cast to fix it.
+
+Fixes: 453114319699 ("drm/format-helper: Add KUnit tests for drm_fb_xrgb8888_to_xrgb2101010()")
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250630090054.353246-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_format_helper_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 08992636ec05..b4d62fb1d909 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -991,7 +991,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ NULL : &result->dst_pitch;
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
+- buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
++ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr; /* restore original value of buf */
+--
+2.50.1
+
--- /dev/null
+From 44d514f424b95a26d16d383ffc54d8c46ae46270 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 14:12:45 -0700
+Subject: gve: prevent ethtool ops after shutdown
+
+From: Jordan Rhee <jordanrhee@google.com>
+
+[ Upstream commit 75a9a46d67f46d608205888f9b34e315c1786345 ]
+
+A crash can occur if an ethtool operation is invoked
+after shutdown() is called.
+
+shutdown() is invoked during system shutdown to stop DMA operations
+without performing expensive deallocations. It is discouraged to
+unregister the netdev in this path, so the device may still be visible
+to userspace and kernel helpers.
+
+In gve, shutdown() tears down most internal data structures. If an
+ethtool operation is dispatched after shutdown(), it will dereference
+freed or NULL pointers, leading to a kernel panic. While graceful
+shutdown normally quiesces userspace before invoking the reboot
+syscall, forced shutdowns (as observed on GCP VMs) can still trigger
+this path.
+
+Fix by calling netif_device_detach() in shutdown().
+This marks the device as detached so the ethtool ioctl handler
+will skip dispatching operations to the driver.
+
+Fixes: 974365e51861 ("gve: Implement suspend/resume/shutdown")
+Signed-off-by: Jordan Rhee <jordanrhee@google.com>
+Signed-off-by: Jeroen de Borst <jeroendb@google.com>
+Link: https://patch.msgid.link/20250818211245.1156919-1-jeroendb@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 8ea3c7493663..497a19ca198d 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2726,6 +2726,8 @@ static void gve_shutdown(struct pci_dev *pdev)
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_running(priv->dev);
+
++ netif_device_detach(netdev);
++
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close, if close fails, reset */
+--
+2.50.1
+
--- /dev/null
+From 3bffc1fc770c17d47a09df99aa544f5df7a4c2a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:59 -0700
+Subject: igc: fix disabling L1.2 PCI-E link substate on I226 on init
+
+From: ValdikSS <iam@valdikss.org.ru>
+
+[ Upstream commit 1468c1f97cf32418e34dbb40b784ed9333b9e123 ]
+
+Device ID comparison in igc_is_device_id_i226 is performed before
+the ID is set, resulting in always failing check on init.
+
+Before the patch:
+* L1.2 is not disabled on init
+* L1.2 is properly disabled after suspend-resume cycle
+
+With the patch:
+* L1.2 is properly disabled both on init and after suspend-resume
+
+How to test:
+Connect to the 1G link with 300+ mbit/s Internet speed, and run
+the download speed test, such as:
+
+ curl -o /dev/null http://speedtest.selectel.ru/1GB
+
+Without L1.2 disabled, the speed would be no more than ~200 mbit/s.
+With L1.2 disabled, the speed would reach 1 gbit/s.
+Note: it's required that the latency between your host and the remote
+be around 3-5 ms, the test inside LAN (<1 ms latency) won't trigger the
+issue.
+
+Link: https://lore.kernel.org/intel-wired-lan/15248b4f-3271-42dd-8e35-02bfc92b25e1@intel.com
+Fixes: 0325143b59c6 ("igc: disable L1.2 PCI-E link substate to avoid performance issue")
+Signed-off-by: ValdikSS <iam@valdikss.org.ru>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-6-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 2a0c5a343e47..aadc0667fa04 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6987,6 +6987,13 @@ static int igc_probe(struct pci_dev *pdev,
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -7013,13 +7020,6 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+--
+2.50.1
+
--- /dev/null
+From 8aa1b1ffe2317b9bac4a78803ccc37dffbdac333 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index ff11cd7e5c06..f5b544e0f230 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3598,7 +3598,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3624,7 +3624,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From 1d78ddb2d1b51161746a15d56e9684ac78052164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:20:51 +0530
+Subject: iosys-map: Fix undefined behavior in iosys_map_clear()
+
+From: Nitin Gote <nitin.r.gote@intel.com>
+
+[ Upstream commit 5634c8cb298a7146b4e38873473e280b50e27a2c ]
+
+The current iosys_map_clear() implementation reads the potentially
+uninitialized 'is_iomem' boolean field to decide which union member
+to clear. This causes undefined behavior when called on uninitialized
+structures, as 'is_iomem' may contain garbage values like 0xFF.
+
+UBSAN detects this as:
+ UBSAN: invalid-load in include/linux/iosys-map.h:267
+ load of value 255 is not a valid value for type '_Bool'
+
+Fix by unconditionally clearing the entire structure with memset(),
+eliminating the need to read uninitialized data and ensuring all
+fields are set to known good values.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14639
+Fixes: 01fd30da0474 ("dma-buf: Add struct dma-buf-map for storing struct dma_buf.vaddr_ptr")
+Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250718105051.2709487-1-nitin.r.gote@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/iosys-map.h | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
+index 4696abfd311c..3e85afe794c0 100644
+--- a/include/linux/iosys-map.h
++++ b/include/linux/iosys-map.h
+@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
+ */
+ static inline void iosys_map_clear(struct iosys_map *map)
+ {
+- if (map->is_iomem) {
+- map->vaddr_iomem = NULL;
+- map->is_iomem = false;
+- } else {
+- map->vaddr = NULL;
+- }
++ memset(map, 0, sizeof(*map));
+ }
+
+ /**
+--
+2.50.1
+
--- /dev/null
+From 6a144ddf169bb8f77d24b3bbcd3fb7c67cbb230e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index e955008e732b..5d21a74c1165 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -296,6 +296,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From 39ba6c96132923aa1695eff997fa19fc4f45f80a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 3e3b471e53f0..b12c487f36cf 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From 36310228ea8f32ad75955da2136e7a5c50ab66a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Optimize module load time by optimizing PLT/GOT counting
+
+From: Kanglong Wang <wangkanglong@loongson.cn>
+
+[ Upstream commit 63dbd8fb2af3a89466538599a9acb2d11ef65c06 ]
+
+When enabling CONFIG_KASAN, CONFIG_PREEMPT_VOLUNTARY_BUILD and
+CONFIG_PREEMPT_VOLUNTARY at the same time, there will be soft deadlock,
+the relevant logs are as follows:
+
+rcu: INFO: rcu_sched self-detected stall on CPU
+...
+Call Trace:
+[<900000000024f9e4>] show_stack+0x5c/0x180
+[<90000000002482f4>] dump_stack_lvl+0x94/0xbc
+[<9000000000224544>] rcu_dump_cpu_stacks+0x1fc/0x280
+[<900000000037ac80>] rcu_sched_clock_irq+0x720/0xf88
+[<9000000000396c34>] update_process_times+0xb4/0x150
+[<90000000003b2474>] tick_nohz_handler+0xf4/0x250
+[<9000000000397e28>] __hrtimer_run_queues+0x1d0/0x428
+[<9000000000399b2c>] hrtimer_interrupt+0x214/0x538
+[<9000000000253634>] constant_timer_interrupt+0x64/0x80
+[<9000000000349938>] __handle_irq_event_percpu+0x78/0x1a0
+[<9000000000349a78>] handle_irq_event_percpu+0x18/0x88
+[<9000000000354c00>] handle_percpu_irq+0x90/0xf0
+[<9000000000348c74>] handle_irq_desc+0x94/0xb8
+[<9000000001012b28>] handle_cpu_irq+0x68/0xa0
+[<9000000001def8c0>] handle_loongarch_irq+0x30/0x48
+[<9000000001def958>] do_vint+0x80/0xd0
+[<9000000000268a0c>] kasan_mem_to_shadow.part.0+0x2c/0x2a0
+[<90000000006344f4>] __asan_load8+0x4c/0x120
+[<900000000025c0d0>] module_frob_arch_sections+0x5c8/0x6b8
+[<90000000003895f0>] load_module+0x9e0/0x2958
+[<900000000038b770>] __do_sys_init_module+0x208/0x2d0
+[<9000000001df0c34>] do_syscall+0x94/0x190
+[<900000000024d6fc>] handle_syscall+0xbc/0x158
+
+After analysis, this is because the slow speed of loading the amdgpu
+module leads to the long time occupation of the cpu and then the soft
+deadlock.
+
+When loading a module, module_frob_arch_sections() tries to figure out
+the number of PLTs/GOTs that will be needed to handle all the RELAs. It
+will call the count_max_entries() to find duplicates in out-of-order
+data, using a counting algorithm which has O(n^2) complexity.
+
+To make it faster, we sort the relocation list by info and addend. That
+way, to check for a duplicate relocation, it just needs to compare with
+the previous entry. This reduces the complexity of the algorithm to O(n
+ log n), as done in commit d4e0340919fb ("arm64/module: Optimize module
+load time by optimizing PLT counting"). This gives a significant
+reduction in module load time for modules with a large number of
+relocations.
+
+After applying this patch, the soft deadlock problem has been solved,
+and the kernel starts normally without "Call Trace".
+
+Using the default configuration to test some modules, the results are as
+follows:
+
+Module Size
+ip_tables 36K
+fat 143K
+radeon 2.5MB
+amdgpu 16MB
+
+Without this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 54 1221/84
+amdgpu 1411 4525/1098
+
+With this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 22 1221/84
+amdgpu 45 4525/1098
+
+Fixes: fcdfe9d22bed ("LoongArch: Add ELF and module support")
+Signed-off-by: Kanglong Wang <wangkanglong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/module-sections.c | 36 ++++++++++++-------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
+index e2f30ff9afde..a43ba7f9f987 100644
+--- a/arch/loongarch/kernel/module-sections.c
++++ b/arch/loongarch/kernel/module-sections.c
+@@ -8,6 +8,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleloader.h>
+ #include <linux/ftrace.h>
++#include <linux/sort.h>
+
+ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+ {
+@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
+ return (Elf_Addr)&plt[nr];
+ }
+
+-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+-{
+- return x->r_info == y->r_info && x->r_addend == y->r_addend;
+-}
++#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
+
+-static bool duplicate_rela(const Elf_Rela *rela, int idx)
++static int compare_rela(const void *x, const void *y)
+ {
+- int i;
++ int ret;
++ const Elf_Rela *rela_x = x, *rela_y = y;
+
+- for (i = 0; i < idx; i++) {
+- if (is_rela_equal(&rela[i], &rela[idx]))
+- return true;
+- }
++ ret = cmp_3way(rela_x->r_info, rela_y->r_info);
++ if (ret == 0)
++ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
+
+- return false;
++ return ret;
+ }
+
+ static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+ {
+- unsigned int i, type;
++ unsigned int i;
++
++ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
+
+ for (i = 0; i < num; i++) {
+- type = ELF_R_TYPE(relas[i].r_info);
+- switch (type) {
++ if (i && !compare_rela(&relas[i-1], &relas[i]))
++ continue;
++
++ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ case R_LARCH_B26:
+- if (!duplicate_rela(relas, i))
+- (*plts)++;
++ (*plts)++;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+- if (!duplicate_rela(relas, i))
+- (*gots)++;
++ (*gots)++;
+ break;
+ default:
+ break; /* Do nothing. */
+--
+2.50.1
+
--- /dev/null
+From 41b0ce4cba6843585a554bf29e9b468bba846452 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 11:35:13 +0530
+Subject: microchip: lan865x: fix missing netif_start_queue() call on device
+ open
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit 1683fd1b2fa79864d3c7a951d9cea0a9ba1a1923 ]
+
+This fixes an issue where the transmit queue is started implicitly only
+the very first time the device is registered. When the device is taken
+down and brought back up again (using `ip` or `ifconfig`), the transmit
+queue is not restarted, causing packet transmission to hang.
+
+Adding an explicit call to netif_start_queue() in lan865x_net_open()
+ensures the transmit queue is properly started every time the device
+is reopened.
+
+Fixes: 5cd2340cb6a3 ("microchip: lan865x: add driver support for Microchip's LAN865X MAC-PHY")
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Link: https://patch.msgid.link/20250818060514.52795-2-parthiban.veerasooran@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan865x/lan865x.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index dd436bdff0f8..d03f5a8de58d 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -311,6 +311,8 @@ static int lan865x_net_open(struct net_device *netdev)
+
+ phy_start(netdev->phydev);
+
++ netif_start_queue(netdev);
++
+ return 0;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From c2cc064572e878a55a6e11c8895e605232aee862 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 11:35:14 +0530
+Subject: microchip: lan865x: fix missing Timer Increment config for Rev.B0/B1
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit 2cd58fec912acec273cb155911ab8f06ddbb131a ]
+
+Fix missing configuration for LAN865x silicon revisions B0 and B1 as per
+Microchip Application Note AN1760 (Rev F, June 2024).
+
+The Timer Increment register was not being set, which is required for
+accurate timestamping. As per the application note, configure the MAC to
+set timestamping at the end of the Start of Frame Delimiter (SFD), and
+set the Timer Increment register to 40 ns (corresponding to a 25 MHz
+internal clock).
+
+Link: https://www.microchip.com/en-us/application-notes/an1760
+
+Fixes: 5cd2340cb6a3 ("microchip: lan865x: add driver support for Microchip's LAN865X MAC-PHY")
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250818060514.52795-3-parthiban.veerasooran@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/microchip/lan865x/lan865x.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index d03f5a8de58d..84c41f193561 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -32,6 +32,10 @@
+ /* MAC Specific Addr 1 Top Reg */
+ #define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
++/* MAC TSU Timer Increment Register */
++#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
++#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
++
+ struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+@@ -346,6 +350,21 @@ static int lan865x_probe(struct spi_device *spi)
+ goto free_netdev;
+ }
+
++ /* LAN865x Rev.B0/B1 configuration parameters from AN1760
++ * As per the Configuration Application Note AN1760 published in the
++ * link, https://www.microchip.com/en-us/application-notes/an1760
++ * Revision F (DS60001760G - June 2024), configure the MAC to set time
++ * stamping at the end of the Start of Frame Delimiter (SFD) and set the
++ * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.
++ */
++ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
++ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
++ if (ret) {
++ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
++ ret);
++ goto oa_tc6_exit;
++ }
++
+ /* As per the point s3 in the below errata, SPI receive Ethernet frame
+ * transfer may halt when starting the next frame in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+--
+2.50.1
+
--- /dev/null
+From d15606777e9438ff32f07edf9ac1e66773d52b4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 3f5e5d99251b..26401bb57572 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2507,6 +2507,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 83477c8e6971..5bfc1499347a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -95,6 +95,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From 35bb1e65cadfc2ebbac4379151df6c0412835d4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 10:10:54 +0800
+Subject: net: bridge: fix soft lockup in br_multicast_query_expired()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d1547bf460baec718b3398365f8de33d25c5f36f ]
+
+When set multicast_query_interval to a large value, the local variable
+'time' in br_multicast_send_query() may overflow. If the time is smaller
+than jiffies, the timer will expire immediately, and then call mod_timer()
+again, which creates a loop and may trigger the following soft lockup
+issue.
+
+ watchdog: BUG: soft lockup - CPU#1 stuck for 221s! [rb_consumer:66]
+ CPU: 1 UID: 0 PID: 66 Comm: rb_consumer Not tainted 6.16.0+ #259 PREEMPT(none)
+ Call Trace:
+ <IRQ>
+ __netdev_alloc_skb+0x2e/0x3a0
+ br_ip6_multicast_alloc_query+0x212/0x1b70
+ __br_multicast_send_query+0x376/0xac0
+ br_multicast_send_query+0x299/0x510
+ br_multicast_query_expired.constprop.0+0x16d/0x1b0
+ call_timer_fn+0x3b/0x2a0
+ __run_timers+0x619/0x950
+ run_timer_softirq+0x11c/0x220
+ handle_softirqs+0x18e/0x560
+ __irq_exit_rcu+0x158/0x1a0
+ sysvec_apic_timer_interrupt+0x76/0x90
+ </IRQ>
+
+This issue can be reproduced with:
+ ip link add br0 type bridge
+ echo 1 > /sys/class/net/br0/bridge/multicast_querier
+ echo 0xffffffffffffffff >
+ /sys/class/net/br0/bridge/multicast_query_interval
+ ip link set dev br0 up
+
+The multicast_startup_query_interval can also cause this issue. Similar to
+the commit 99b40610956a ("net: bridge: mcast: add and enforce query
+interval minimum"), add check for the query interval maximum to fix this
+issue.
+
+Link: https://lore.kernel.org/netdev/20250806094941.1285944-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250812091818.542238-1-wangliang74@huawei.com/
+Fixes: d902eee43f19 ("bridge: Add multicast count/interval sysfs entries")
+Suggested-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250813021054.1643649-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_multicast.c | 16 ++++++++++++++++
+ net/bridge/br_private.h | 2 ++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 733ff6b758f6..0a00c3f57815 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4808,6 +4808,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_query_interval = intvl_jiffies;
+ }
+
+@@ -4824,6 +4832,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 6a1bce8959af..5026a256bf92 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -31,6 +31,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+
+ #define BR_HWDOM_MAX BITS_PER_LONG
+
+--
+2.50.1
+
--- /dev/null
+From 42a2fd79d38e09a762029b1ac2f4b002799eae31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 18:04:57 -0700
+Subject: net: dsa: microchip: Fix KSZ9477 HSR port setup issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tristram Ha <tristram.ha@microchip.com>
+
+[ Upstream commit e318cd6714592fb762fcab59c5684a442243a12f ]
+
+ksz9477_hsr_join() is called once to setup the HSR port membership, but
+the port can be enabled later, or disabled and enabled back and the port
+membership is not set correctly inside ksz_update_port_member(). The
+added code always uses the correct HSR port membership for an HSR port
+that is enabled.
+
+Fixes: 2d61298fdd7b ("net: dsa: microchip: Enable HSR offloading for KSZ9477")
+Reported-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Signed-off-by: Tristram Ha <tristram.ha@microchip.com>
+Reviewed-by: Łukasz Majewski <lukma@nabladev.com>
+Tested-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Reviewed-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Link: https://patch.msgid.link/20250819010457.563286-1-Tristram.Ha@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index bf26cd0abf6d..0a34fd6887fc 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2208,6 +2208,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
+ }
+
++ /* HSR ports are setup once so need to use the assigned membership
++ * when the port is enabled.
++ */
++ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
++ (dev->hsr_ports & BIT(port)))
++ port_member = dev->hsr_ports;
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 8657de519aeccc6db6aa80609e8abcb4862cee07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:57 +0800
+Subject: net: ethernet: mtk_ppe: add RCU lock around dev_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 62c30c544359aa18b8fb2734166467a07d435c2d ]
+
+Ensure ndo_fill_forward_path() is called with RCU lock held.
+
+Fixes: 2830e314778d ("net: ethernet: mtk-ppe: fix traffic offload with bridged wlan")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-1-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index c855fb799ce1..e9bd32741983 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
++ rcu_read_lock();
+ err = dev_fill_forward_path(dev, addr, &stack);
++ rcu_read_unlock();
+ if (err)
+ return err;
+
+--
+2.50.1
+
--- /dev/null
+From 689fe0162d52caad27e54a7935aff0113d2cf13b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 12:51:19 +0200
+Subject: net: gso: Forbid IPv6 TSO with extensions on devices with only
+ IPV6_CSUM
+
+From: Jakub Ramaseuski <jramaseu@redhat.com>
+
+[ Upstream commit 864e3396976ef41de6cc7bc366276bf4e084fff2 ]
+
+When performing Generic Segmentation Offload (GSO) on an IPv6 packet that
+contains extension headers, the kernel incorrectly requests checksum offload
+if the egress device only advertises NETIF_F_IPV6_CSUM feature, which has
+a strict contract: it supports checksum offload only for plain TCP or UDP
+over IPv6 and explicitly does not support packets with extension headers.
+The current GSO logic violates this contract by failing to disable the feature
+for packets with extension headers, such as those used in GREoIPv6 tunnels.
+
+This violation results in the device being asked to perform an operation
+it cannot support, leading to a `skb_warn_bad_offload` warning and a collapse
+of network throughput. While device TSO/USO is correctly bypassed in favor
+of software GSO for these packets, the GSO stack must be explicitly told not
+to request checksum offload.
+
+Mask NETIF_F_IPV6_CSUM, NETIF_F_TSO6 and NETIF_F_GSO_UDP_L4
+in gso_features_check if the IPv6 header contains extension headers to compute
+checksum in software.
+
+The exception is a BIG TCP extension, which, as stated in commit
+68e068cabd2c6c53 ("net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets"):
+"The feature is only enabled on devices that support BIG TCP TSO.
+The header is only present for PF_PACKET taps like tcpdump,
+and not transmitted by physical devices."
+
+kernel log output (truncated):
+WARNING: CPU: 1 PID: 5273 at net/core/dev.c:3535 skb_warn_bad_offload+0x81/0x140
+...
+Call Trace:
+ <TASK>
+ skb_checksum_help+0x12a/0x1f0
+ validate_xmit_skb+0x1a3/0x2d0
+ validate_xmit_skb_list+0x4f/0x80
+ sch_direct_xmit+0x1a2/0x380
+ __dev_xmit_skb+0x242/0x670
+ __dev_queue_xmit+0x3fc/0x7f0
+ ip6_finish_output2+0x25e/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_tnl_xmit+0x608/0xc00 [ip6_tunnel]
+ ip6gre_tunnel_xmit+0x1c0/0x390 [ip6_gre]
+ dev_hard_start_xmit+0x63/0x1c0
+ __dev_queue_xmit+0x6d0/0x7f0
+ ip6_finish_output2+0x214/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ inet6_csk_xmit+0xeb/0x150
+ __tcp_transmit_skb+0x555/0xa80
+ tcp_write_xmit+0x32a/0xe90
+ tcp_sendmsg_locked+0x437/0x1110
+ tcp_sendmsg+0x2f/0x50
+...
+skb linear: 00000000: e4 3d 1a 7d ec 30 e4 3d 1a 7e 5d 90 86 dd 60 0e
+skb linear: 00000010: 00 0a 1b 34 3c 40 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000020: 00 00 00 00 00 12 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000030: 00 00 00 00 00 11 2f 00 04 01 04 01 01 00 00 00
+skb linear: 00000040: 86 dd 60 0e 00 0a 1b 00 06 40 20 23 00 00 00 00
+skb linear: 00000050: 00 00 00 00 00 00 00 00 00 12 20 23 00 00 00 00
+skb linear: 00000060: 00 00 00 00 00 00 00 00 00 11 bf 96 14 51 13 f9
+skb linear: 00000070: ae 27 a0 a8 2b e3 80 18 00 40 5b 6f 00 00 01 01
+skb linear: 00000080: 08 0a 42 d4 50 d5 4b 70 f8 1a
+
+Fixes: 04c20a9356f283da ("net: skip offload for NETIF_F_IPV6_CSUM if ipv6 header contains extension")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Suggested-by: Michal Schmidt <mschmidt@redhat.com>
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Jakub Ramaseuski <jramaseu@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250814105119.1525687-1-jramaseu@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 2ba2160dd093..cfd32bd02a69 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3603,6 +3603,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ features &= ~NETIF_F_TSO_MANGLEID;
+ }
+
++ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
++ * so neither does TSO that depends on it.
++ */
++ if (features & NETIF_F_IPV6_CSUM &&
++ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
++ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
++
+ return features;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 83bdd3aa13806a3edd32b4057eb6019370b27e6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Jul 2025 09:48:14 +0300
+Subject: net/mlx5: Add IFC bits and enums for buf_ownership
+
+From: Oren Sidi <osidi@nvidia.com>
+
+[ Upstream commit 6f09ee0b583cad4f2b6a82842c26235bee3d5c2e ]
+
+Extend structure layouts and defines buf_ownership.
+buf_ownership indicates whether the buffer is managed by SW or FW.
+
+Signed-off-by: Oren Sidi <osidi@nvidia.com>
+Reviewed-by: Alex Lazar <alazar@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1752734895-257735-3-git-send-email-tariqt@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Stable-dep-of: 451d2849ea66 ("net/mlx5e: Query FW for buffer ownership")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mlx5/mlx5_ifc.h | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 512e25c416ae..2b1a816e4d59 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -10358,8 +10358,16 @@ struct mlx5_ifc_pifr_reg_bits {
+ u8 port_filter_update_en[8][0x20];
+ };
+
++enum {
++ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
++ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
++ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
++};
++
+ struct mlx5_ifc_pfcc_reg_bits {
+- u8 reserved_at_0[0x8];
++ u8 reserved_at_0[0x4];
++ u8 buf_ownership[0x2];
++ u8 reserved_at_6[0x2];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+@@ -10491,7 +10499,9 @@ struct mlx5_ifc_mtutc_reg_bits {
+ struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x48];
+ u8 fec_100G_per_lane_in_pplm[0x1];
+- u8 reserved_at_49[0x1f];
++ u8 reserved_at_49[0xa];
++ u8 buffer_ownership[0x1];
++ u8 resereved_at_54[0x14];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
+ u8 rx_icrc_encapsulated_counter[0x1];
+--
+2.50.1
+
--- /dev/null
+From 9ee6048ff97c51421e152b689af6c132c6091761 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:02 +0300
+Subject: net/mlx5: Base ECVF devlink port attrs from 0
+
+From: Daniel Jurgens <danielj@nvidia.com>
+
+[ Upstream commit bc17455bc843b2f4b206e0bb8139013eb3d3c08b ]
+
+Adjust the vport number by the base ECVF vport number so the port
+attributes start at 0. Previously the port attributes would start 1
+after the maximum number of host VFs.
+
+Fixes: dc13180824b7 ("net/mlx5: Enable devlink port for embedded cpu VF vports")
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250820133209.389065-2-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+index f8869c9b6802..b0c97648ffc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, external);
+ } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
++ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
++
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
+- vport_num - 1, false);
++ vport_num - base_vport, false);
+ }
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 6c55f13462765fd468e93dce600b759b284f4f0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Mar 2025 18:06:15 +0200
+Subject: net/mlx5: Relocate function declarations from port.h to mlx5_core.h
+
+From: Shahar Shitrit <shshitrit@nvidia.com>
+
+[ Upstream commit a2f61f1db85532e72fb8a3af51b06df94bb82912 ]
+
+The port header is a general file under include, yet it
+contains declarations for functions that are either not
+exported or exported but not used outside the mlx5_core
+driver.
+
+To enhance code organization, we move these declarations
+to mlx5_core.h, where they are more appropriately scoped.
+
+This refactor removes unnecessary exported symbols and
+prevents unexported functions from being inadvertently
+referenced outside of the mlx5_core driver.
+
+Signed-off-by: Shahar Shitrit <shshitrit@nvidia.com>
+Reviewed-by: Carolina Jubran <cjubran@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/20250304160620.417580-2-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 451d2849ea66 ("net/mlx5e: Query FW for buffer ownership")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 85 +++++++++++++++++++
+ .../net/ethernet/mellanox/mlx5/core/port.c | 20 -----
+ include/linux/mlx5/port.h | 85 +------------------
+ 3 files changed, 86 insertions(+), 104 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 62c770b0eaa8..52c9a196728d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -114,6 +114,21 @@ struct mlx5_cmd_alias_obj_create_attr {
+ u8 access_key[ACCESS_KEY_LEN];
+ };
+
++struct mlx5_port_eth_proto {
++ u32 cap;
++ u32 admin;
++ u32 oper;
++};
++
++struct mlx5_module_eeprom_query_params {
++ u16 size;
++ u16 offset;
++ u16 i2c_address;
++ u32 page;
++ u32 bank;
++ u32 module_number;
++};
++
+ static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
+ {
+ struct device *device = dev->device;
+@@ -278,6 +293,76 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+ struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev);
+ void mlx5_dm_cleanup(struct mlx5_core_dev *dev);
+
++void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
++int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
++ enum mlx5_port_status status);
++int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
++ enum mlx5_port_status *status);
++int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
++
++int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
++int mlx5_query_port_pause(struct mlx5_core_dev *dev,
++ u32 *rx_pause, u32 *tx_pause);
++
++int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
++int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
++ u8 *pfc_en_rx);
++
++int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
++ u16 stall_critical_watermark,
++ u16 stall_minor_watermark);
++int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
++ u16 *stall_critical_watermark,
++ u16 *stall_minor_watermark);
++
++int mlx5_max_tc(struct mlx5_core_dev *mdev);
++int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
++int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
++ u8 prio, u8 *tc);
++int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
++int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
++ u8 tc, u8 *tc_group);
++int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
++int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
++ u8 tc, u8 *bw_pct);
++int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
++ u8 *max_bw_value,
++ u8 *max_bw_unit);
++int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
++ u8 *max_bw_value,
++ u8 *max_bw_unit);
++int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
++int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
++
++int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
++int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
++int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
++void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
++ bool *enabled);
++int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
++ u16 offset, u16 size, u8 *data);
++int
++mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
++ struct mlx5_module_eeprom_query_params *params,
++ u8 *data);
++
++int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
++int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
++int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
++int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
++int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
++int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
++
++int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
++ struct mlx5_port_eth_proto *eproto);
++bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
++u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
++ bool force_legacy);
++u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
++ bool force_legacy);
++int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
++
+ #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \
+ MLX5_CAP_GEN((mdev), pps_modify) && \
+ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 50931584132b..dee4e44e2274 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -196,7 +196,6 @@ void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
+
+ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+@@ -210,7 +209,6 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_admin_status);
+
+ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status *status)
+@@ -227,7 +225,6 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+ *status = MLX5_GET(paos_reg, out, admin_status);
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
+
+ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+ u16 *max_mtu, u16 *oper_mtu, u8 port)
+@@ -257,7 +254,6 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+ void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
+ u8 port)
+@@ -447,7 +443,6 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+
+ return mlx5_query_mcia(dev, &query, data);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+
+ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+ struct mlx5_module_eeprom_query_params *params,
+@@ -467,7 +462,6 @@ int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+
+ return mlx5_query_mcia(dev, params, data);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom_by_page);
+
+ static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
+ int pvlc_size, u8 local_port)
+@@ -518,7 +512,6 @@ int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause)
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_pause);
+
+ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+ u32 *rx_pause, u32 *tx_pause)
+@@ -538,7 +531,6 @@ int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_pause);
+
+ int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+ u16 stall_critical_watermark,
+@@ -597,7 +589,6 @@ int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx)
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PFCC, 0, 1);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_pfc);
+
+ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+ {
+@@ -616,7 +607,6 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+
+ int mlx5_max_tc(struct mlx5_core_dev *mdev)
+ {
+@@ -667,7 +657,6 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc)
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_prio_tc);
+
+ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+ u8 prio, u8 *tc)
+@@ -689,7 +678,6 @@ int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+
+ return err;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
+
+ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
+ int inlen)
+@@ -728,7 +716,6 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group)
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group);
+
+ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *tc_group)
+@@ -749,7 +736,6 @@ int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group);
+
+ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+ {
+@@ -763,7 +749,6 @@ int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw)
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_tc_bw_alloc);
+
+ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+ u8 tc, u8 *bw_pct)
+@@ -784,7 +769,6 @@ int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_tc_bw_alloc);
+
+ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+@@ -808,7 +792,6 @@ int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+
+ return mlx5_set_port_qetcr_reg(mdev, in, sizeof(in));
+ }
+-EXPORT_SYMBOL_GPL(mlx5_modify_port_ets_rate_limit);
+
+ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+ u8 *max_bw_value,
+@@ -834,7 +817,6 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+
+ return 0;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_ets_rate_limit);
+
+ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+ {
+@@ -845,7 +827,6 @@ int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode)
+ MLX5_SET(set_wol_rol_in, in, wol_mode, wol_mode);
+ return mlx5_cmd_exec_in(mdev, set_wol_rol, in);
+ }
+-EXPORT_SYMBOL_GPL(mlx5_set_port_wol);
+
+ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+ {
+@@ -860,7 +841,6 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
+
+ return err;
+ }
+-EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+ int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen)
+ {
+diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
+index e68d42b8ce65..e288569225bd 100644
+--- a/include/linux/mlx5/port.h
++++ b/include/linux/mlx5/port.h
+@@ -61,15 +61,6 @@ enum mlx5_an_status {
+ #define MLX5_EEPROM_PAGE_LENGTH 256
+ #define MLX5_EEPROM_HIGH_PAGE_LENGTH 128
+
+-struct mlx5_module_eeprom_query_params {
+- u16 size;
+- u16 offset;
+- u16 i2c_address;
+- u32 page;
+- u32 bank;
+- u32 module_number;
+-};
+-
+ enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+@@ -142,12 +133,6 @@ enum mlx5_ptys_width {
+ MLX5_PTYS_WIDTH_12X = 1 << 4,
+ };
+
+-struct mlx5_port_eth_proto {
+- u32 cap;
+- u32 admin;
+- u32 oper;
+-};
+-
+ #define MLX5E_PROT_MASK(link_mode) (1U << link_mode)
+ #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \
+ (ext ? MLX5_GET(reg, out, ext_##field) : \
+@@ -160,14 +145,7 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+
+ int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
+ u16 *proto_oper, u8 local_port, u8 plane_index);
+-void mlx5_toggle_port_link(struct mlx5_core_dev *dev);
+-int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
+- enum mlx5_port_status status);
+-int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
+- enum mlx5_port_status *status);
+-int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration);
+-
+-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
++
+ void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ u8 port);
+@@ -175,65 +153,4 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
+ int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
+ u8 *vl_hw_cap, u8 local_port);
+
+-int mlx5_set_port_pause(struct mlx5_core_dev *dev, u32 rx_pause, u32 tx_pause);
+-int mlx5_query_port_pause(struct mlx5_core_dev *dev,
+- u32 *rx_pause, u32 *tx_pause);
+-
+-int mlx5_set_port_pfc(struct mlx5_core_dev *dev, u8 pfc_en_tx, u8 pfc_en_rx);
+-int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx,
+- u8 *pfc_en_rx);
+-
+-int mlx5_set_port_stall_watermark(struct mlx5_core_dev *dev,
+- u16 stall_critical_watermark,
+- u16 stall_minor_watermark);
+-int mlx5_query_port_stall_watermark(struct mlx5_core_dev *dev,
+- u16 *stall_critical_watermark, u16 *stall_minor_watermark);
+-
+-int mlx5_max_tc(struct mlx5_core_dev *mdev);
+-
+-int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc);
+-int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev,
+- u8 prio, u8 *tc);
+-int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group);
+-int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev,
+- u8 tc, u8 *tc_group);
+-int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw);
+-int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev,
+- u8 tc, u8 *bw_pct);
+-int mlx5_modify_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+- u8 *max_bw_value,
+- u8 *max_bw_unit);
+-int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev,
+- u8 *max_bw_value,
+- u8 *max_bw_unit);
+-int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode);
+-int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode);
+-
+-int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen);
+-int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen);
+-int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable);
+-void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+- bool *enabled);
+-int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+- u16 offset, u16 size, u8 *data);
+-int mlx5_query_module_eeprom_by_page(struct mlx5_core_dev *dev,
+- struct mlx5_module_eeprom_query_params *params, u8 *data);
+-
+-int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+-int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+-
+-int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+-int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
+-int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+-int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+-
+-int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
+- struct mlx5_port_eth_proto *eproto);
+-bool mlx5_ptys_ext_supported(struct mlx5_core_dev *mdev);
+-u32 mlx5_port_ptys2speed(struct mlx5_core_dev *mdev, u32 eth_proto_oper,
+- bool force_legacy);
+-u32 mlx5_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
+- bool force_legacy);
+-int mlx5_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+-
+ #endif /* __MLX5_PORT_H__ */
+--
+2.50.1
+
--- /dev/null
+From 747fe88e78121130b50906cddfb2b9cd819e4f56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:09 +0300
+Subject: net/mlx5e: Preserve shared buffer capacity during headroom updates
+
+From: Armen Ratner <armeng@nvidia.com>
+
+[ Upstream commit 8b0587a885fdb34fd6090a3f8625cb7ac1444826 ]
+
+When port buffer headroom changes, port_update_shared_buffer()
+recalculates the shared buffer size and splits it in a 3:1 ratio
+(lossy:lossless) - Currently, the calculation is:
+lossless = shared / 4;
+lossy = (shared / 4) * 3;
+
+Meaning, the calculation dropped the remainder of shared % 4 due to
+integer division, unintentionally reducing the total shared buffer
+by up to three cells on each update. Over time, this could shrink
+the buffer below usable size.
+
+Fix it by changing the calculation to:
+lossless = shared / 4;
+lossy = shared - lossless;
+
+This retains all buffer cells while still approximating the
+intended 3:1 split, preventing capacity loss over time.
+
+While at it, perform headroom calculations in units of cells rather than
+in bytes for more accurate calculations avoiding extra divisions.
+
+Fixes: a440030d8946 ("net/mlx5e: Update shared buffer along with device buffer changes")
+Signed-off-by: Armen Ratner <armeng@nvidia.com>
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Alexei Lazar <alazar@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/20250820133209.389065-9-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/en/port_buffer.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 5ae787656a7c..3efa8bf1d14e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ /* Total shared buffer size is split in a ratio of 3:1 between
+ * lossy and lossless pools respectively.
+ */
+- lossy_epool_size = (shared_buffer_size / 4) * 3;
+ lossless_ipool_size = shared_buffer_size / 4;
++ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
+
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ lossy_epool_size);
+@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+- u32 new_headroom_size = 0;
+- u32 current_headroom_size;
++ u32 current_headroom_cells = 0;
++ u32 new_headroom_cells = 0;
+ void *in;
+ int err;
+ int i;
+
+- current_headroom_size = port_buffer->headroom_size;
+-
+ in = kzalloc(sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
+ void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
++ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
++
+ u64 size = port_buffer->buffer[i].size;
+ u64 xoff = port_buffer->buffer[i].xoff;
+ u64 xon = port_buffer->buffer[i].xon;
+
+- new_headroom_size += size;
+ do_div(size, port_buff_cell_sz);
++ new_headroom_cells += size;
+ do_div(xoff, port_buff_cell_sz);
+ do_div(xon, port_buff_cell_sz);
+ MLX5_SET(bufferx_reg, buffer, size, size);
+@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
+ }
+
+- new_headroom_size /= port_buff_cell_sz;
+- current_headroom_size /= port_buff_cell_sz;
+- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+- new_headroom_size);
++ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
++ new_headroom_cells);
+ if (err)
+ goto out;
+
+--
+2.50.1
+
--- /dev/null
+From 1277c4cb10ef5b91c32de6367f6dc7ef8d9bc221 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:08 +0300
+Subject: net/mlx5e: Query FW for buffer ownership
+
+From: Alexei Lazar <alazar@nvidia.com>
+
+[ Upstream commit 451d2849ea66659040b59ae3cb7e50cc97404733 ]
+
+The SW currently saves local buffer ownership when setting
+the buffer.
+This means that the SW assumes it has ownership of the buffer
+after the command is set.
+
+If setting the buffer fails and we remain in FW ownership,
+the local buffer ownership state incorrectly remains as SW-owned.
+This leads to incorrect behavior in subsequent PFC commands,
+causing failures.
+
+Instead of saving local buffer ownership in SW,
+query the FW for buffer ownership when setting the buffer.
+This ensures that the buffer ownership state is accurately
+reflected, avoiding the issues caused by incorrect ownership
+states.
+
+Fixes: ecdf2dadee8e ("net/mlx5e: Receive buffer support for DCBX")
+Signed-off-by: Alexei Lazar <alazar@nvidia.com>
+Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250820133209.389065-8-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/en/dcbnl.h | 1 -
+ .../ethernet/mellanox/mlx5/core/en_dcbnl.c | 12 ++++++++---
+ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 2 ++
+ .../net/ethernet/mellanox/mlx5/core/port.c | 20 +++++++++++++++++++
+ 4 files changed, 31 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+index b59aee75de94..2c98a5299df3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
+ u8 cap;
+
+ /* Buffer configuration */
+- bool manual_buffer;
+ u32 cable_len;
+ u32 xoff;
+ u16 port_buff_cell_sz;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 8705cffc747f..b08328fe1aa3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
+ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+ {
++ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 old_cable_len = priv->dcbx.cable_len;
+@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+
+ if (MLX5_BUFFER_SUPPORTED(mdev)) {
+ pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
+- if (priv->dcbx.manual_buffer)
++ ret = mlx5_query_port_buffer_ownership(mdev,
++ &buffer_ownership);
++ if (ret)
++ netdev_err(dev,
++ "%s, Failed to get buffer ownership: %d\n",
++ __func__, ret);
++
++ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
+ ret = mlx5e_port_manual_buffer_config(priv, changed,
+ dev->mtu, &pfc_new,
+ NULL, NULL);
+@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
+ if (!changed)
+ return 0;
+
+- priv->dcbx.manual_buffer = true;
+ err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
+ buffer_size, prio2buffer);
+ return err;
+@@ -1250,7 +1257,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
+ priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
+- priv->dcbx.manual_buffer = false;
+ priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+
+ mlx5e_ets_init(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 52c9a196728d..dc6965f6746e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -351,6 +351,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+ int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+ int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++ u8 *buffer_ownership);
+ int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+ int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index dee4e44e2274..389b34d56b75 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+ return err;
+ }
+
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++ u8 *buffer_ownership)
++{
++ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
++ int err;
++
++ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
++ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
++ return 0;
++ }
++
++ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
++ if (err)
++ return err;
++
++ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
++
++ return 0;
++}
++
+ int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+ {
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+--
+2.50.1
+
--- /dev/null
+From 447056c474f58a80826c311756a25e45c0b59b49 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 2c2e2a67f3b2..6cbe8a7a0e5c 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1745,7 +1745,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1755,6 +1755,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1922,13 +1923,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From 2cb40a526b5581038a1cb3d67b3ba7d3fafdaa43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 1021681a5718..2c13de8bf16f 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From 0158f94ca328f5b081d2f45ace7506b13536bc8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 13:46:18 +0800
+Subject: net/smc: fix UAF on smcsk after smc_listen_out()
+
+From: D. Wythe <alibuda@linux.alibaba.com>
+
+[ Upstream commit d9cef55ed49117bd63695446fb84b4b91815c0b4 ]
+
+BPF CI testing report a UAF issue:
+
+ [ 16.446633] BUG: kernel NULL pointer dereference, address: 000000000000003 0
+ [ 16.447134] #PF: supervisor read access in kernel mod e
+ [ 16.447516] #PF: error_code(0x0000) - not-present pag e
+ [ 16.447878] PGD 0 P4D 0
+ [ 16.448063] Oops: Oops: 0000 [#1] PREEMPT SMP NOPT I
+ [ 16.448409] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Tainted: G OE 6.13.0-rc3-g89e8a75fda73-dirty #4 2
+ [ 16.449124] Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODUL E
+ [ 16.449502] Hardware name: QEMU Ubuntu 24.04 PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/201 4
+ [ 16.450201] Workqueue: smc_hs_wq smc_listen_wor k
+ [ 16.450531] RIP: 0010:smc_listen_work+0xc02/0x159 0
+ [ 16.452158] RSP: 0018:ffffb5ab40053d98 EFLAGS: 0001024 6
+ [ 16.452526] RAX: 0000000000000001 RBX: 0000000000000002 RCX: 000000000000030 0
+ [ 16.452994] RDX: 0000000000000280 RSI: 00003513840053f0 RDI: 000000000000000 0
+ [ 16.453492] RBP: ffffa097808e3800 R08: ffffa09782dba1e0 R09: 000000000000000 5
+ [ 16.453987] R10: 0000000000000000 R11: 0000000000000000 R12: ffffa0978274640 0
+ [ 16.454497] R13: 0000000000000000 R14: 0000000000000000 R15: ffffa09782d4092 0
+ [ 16.454996] FS: 0000000000000000(0000) GS:ffffa097bbc00000(0000) knlGS:000000000000000 0
+ [ 16.455557] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003 3
+ [ 16.455961] CR2: 0000000000000030 CR3: 0000000102788004 CR4: 0000000000770ef 0
+ [ 16.456459] PKRU: 5555555 4
+ [ 16.456654] Call Trace :
+ [ 16.456832] <TASK >
+ [ 16.456989] ? __die+0x23/0x7 0
+ [ 16.457215] ? page_fault_oops+0x180/0x4c 0
+ [ 16.457508] ? __lock_acquire+0x3e6/0x249 0
+ [ 16.457801] ? exc_page_fault+0x68/0x20 0
+ [ 16.458080] ? asm_exc_page_fault+0x26/0x3 0
+ [ 16.458389] ? smc_listen_work+0xc02/0x159 0
+ [ 16.458689] ? smc_listen_work+0xc02/0x159 0
+ [ 16.458987] ? lock_is_held_type+0x8f/0x10 0
+ [ 16.459284] process_one_work+0x1ea/0x6d 0
+ [ 16.459570] worker_thread+0x1c3/0x38 0
+ [ 16.459839] ? __pfx_worker_thread+0x10/0x1 0
+ [ 16.460144] kthread+0xe0/0x11 0
+ [ 16.460372] ? __pfx_kthread+0x10/0x1 0
+ [ 16.460640] ret_from_fork+0x31/0x5 0
+ [ 16.460896] ? __pfx_kthread+0x10/0x1 0
+ [ 16.461166] ret_from_fork_asm+0x1a/0x3 0
+ [ 16.461453] </TASK >
+ [ 16.461616] Modules linked in: bpf_testmod(OE) [last unloaded: bpf_testmod(OE) ]
+ [ 16.462134] CR2: 000000000000003 0
+ [ 16.462380] ---[ end trace 0000000000000000 ]---
+ [ 16.462710] RIP: 0010:smc_listen_work+0xc02/0x1590
+
+The direct cause of this issue is that after smc_listen_out_connected(),
+newclcsock->sk may be NULL since it will release the smcsk. Therefore,
+if the application closes the socket immediately after accept,
+newclcsock->sk can be NULL. A possible execution order could be as
+follows:
+
+smc_listen_work | userspace
+-----------------------------------------------------------------
+lock_sock(sk) |
+smc_listen_out_connected() |
+| \- smc_listen_out |
+| | \- release_sock |
+ | |- sk->sk_data_ready() |
+ | fd = accept();
+ | close(fd);
+ | \- socket->sk = NULL;
+/* newclcsock->sk is NULL now */
+SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk))
+
+Since smc_listen_out_connected() will not fail, simply swapping the order
+of the code can easily fix this issue.
+
+Fixes: 3b2dec2603d5 ("net/smc: restructure client and server code in af_smc")
+Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
+Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
+Link: https://patch.msgid.link/20250818054618.41615-1-alibuda@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index cdd445d40b94..02e08ac1da3a 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2565,8 +2565,9 @@ static void smc_listen_work(struct work_struct *work)
+ goto out_decl;
+ }
+
+- smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
++ /* smc_listen_out() will release smcsk */
++ smc_listen_out_connected(new_smc);
+ goto out_free;
+
+ out_unlock:
+--
+2.50.1
+
--- /dev/null
+From 7570611ff814ae4e5b7492c05d85b944316eb339 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 16:21:06 +0530
+Subject: net: ti: icssg-prueth: Fix HSR and switch offload Enablement during
+ firmware reload.
+
+From: MD Danish Anwar <danishanwar@ti.com>
+
+[ Upstream commit 01792bc3e5bdafa171dd83c7073f00e7de93a653 ]
+
+To enable HSR / Switch offload, certain configurations are needed.
+Currently they are done inside icssg_change_mode(). This function only
+gets called if we move from one mode to another without bringing the
+links up / down.
+
+Once in HSR / Switch mode, if we bring the links down and bring it back
+up again. The callback sequence is,
+
+- emac_ndo_stop()
+ Firmwares are stopped
+- emac_ndo_open()
+ Firmwares are loaded
+
+In this path icssg_change_mode() doesn't get called and as a result the
+configurations needed for HSR / Switch is not done.
+
+To fix this, put all these configurations in a separate function
+icssg_enable_fw_offload() and call this from both icssg_change_mode()
+and emac_ndo_open()
+
+Fixes: 56375086d093 ("net: ti: icssg-prueth: Enable HSR Tx duplication, Tx Tag and Rx Tag offload")
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Link: https://patch.msgid.link/20250814105106.1491871-1-danishanwar@ti.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 72 +++++++++++---------
+ 1 file changed, 41 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index ddbc4624ae88..055c5765bd86 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -240,6 +240,44 @@ static void prueth_emac_stop(struct prueth *prueth)
+ }
+ }
+
++static void icssg_enable_fw_offload(struct prueth *prueth)
++{
++ struct prueth_emac *emac;
++ int mac;
++
++ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
++ emac = prueth->emac[mac];
++ if (prueth->is_hsr_offload_mode) {
++ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
++ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
++ else
++ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
++ }
++
++ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
++ if (netif_running(emac->ndev)) {
++ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
++ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_BLOCK,
++ true);
++ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
++ BIT(emac->port_id) | DEFAULT_PORT_MASK,
++ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
++ true);
++ if (prueth->is_hsr_offload_mode)
++ icssg_vtbl_modify(emac, DEFAULT_VID,
++ DEFAULT_PORT_MASK,
++ DEFAULT_UNTAG_MASK, true);
++ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
++ if (prueth->is_switch_mode)
++ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
++ }
++ }
++ }
++}
++
+ static int prueth_emac_common_start(struct prueth *prueth)
+ {
+ struct prueth_emac *emac;
+@@ -690,6 +728,7 @@ static int emac_ndo_open(struct net_device *ndev)
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
++ icssg_enable_fw_offload(prueth);
+ }
+
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+@@ -1146,8 +1185,7 @@ static int prueth_emac_restart(struct prueth *prueth)
+
+ static void icssg_change_mode(struct prueth *prueth)
+ {
+- struct prueth_emac *emac;
+- int mac, ret;
++ int ret;
+
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+@@ -1155,35 +1193,7 @@ static void icssg_change_mode(struct prueth *prueth)
+ return;
+ }
+
+- for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+- emac = prueth->emac[mac];
+- if (prueth->is_hsr_offload_mode) {
+- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+- else
+- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+- }
+-
+- if (netif_running(emac->ndev)) {
+- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_BLOCK,
+- true);
+- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+- BIT(emac->port_id) | DEFAULT_PORT_MASK,
+- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+- true);
+- if (prueth->is_hsr_offload_mode)
+- icssg_vtbl_modify(emac, DEFAULT_VID,
+- DEFAULT_PORT_MASK,
+- DEFAULT_UNTAG_MASK, true);
+- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+- if (prueth->is_switch_mode)
+- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+- }
+- }
++ icssg_enable_fw_offload(prueth);
+ }
+
+ static int prueth_netdevice_port_link(struct net_device *ndev,
+--
+2.50.1
+
--- /dev/null
+From 78d9611546612fc3bf0f9efbdbc3d6c0b0ed7387 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:45:07 +0900
+Subject: net: usb: asix_devices: Fix PHY address mask in MDIO bus
+ initialization
+
+From: Yuichiro Tsuji <yuichtsu@amazon.com>
+
+[ Upstream commit 24ef2f53c07f273bad99173e27ee88d44d135b1c ]
+
+Syzbot reported shift-out-of-bounds exception on MDIO bus initialization.
+
+The PHY address should be masked to 5 bits (0-31). Without this
+mask, invalid PHY addresses could be used, potentially causing issues
+with MDIO bus operations.
+
+Fix this by masking the PHY address with 0x1f (31 decimal) to ensure
+it stays within the valid range.
+
+Fixes: 4faff70959d5 ("net: usb: asix_devices: add phy_mask for ax88772 mdio bus")
+Reported-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=20537064367a0f98d597
+Tested-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Signed-off-by: Yuichiro Tsuji <yuichtsu@amazon.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250818084541.1958-1-yuichtsu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/asix_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index d9f5942ccc44..792ddda1ad49 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,7 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+- priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+--
+2.50.1
+
--- /dev/null
+From 691ed4a02568b445075743abdca0ac6f40a8fe00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 19:25:59 +0530
+Subject: net: xilinx: axienet: Fix RX skb ring management in DMAengine mode
+
+From: Suraj Gupta <suraj.gupta2@amd.com>
+
+[ Upstream commit fd980bf6e9cdae885105685259421164f843ca55 ]
+
+Submit multiple descriptors in axienet_rx_cb() to fill Rx skb ring. This
+ensures the ring "catches up" on previously missed allocations.
+
+Increment Rx skb ring head pointer after BD is successfully allocated.
+Previously, head pointer was incremented before verifying if descriptor is
+successfully allocated and has valid entries, which could lead to ring
+state inconsistency if descriptor setup failed.
+
+These changes improve reliability by maintaining adequate descriptor
+availability and ensuring proper ring buffer state management.
+
+Fixes: 6a91b846af85 ("net: axienet: Introduce dmaengine support")
+Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
+Link: https://patch.msgid.link/20250813135559.1555652-1-suraj.gupta2@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 2d47b35443af..1775e060d39d 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1119,6 +1119,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ struct axienet_local *lp = data;
+ struct sk_buff *skb;
+ u32 *app_metadata;
++ int i;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+ skb = skbuf_dma->skb;
+@@ -1137,7 +1138,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ u64_stats_add(&lp->rx_packets, 1);
+ u64_stats_add(&lp->rx_bytes, rx_len);
+ u64_stats_update_end(&lp->rx_stat_sync);
+- axienet_rx_submit_desc(lp->ndev);
++
++ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
++ RX_BUF_NUM_DEFAULT); i++)
++ axienet_rx_submit_desc(lp->ndev);
+ dma_async_issue_pending(lp->rx_chan);
+ }
+
+@@ -1394,7 +1398,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ if (!skbuf_dma)
+ return;
+
+- lp->rx_ring_head++;
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ return;
+@@ -1419,6 +1422,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ skbuf_dma->desc = dma_rx_desc;
+ dma_rx_desc->callback_param = lp;
+ dma_rx_desc->callback_result = axienet_dma_rx_cb;
++ lp->rx_ring_head++;
+ dmaengine_submit(dma_rx_desc);
+
+ return;
+--
+2.50.1
+
--- /dev/null
+From 1d3cc8776025f1c96f3d83ae3056c4e1e7892c73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at PRE_ROUTING stage.
+
+Instead of checking hook just check if the skb already has a route
+attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 87fd945a0d27..0d3cb2ba6fc8 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) ||
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 9ae2b2725bf9..c3d64c4b69d7 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From 2010e2a5b27a152db64cfeaaf2eae376649b20b2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 12:09:18 +0530
+Subject: Octeontx2-af: Skip overlap check for SPI field
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 8c5d95988c34f0aeba1f34cd5e4ba69494c90c5f ]
+
+Octeontx2/CN10K silicon supports generating a 256-bit key per packet.
+The specific fields to be extracted from a packet for key generation
+are configurable via a Key Extraction (MKEX) Profile.
+
+The AF driver scans the configured extraction profile to ensure that
+fields from upper layers do not overwrite fields from lower layers in
+the key.
+
+Example Packet Field Layout:
+LA: DMAC + SMAC
+LB: VLAN
+LC: IPv4/IPv6
+LD: TCP/UDP
+
+Valid MKEX Profile Configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[30-31]
+
+Invalid MKEX profile configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[2-3] // Overlaps with DMAC field
+
+In another scenario, if the MKEX profile is configured to extract
+the SPI field from both AH and ESP headers at the same key offset,
+the driver rejects this configuration. In regular traffic,
+ipsec packet will be having either AH(LD) or ESP (LE). This patch
+relaxes the check for the same.
+
+Fixes: 12aa0a3b93f3 ("octeontx2-af: Harden rule validation.")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Link: https://patch.msgid.link/20250820063919.1463518-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 150635de2bd5..0c484120be79 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+
+- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
++ /* Allow extracting SPI field from AH and ESP headers at same offset */
++ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ *features |= BIT_ULL(NPC_IPSEC_SPI);
+
+--
+2.50.1
+
--- /dev/null
+From 7dd9de8d40e0058c3110f51820d2ad34439591e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 10:10:29 +0200
+Subject: phy: mscc: Fix timestamping for vsc8584
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit bc1a59cff9f797bfbf8f3104507584d89e9ecf2e ]
+
+There was a problem when we received frames and the frames were
+timestamped. The driver is configured to store the nanosecond part of
+the timestamp in the ptp reserved bits and it would take the second part
+by reading the LTC. The problem is that when reading the LTC we are in
+atomic context and to read the second part will go over mdio bus which
+might sleep, so we get an error.
+The fix consists in actually putting all the frames in a queue and starting the
+aux work and in that work to read the LTC and then calculate the full
+received time.
+
+Fixes: 7d272e63e0979d ("net: phy: mscc: timestamping and PHC support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250818081029.1300780-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc.h | 12 ++++++++
+ drivers/net/phy/mscc/mscc_main.c | 12 ++++++++
+ drivers/net/phy/mscc/mscc_ptp.c | 49 ++++++++++++++++++++++++--------
+ 3 files changed, 61 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 6a3d8a754eb8..58c6d47fbe04 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat {
+ u16 mask;
+ };
+
++struct vsc8531_skb_cb {
++ u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++ ((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+@@ -410,6 +417,11 @@ struct vsc8531_private {
+ */
+ struct mutex ts_lock;
+ struct mutex phc_lock;
++
++ /* list of skbs that were received and need timestamp information but it
++ * didn't received it yet
++ */
++ struct sk_buff_head rx_skbs_list;
+ };
+
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 6f74ce0ab1aa..42cafa68c400 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++ struct vsc8531_private *priv = phydev->priv;
++
++ skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index bce6cc5b04ee..80992827a3bd 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1191,9 +1191,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+- struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct vsc85xx_ptphdr *ptphdr;
+- struct timespec64 ts;
+ unsigned long ns;
+
+ if (!vsc8531->ptp->configured)
+@@ -1203,27 +1201,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ type == PTP_CLASS_NONE)
+ return false;
+
+- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ if (!ptphdr)
+ return false;
+
+- shhwtstamps = skb_hwtstamps(skb);
+- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ ns = ntohl(ptphdr->rsrvd2);
+
+- /* nsec is in reserved field */
+- if (ts.tv_nsec < ns)
+- ts.tv_sec--;
++ VSC8531_SKB_CB(skb)->ns = ns;
++ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+
+- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx(skb);
++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+
+ return true;
+ }
+
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++ struct skb_shared_hwtstamps *shhwtstamps = NULL;
++ struct phy_device *phydev = ptp->phydev;
++ struct vsc8531_private *priv = phydev->priv;
++ struct sk_buff_head received;
++ struct sk_buff *rx_skb;
++ struct timespec64 ts;
++ unsigned long flags;
++
++ __skb_queue_head_init(&received);
++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++ vsc85xx_gettime(info, &ts);
++ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++ shhwtstamps = skb_hwtstamps(rx_skb);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++ ts.tv_sec--;
++
++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++ VSC8531_SKB_CB(rx_skb)->ns);
++ netif_rx(rx_skb);
++ }
++
++ return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .owner = THIS_MODULE,
+ .name = "VSC85xx timer",
+@@ -1237,6 +1260,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .adjfine = &vsc85xx_adjfine,
+ .gettime64 = &vsc85xx_gettime,
+ .settime64 = &vsc85xx_settime,
++ .do_aux_work = &vsc85xx_do_aux_work,
+ };
+
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1564,6 +1588,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+
+ mutex_init(&vsc8531->phc_lock);
+ mutex_init(&vsc8531->ts_lock);
++ skb_queue_head_init(&vsc8531->rx_skbs_list);
+
+ /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ * the same GPIO can be requested by all the PHYs of the same package.
+--
+2.50.1
+
--- /dev/null
+From d4790b2a1c9e1bdff3d687fdb6425286dc9205a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:58 +0800
+Subject: ppp: fix race conditions in ppp_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 0417adf367a0af11adf7ace849af4638cfb573f7 ]
+
+ppp_fill_forward_path() has two race conditions:
+
+1. The ppp->channels list can change between list_empty() and
+ list_first_entry(), as ppp_lock() is not held. If the only channel
+ is deleted in ppp_disconnect_channel(), list_first_entry() may
+ access an empty head or a freed entry, and trigger a panic.
+
+2. pch->chan can be NULL. When ppp_unregister_channel() is called,
+ pch->chan is set to NULL before pch is removed from ppp->channels.
+
+Fix these by using a lockless RCU approach:
+- Use list_first_or_null_rcu() to safely test and access the first list
+ entry.
+- Convert list modifications on ppp->channels to their RCU variants and
+ add synchronize_net() after removal.
+- Check for a NULL pch->chan before dereferencing it.
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-2-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/ppp_generic.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index 1420c4efa48e..0553b0b356b3 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+- if (list_empty(&ppp->channels))
++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++ if (!pch)
++ return -ENODEV;
++
++ chan = READ_ONCE(pch->chan);
++ if (!chan)
+ return -ENODEV;
+
+- pch = list_first_entry(&ppp->channels, struct channel, clist);
+- chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+- pch->chan = NULL;
++ WRITE_ONCE(pch->chan, NULL);
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+@@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+- list_add_tail(&pch->clist, &ppp->channels);
++ list_add_tail_rcu(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ refcount_inc(&ppp->file.refcnt);
+@@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch)
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+- list_del(&pch->clist);
++ list_del_rcu(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
++ synchronize_net();
+ if (refcount_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+--
+2.50.1
+
--- /dev/null
+From b28fb86801c668aebb17d47f62aec98ecc0b2787 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:59 +0530
+Subject: RDMA/bnxt_re: Fix a possible memory leak in the driver
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit ba60a1e8cbbd396c69ff9c8bc3242f5ab133e38a ]
+
+The GID context reuse logic requires the context memory to be
+not freed if and when DEL_GID firmware command fails. But, if
+there's no subsequent ADD_GID to reuse it, the context memory
+must be freed when the driver is unloaded. Otherwise it leads
+to a memory leak.
+
+Below is the kmemleak trace reported:
+
+unreferenced object 0xffff88817a4f34d0 (size 8):
+ comm "insmod", pid 1072504, jiffies 4402561550
+ hex dump (first 8 bytes):
+ 01 00 00 00 00 00 00 00 ........
+ backtrace (crc ccaa009e):
+ __kmalloc_cache_noprof+0x33e/0x400
+ 0xffffffffc2db9d48
+ add_modify_gid+0x5e0/0xb60 [ib_core]
+ __ib_cache_gid_add+0x213/0x350 [ib_core]
+ update_gid+0xf2/0x180 [ib_core]
+ enum_netdev_ipv4_ips+0x3f3/0x690 [ib_core]
+ enum_all_gids_of_dev_cb+0x125/0x1b0 [ib_core]
+ ib_enum_roce_netdev+0x14b/0x250 [ib_core]
+ ib_cache_setup_one+0x2e5/0x540 [ib_core]
+ ib_register_device+0x82c/0xf10 [ib_core]
+ 0xffffffffc2df5ad9
+ 0xffffffffc2da8b07
+ 0xffffffffc2db174d
+ auxiliary_bus_probe+0xa5/0x120
+ really_probe+0x1e4/0x850
+ __driver_probe_device+0x18f/0x3d0
+
+Fixes: 4a62c5e9e2e1 ("RDMA/bnxt_re: Do not free the ctx_tbl entry if delete GID fails")
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-4-kalesh-anakkur.purayil@broadcom.com
+Reviewed-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 9bd837a5b8a1..b213ecca2854 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -1615,6 +1615,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
+ rdev->nqr = NULL;
+ }
+
++/* When DEL_GID fails, driver is not freeing GID ctx memory.
++ * To avoid the memory leak, free the memory during unload
++ */
++static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
++{
++ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
++ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
++ int i;
++
++ if (!sgid_tbl->active)
++ return;
++
++ ctx_tbl = sgid_tbl->ctx;
++ for (i = 0; i < sgid_tbl->max; i++) {
++ if (sgid_tbl->hw_id[i] == 0xFFFF)
++ continue;
++
++ ctx = ctx_tbl[i];
++ kfree(ctx);
++ }
++}
++
+ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ {
+ u8 type;
+@@ -1623,6 +1645,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
+ cancel_delayed_work_sync(&rdev->worker);
+
++ bnxt_re_free_gid_ctx(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+--
+2.50.1
+
--- /dev/null
+From 8beee777c47177a2d9e9c4ff1d7a616f16edf408 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:57 +0530
+Subject: RDMA/bnxt_re: Fix to do SRQ armena by default
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 6296f9a5293ada28558f2867ac54c487e1e2b9f2 ]
+
+Whenever SRQ is created, make sure SRQ arm enable is always
+set. Driver is always ready to receive SRQ ASYNC event.
+
+Additional note -
+There is no need to do srq arm enable conditionally.
+See bnxt_qplib_armen_db in bnxt_qplib_create_cq().
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-2-kalesh-anakkur.purayil@broadcom.com
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 7436ce551579..3170a3e2df24 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -704,8 +704,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+- if (srq->threshold)
+- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
++ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+--
+2.50.1
+
--- /dev/null
+From 991b336316de5eaba323868a080c0ed3ff744b6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 02922a0987ad..b785d9e7774c 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From e92d3958b21631dff70f135323020a439ec02893 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:58 +0530
+Subject: RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 666bce0bd7e771127cb0cda125cc9d32d9f9f15d ]
+
+There should not be any checks of current workload to set
+srq_limit value to SRQ hw context.
+
+Remove all such workload checks and make a direct call to
+set srq_limit via doorbell SRQ_ARM.
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-3-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 ++-----
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 27 ------------------------
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 --
+ 3 files changed, 2 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 4a3ce61a3bba..b222bf4f38e1 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1874,7 +1874,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+- int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+@@ -1886,11 +1885,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+- if (rc) {
+- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
+- return rc;
+- }
++ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
++
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to Build and send response back to udata */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 3170a3e2df24..0f50c1ffbe01 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -705,7 +705,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+- srq->arm_req = false;
+
+ return 0;
+ fail:
+@@ -715,24 +714,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ return rc;
+ }
+
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq)
+-{
+- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+- u32 count;
+-
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- if (count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- } else {
+- /* Deferred arming */
+- srq->arm_req = true;
+- }
+-
+- return 0;
+-}
+-
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+ {
+@@ -774,7 +755,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe;
+ struct sq_sge *hw_sge;
+- u32 count = 0;
+ int i, next;
+
+ spin_lock(&srq_hwq->lock);
+@@ -806,15 +786,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+
+- spin_lock(&srq_hwq->lock);
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+- if (srq->arm_req == true && count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 6f02954eb142..fd4f9fada46a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -521,8 +521,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ srqn_handler_t srq_handler);
+ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq);
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+--
+2.50.1
+
--- /dev/null
+From 43a5b987d730c1aaff513ab9b762718bc08f73ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:53:55 +0800
+Subject: RDMA/erdma: Fix ignored return value of init_kernel_qp
+
+From: Boshi Yu <boshiyu@linux.alibaba.com>
+
+[ Upstream commit d5c74713f0117d07f91eb48b10bc2ad44e23c9b9 ]
+
+The init_kernel_qp interface may fail. Check its return value and free
+related resources properly when it does.
+
+Fixes: 155055771704 ("RDMA/erdma: Add verbs implementation")
+Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
+Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
+Link: https://patch.msgid.link/20250725055410.67520-3-boshiyu@linux.alibaba.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/erdma/erdma_verbs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index e56ba86d460e..a50fb03c9643 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -991,7 +991,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ if (ret)
+ goto err_out_cmd;
+ } else {
+- init_kernel_qp(dev, qp, attrs);
++ ret = init_kernel_qp(dev, qp, attrs);
++ if (ret)
++ goto err_out_xa;
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+--
+2.50.1
+
--- /dev/null
+From 8148fa1cd999c533f55372e2920af25f890b723f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 20:26:02 +0800
+Subject: RDMA/hns: Fix dip entries leak on devices newer than hip09
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fa2e2d31ee3b7212079323b4b09201ef68af3a97 ]
+
+DIP algorithm is also supported on devices newer than hip09, so free
+dip entries too.
+
+Fixes: f91696f2f053 ("RDMA/hns: Support congestion control type selection according to the FW")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20250812122602.3524602-1-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index a7b3e4248ebb..6a6daca9f606 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3028,7 +3028,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+ if (!hr_dev->is_vf)
+ hns_roce_free_link_table(hr_dev);
+
+- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ free_dip_entry(hr_dev);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 908876f406f6fe540d571f3aa0af052f2624731b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Jul 2025 15:53:45 +0800
+Subject: RDMA/hns: Fix querying wrong SCC context for DIP algorithm
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit 085a1b42e52750769a3fa29d4da6c05ab56f18f8 ]
+
+When using DIP algorithm, all QPs establishing connections with
+the same destination IP share the same SCC, which is indexed by
+dip_idx, but dip_idx isn't necessarily equal to qpn. Therefore,
+dip_idx should be used to query SCC context instead of qpn.
+
+Fixes: 124a9fbe43aa ("RDMA/hns: Append SCC context to the raw dump of QPC")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20250726075345.846957-1-huangjunxian6@hisilicon.com
+Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++--
+ drivers/infiniband/hw/hns/hns_roce_restrack.c | 9 ++++++++-
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 53fe0ef3883d..a7b3e4248ebb 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5498,7 +5498,7 @@ static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
+ return ret;
+ }
+
+-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
++static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
+ void *buffer)
+ {
+ struct hns_roce_v2_scc_context *context;
+@@ -5510,7 +5510,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+- qpn);
++ sccn);
+ if (ret)
+ goto out;
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index f637b73b946e..230187dda6a0 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ struct hns_roce_v2_qp_context qpc;
+ struct hns_roce_v2_scc_context sccc;
+ } context = {};
++ u32 sccn = hr_qp->qpn;
+ int ret;
+
+ if (!hr_dev->hw->query_qpc)
+@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ !hr_dev->hw->query_sccc)
+ goto out;
+
+- ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
++ if (hr_qp->cong_type == CONG_TYPE_DIP) {
++ if (!hr_qp->dip)
++ goto out;
++ sccn = hr_qp->dip->dip_idx;
++ }
++
++ ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
+ if (ret)
+ ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ "failed to query SCCC, ret = %d.\n",
+--
+2.50.1
+
--- /dev/null
+From 407cfe7275b797e69fe9674cae3091573a644ab8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 15:16:31 +0800
+Subject: rtase: Fix Rx descriptor CRC error bit definition
+
+From: Justin Lai <justinlai0215@realtek.com>
+
+[ Upstream commit 065c31f2c6915b38f45b1c817b31f41f62eaa774 ]
+
+The CRC error bit is located at bit 17 in the Rx descriptor, but the
+driver was incorrectly using bit 16. Fix it.
+
+Fixes: a36e9f5cfe9e ("rtase: Add support for a pci table in this module")
+Signed-off-by: Justin Lai <justinlai0215@realtek.com>
+Link: https://patch.msgid.link/20250813071631.7566-1-justinlai0215@realtek.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/rtase/rtase.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
+index 4a4434869b10..b3310e342ccf 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase.h
++++ b/drivers/net/ethernet/realtek/rtase/rtase.h
+@@ -239,7 +239,7 @@ union rtase_rx_desc {
+ #define RTASE_RX_RES BIT(20)
+ #define RTASE_RX_RUNT BIT(19)
+ #define RTASE_RX_RWT BIT(18)
+-#define RTASE_RX_CRC BIT(16)
++#define RTASE_RX_CRC BIT(17)
+ #define RTASE_RX_V6F BIT(31)
+ #define RTASE_RX_V4F BIT(30)
+ #define RTASE_RX_UDPT BIT(29)
+--
+2.50.1
+
--- /dev/null
+From 5049ac82cd3fc92d95b16d696eb455f3477d6fa3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Aug 2025 22:42:15 +0200
+Subject: rust: alloc: fix `rusttest` by providing `Cmalloc::aligned_layout`
+ too
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit 0f580d5d3d9d9cd0953695cd32e43aac3a946338 ]
+
+Commit fde578c86281 ("rust: alloc: replace aligned_size() with
+Kmalloc::aligned_layout()") provides a public `aligned_layout` function
+in `Kmalloc`, but not in `Cmalloc`, and thus uses of it will trigger an
+error in `rusttest`.
+
+Such a user appeared in the following commit 22ab0641b939 ("rust: drm:
+ensure kmalloc() compatible Layout"):
+
+ error[E0599]: no function or associated item named `aligned_layout` found for struct `alloc::allocator_test::Cmalloc` in the current scope
+ --> rust/kernel/drm/device.rs:100:31
+ |
+ 100 | let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
+ | ^^^^^^^^^^^^^^ function or associated item not found in `Cmalloc`
+ |
+ ::: rust/kernel/alloc/allocator_test.rs:19:1
+ |
+ 19 | pub struct Cmalloc;
+ | ------------------ function or associated item `aligned_layout` not found for this struct
+
+Thus add an equivalent one for `Cmalloc`.
+
+Fixes: fde578c86281 ("rust: alloc: replace aligned_size() with Kmalloc::aligned_layout()")
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://lore.kernel.org/r/20250816204215.2719559-1-ojeda@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/alloc/allocator_test.rs | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
+index c37d4c0c64e9..ec13385489df 100644
+--- a/rust/kernel/alloc/allocator_test.rs
++++ b/rust/kernel/alloc/allocator_test.rs
+@@ -22,6 +22,17 @@ pub type Kmalloc = Cmalloc;
+ pub type Vmalloc = Kmalloc;
+ pub type KVmalloc = Kmalloc;
+
++impl Cmalloc {
++ /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++ /// `layout`.
++ pub fn aligned_layout(layout: Layout) -> Layout {
++ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++ // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++ // a properly aligned object (see comments in `kmalloc()` for more information).
++ layout.pad_to_align()
++ }
++}
++
+ extern "C" {
+ #[link_name = "aligned_alloc"]
+ fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
+--
+2.50.1
+
--- /dev/null
+From ac78b265145e336328ffeb9bb5f5de6f110f477d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 17:48:06 +0200
+Subject: rust: alloc: replace aligned_size() with Kmalloc::aligned_layout()
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit fde578c86281f27b182680c7642836a0dbbd0be7 ]
+
+aligned_size() dates back to when Rust did support kmalloc() only, but
+is now used in ReallocFunc::call() and hence for all allocators.
+
+However, the additional padding applied by aligned_size() is only
+required by the kmalloc() allocator backend.
+
+Hence, replace aligned_size() with Kmalloc::aligned_layout() and use it
+for the affected allocators, i.e. kmalloc() and kvmalloc(), only.
+
+While at it, make Kmalloc::aligned_layout() public, such that Rust
+abstractions, which have to call subsystem specific kmalloc() based
+allocation primitives directly, can make use of it.
+
+Fixes: 8a799831fc63 ("rust: alloc: implement `ReallocFunc`")
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20250731154919.4132-2-dakr@kernel.org
+[ Remove `const` from Kmalloc::aligned_layout(). - Danilo ]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/alloc/allocator.rs | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
+index 439985e29fbc..e4cd29100007 100644
+--- a/rust/kernel/alloc/allocator.rs
++++ b/rust/kernel/alloc/allocator.rs
+@@ -43,17 +43,6 @@ pub struct Vmalloc;
+ /// For more details see [self].
+ pub struct KVmalloc;
+
+-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
+-fn aligned_size(new_layout: Layout) -> usize {
+- // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+- let layout = new_layout.pad_to_align();
+-
+- // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
+- // which together with the slab guarantees means the `krealloc` will return a properly aligned
+- // object (see comments in `kmalloc()` for more information).
+- layout.size()
+-}
+-
+ /// # Invariants
+ ///
+ /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+@@ -87,7 +76,7 @@ impl ReallocFunc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+- let size = aligned_size(layout);
++ let size = layout.size();
+ let ptr = match ptr {
+ Some(ptr) => {
+ if old_layout.size() == 0 {
+@@ -122,6 +111,17 @@ impl ReallocFunc {
+ }
+ }
+
++impl Kmalloc {
++ /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++ /// `layout`.
++ pub fn aligned_layout(layout: Layout) -> Layout {
++ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++ // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++ // a properly aligned object (see comments in `kmalloc()` for more information).
++ layout.pad_to_align()
++ }
++}
++
+ // SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+ // - memory remains valid until it is explicitly freed,
+ // - passing a pointer to a valid memory allocation is OK,
+@@ -134,6 +134,8 @@ unsafe impl Allocator for Kmalloc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
++ let layout = Kmalloc::aligned_layout(layout);
++
+ // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
+ unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+ }
+@@ -175,6 +177,10 @@ unsafe impl Allocator for KVmalloc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
++ // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
++ // compatible layout.
++ let layout = Kmalloc::aligned_layout(layout);
++
+ // TODO: Support alignments larger than PAGE_SIZE.
+ if layout.align() > bindings::PAGE_SIZE {
+ pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
+--
+2.50.1
+
--- /dev/null
+From 176b277fa143196d6c49fb09c599a90adbacb3bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index 5d9effb0867c..e74eb8f9b23a 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -66,23 +66,27 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 24aa9c29a265a527381d14213b22967e37e94bd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index e74eb8f9b23a..41a0d2066fa0 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -84,7 +85,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 1455e1f62644fea9a1ef8fcbe18a87858f23d4f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 17:04:27 +0200
+Subject: s390/mm: Do not map lowcore with identity mapping
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 93f616ff870a1fb7e84d472cad0af651b18f9f87 ]
+
+Since the identity mapping is pinned to address zero the lowcore is always
+also mapped to address zero, this happens regardless of the relocate_lowcore
+command line option. If the option is specified the lowcore is mapped
+twice, instead of only once.
+
+This means that NULL pointer accesses will succeed instead of causing an
+exception (low address protection still applies, but covers only parts).
+To fix this never map the first two pages of physical memory with the
+identity mapping.
+
+Fixes: 32db401965f1 ("s390/mm: Pin identity mapping base to zero")
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/boot/vmem.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 3fa28db2fe59..14aee8524021 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -471,6 +471,9 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
+ lowcore_address + sizeof(struct lowcore),
+ POPULATE_LOWCORE);
+ for_each_physmem_usable_range(i, &start, &end) {
++ /* Do not map lowcore with identity mapping */
++ if (!start)
++ start = sizeof(struct lowcore);
+ pgtable_populate((unsigned long)__identity_va(start),
+ (unsigned long)__identity_va(end),
+ POPULATE_IDENTITY);
+--
+2.50.1
+
--- /dev/null
+From 4b08cfee2b233bd6db64724f3138edd7ab52fc18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 97e9ca5a2a02..59ff6bb11d84 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
iio-imu-inv_icm42600-convert-to-uxx-and-sxx-integer-types.patch
iio-imu-inv_icm42600-change-invalid-data-error-to-ebusy.patch
x86-cpu-hygon-add-missing-resctrl_cpu_detect-in-bsp_init-helper.patch
+spi-spi-fsl-lpspi-clamp-too-high-speed_hz.patch
+drm-nouveau-nvif-fix-potential-memory-leak-in-nvif_v.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+cgroup-cpuset-fix-a-partition-error-with-cpu-hotplug.patch
+drm-tests-fix-endian-warning.patch
+drm-panic-move-drawing-functions-to-drm_draw.patch
+drm-format-helper-add-conversion-from-xrgb8888-to-bg.patch
+drm-format-helper-move-helpers-for-pixel-conversion-.patch
+drm-format-helper-add-generic-conversion-to-32-bit-f.patch
+drm-tests-do-not-use-drm_fb_blit-in-format-helper-te.patch
+drm-tests-fix-drm_test_fb_xrgb8888_to_xrgb2101010-on.patch
+iosys-map-fix-undefined-behavior-in-iosys_map_clear.patch
+rust-alloc-replace-aligned_size-with-kmalloc-aligned.patch
+rdma-erdma-fix-ignored-return-value-of-init_kernel_q.patch
+rdma-hns-fix-querying-wrong-scc-context-for-dip-algo.patch
+rdma-bnxt_re-fix-to-do-srq-armena-by-default.patch
+rdma-bnxt_re-fix-to-remove-workload-check-in-srq-lim.patch
+rdma-bnxt_re-fix-a-possible-memory-leak-in-the-drive.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+rdma-hns-fix-dip-entries-leak-on-devices-newer-than-.patch
+net-xilinx-axienet-fix-rx-skb-ring-management-in-dma.patch
+net-bridge-fix-soft-lockup-in-br_multicast_query_exp.patch
+rtase-fix-rx-descriptor-crc-error-bit-definition.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+bluetooth-hci_sync-fix-scan-state-after-pa-sync-has-.patch
+bluetooth-btmtk-fix-wait_on_bit_timeout-interruption.patch
+bluetooth-hci_sync-prevent-unintended-pa-sync-when-s.patch
+bluetooth-hci_event-fix-mtu-for-bn-0-in-cis-establis.patch
+bluetooth-hci_conn-do-return-error-from-hci_enhanced.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+rust-alloc-fix-rusttest-by-providing-cmalloc-aligned.patch
+drm-hisilicon-hibmc-refactored-struct-hibmc_drm_priv.patch
+drm-hisilicon-hibmc-fix-the-i2c-device-resource-leak.patch
+drm-hisilicon-hibmc-fix-the-hibmc-loaded-failed-bug.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+drm-amd-display-add-null-pointer-check-in-mod_hdcp_h.patch
+drm-amd-display-don-t-print-errors-for-nonexistent-c.patch
+net-gso-forbid-ipv6-tso-with-extensions-on-devices-w.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+net-ethernet-mtk_ppe-add-rcu-lock-around-dev_fill_fo.patch
+ppp-fix-race-conditions-in-ppp_fill_forward_path.patch
+net-ti-icssg-prueth-fix-hsr-and-switch-offload-enabl.patch
+cifs-fix-oops-due-to-uninitialised-variable.patch
+phy-mscc-fix-timestamping-for-vsc8584.patch
+net-usb-asix_devices-fix-phy-address-mask-in-mdio-bu.patch
+gve-prevent-ethtool-ops-after-shutdown.patch
+net-smc-fix-uaf-on-smcsk-after-smc_listen_out.patch
+microchip-lan865x-fix-missing-netif_start_queue-call.patch
+microchip-lan865x-fix-missing-timer-increment-config.patch
+loongarch-optimize-module-load-time-by-optimizing-pl.patch
+s390-mm-do-not-map-lowcore-with-identity-mapping.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+igc-fix-disabling-l1.2-pci-e-link-substate-on-i226-o.patch
+net-dsa-microchip-fix-ksz9477-hsr-port-setup-issue.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+alsa-timer-fix-ida_free-call-while-not-allocated.patch
+bonding-update-lacp-activity-flag-after-setting-lacp.patch
+bonding-send-lacpdus-periodically-in-passive-mode-af.patch
+octeontx2-af-skip-overlap-check-for-spi-field.patch
+net-mlx5-base-ecvf-devlink-port-attrs-from-0.patch
+net-mlx5-relocate-function-declarations-from-port.h-.patch
+net-mlx5-add-ifc-bits-and-enums-for-buf_ownership.patch
+net-mlx5e-query-fw-for-buffer-ownership.patch
+net-mlx5e-preserve-shared-buffer-capacity-during-hea.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
--- /dev/null
+From bc1e4afc6859be5f0e2a12093f5034fd11b29d5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 12:07:42 +0200
+Subject: spi: spi-fsl-lpspi: Clamp too high speed_hz
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit af357a6a3b7d685e7aa621c6fb1d4ed6c349ec9e ]
+
+Currently the driver is not able to handle the case that a SPI device
+specifies a higher spi-max-frequency than half of per-clk:
+
+ per-clk should be at least two times of transfer speed
+
+Fix this by clamping to the max possible value and use the minimum SCK
+period of 2 cycles.
+
+Fixes: 77736a98b859 ("spi: lpspi: add the error info of transfer speed setting")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://patch.msgid.link/20250807100742.9917-1-wahrenst@gmx.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-lpspi.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 29b9676fe43d..f8cacb9c7408 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ }
+
+ if (config.speed_hz > perclk_rate / 2) {
+- dev_err(fsl_lpspi->dev,
+- "per-clk should be at least two times of transfer speed");
+- return -EINVAL;
++ div = 2;
++ } else {
++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+ }
+
+- div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+-
+ for (prescale = 0; prescale <= prescale_max; prescale++) {
+ scldiv = div / (1 << prescale) - 2;
+ if (scldiv >= 0 && scldiv < 256) {
+--
+2.50.1
+
--- /dev/null
+From 5e9dc39b13cc89dcdb75f488203d94d46aa00cac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 09:43:17 +0800
+Subject: ALSA: timer: fix ida_free call while not allocated
+
+From: Dewei Meng <mengdewei@cqsoftware.com.cn>
+
+[ Upstream commit 5003a65790ed66be882d1987cc2ca86af0de3db1 ]
+
+In the snd_utimer_create() function, if the kasprintf() function returns
+NULL, snd_utimer_put_id() will be called, which finally uses ida_free()
+to free the unallocated id 0.
+
+syzkaller reported the following information:
+ ------------[ cut here ]------------
+ ida_free called for id=0 which is not allocated.
+ WARNING: CPU: 1 PID: 1286 at lib/idr.c:592 ida_free+0x1fd/0x2f0 lib/idr.c:592
+ Modules linked in:
+ CPU: 1 UID: 0 PID: 1286 Comm: syz-executor164 Not tainted 6.15.8 #3 PREEMPT(lazy)
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-4.fc42 04/01/2014
+ RIP: 0010:ida_free+0x1fd/0x2f0 lib/idr.c:592
+ Code: f8 fc 41 83 fc 3e 76 69 e8 70 b2 f8 (...)
+ RSP: 0018:ffffc900007f79c8 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: 1ffff920000fef3b RCX: ffffffff872176a5
+ RDX: ffff88800369d200 RSI: 0000000000000000 RDI: ffff88800369d200
+ RBP: 0000000000000000 R08: ffffffff87ba60a5 R09: 0000000000000000
+ R10: 0000000000000001 R11: 0000000000000000 R12: 0000000000000000
+ R13: 0000000000000002 R14: 0000000000000000 R15: 0000000000000000
+ FS: 00007f6f1abc1740(0000) GS:ffff8880d76a0000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f6f1ad7a784 CR3: 000000007a6e2000 CR4: 00000000000006f0
+ Call Trace:
+ <TASK>
+ snd_utimer_put_id sound/core/timer.c:2043 [inline] [snd_timer]
+ snd_utimer_create+0x59b/0x6a0 sound/core/timer.c:2184 [snd_timer]
+ snd_utimer_ioctl_create sound/core/timer.c:2202 [inline] [snd_timer]
+ __snd_timer_user_ioctl.isra.0+0x724/0x1340 sound/core/timer.c:2287 [snd_timer]
+ snd_timer_user_ioctl+0x75/0xc0 sound/core/timer.c:2298 [snd_timer]
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl fs/ioctl.c:893 [inline]
+ __x64_sys_ioctl+0x198/0x200 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x7b/0x160 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+
+The utimer->id should be set properly before the kasprintf() function,
+which ensures that the snd_utimer_put_id() function will free the
+allocated id.
+
+Fixes: 37745918e0e75 ("ALSA: timer: Introduce virtual userspace-driven timers")
+Signed-off-by: Dewei Meng <mengdewei@cqsoftware.com.cn>
+Link: https://patch.msgid.link/20250821014317.40786-1-mengdewei@cqsoftware.com.cn
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/timer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 8072183c33d3..a352247519be 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -2139,14 +2139,14 @@ static int snd_utimer_create(struct snd_timer_uinfo *utimer_info,
+ goto err_take_id;
+ }
+
++ utimer->id = utimer_id;
++
+ utimer->name = kasprintf(GFP_KERNEL, "snd-utimer%d", utimer_id);
+ if (!utimer->name) {
+ err = -ENOMEM;
+ goto err_get_name;
+ }
+
+- utimer->id = utimer_id;
+-
+ tid.dev_sclass = SNDRV_TIMER_SCLASS_APPLICATION;
+ tid.dev_class = SNDRV_TIMER_CLASS_GLOBAL;
+ tid.card = -1;
+--
+2.50.1
+
--- /dev/null
+From 1478015e7d7b1a7b1f0a8f04b8672ceb8daaa9ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 1cb52373e70f..db2c9bac00ad 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From f4cd7b875f830313be20b33af4715421dfd05e10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 5fd8bcfbda9d779770eb010c1bb7beab1d749865 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 15:22:01 +0100
+Subject: ASoC: cs35l56: Handle new algorithms IDs for CS35L63
+
+From: Richard Fitzgerald <rf@opensource.cirrus.com>
+
+[ Upstream commit 8dadc11b67d4b83deff45e4889b3b5540b9c0a7f ]
+
+CS35L63 uses different algorithm IDs from CS35L56.
+Add a new mechanism to handle different alg IDs between parts in the
+CS35L56 driver.
+
+Fixes: 978858791ced ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")
+
+Signed-off-by: Richard Fitzgerald <rf@opensource.cirrus.com>
+Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250820142209.127575-3-sbinding@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/sound/cs35l56.h | 1 +
+ sound/soc/codecs/cs35l56-shared.c | 29 ++++++++++++++++++++++++++---
+ sound/soc/codecs/cs35l56.c | 2 +-
+ 3 files changed, 28 insertions(+), 4 deletions(-)
+
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index f44aabde805e..7c8bbe8ad1e2 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -306,6 +306,7 @@ struct cs35l56_base {
+ struct gpio_desc *reset_gpio;
+ struct cs35l56_spi_payload *spi_payload_buf;
+ const struct cs35l56_fw_reg *fw_reg;
++ const struct cirrus_amp_cal_controls *calibration_controls;
+ };
+
+ static inline bool cs35l56_is_otp_register(unsigned int reg)
+diff --git a/sound/soc/codecs/cs35l56-shared.c b/sound/soc/codecs/cs35l56-shared.c
+index ba653f6ccfae..850fcf385996 100644
+--- a/sound/soc/codecs/cs35l56-shared.c
++++ b/sound/soc/codecs/cs35l56-shared.c
+@@ -838,6 +838,15 @@ const struct cirrus_amp_cal_controls cs35l56_calibration_controls = {
+ };
+ EXPORT_SYMBOL_NS_GPL(cs35l56_calibration_controls, "SND_SOC_CS35L56_SHARED");
+
++static const struct cirrus_amp_cal_controls cs35l63_calibration_controls = {
++ .alg_id = 0xbf210,
++ .mem_region = WMFW_ADSP2_YM,
++ .ambient = "CAL_AMBIENT",
++ .calr = "CAL_R",
++ .status = "CAL_STATUS",
++ .checksum = "CAL_CHECKSUM",
++};
++
+ int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base)
+ {
+ u64 silicon_uid = 0;
+@@ -912,19 +921,31 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_read_prot_status, "SND_SOC_CS35L56_SHARED");
+ void cs35l56_log_tuning(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp)
+ {
+ __be32 pid, sid, tid;
++ unsigned int alg_id;
+ int ret;
+
++ switch (cs35l56_base->type) {
++ case 0x54:
++ case 0x56:
++ case 0x57:
++ alg_id = 0x9f212;
++ break;
++ default:
++ alg_id = 0xbf212;
++ break;
++ }
++
+ scoped_guard(mutex, &cs_dsp->pwr_lock) {
+ ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_PRJCT_ID",
+- WMFW_ADSP2_XM, 0x9f212),
++ WMFW_ADSP2_XM, alg_id),
+ 0, &pid, sizeof(pid));
+ if (!ret)
+ ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_CHNNL_ID",
+- WMFW_ADSP2_XM, 0x9f212),
++ WMFW_ADSP2_XM, alg_id),
+ 0, &sid, sizeof(sid));
+ if (!ret)
+ ret = cs_dsp_coeff_read_ctrl(cs_dsp_get_ctl(cs_dsp, "AS_SNPSHT_ID",
+- WMFW_ADSP2_XM, 0x9f212),
++ WMFW_ADSP2_XM, alg_id),
+ 0, &tid, sizeof(tid));
+ }
+
+@@ -974,8 +995,10 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
+ case 0x35A54:
+ case 0x35A56:
+ case 0x35A57:
++ cs35l56_base->calibration_controls = &cs35l56_calibration_controls;
+ break;
+ case 0x35A630:
++ cs35l56_base->calibration_controls = &cs35l63_calibration_controls;
+ devid = devid >> 4;
+ break;
+ default:
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index 1b42586794ad..76306282b2e6 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -695,7 +695,7 @@ static int cs35l56_write_cal(struct cs35l56_private *cs35l56)
+ return ret;
+
+ ret = cs_amp_write_cal_coeffs(&cs35l56->dsp.cs_dsp,
+- &cs35l56_calibration_controls,
++ cs35l56->base.calibration_controls,
+ &cs35l56->base.cal_data);
+
+ wm_adsp_stop(&cs35l56->dsp);
+--
+2.50.1
+
--- /dev/null
+From 8bbb3c73dd2703a791c412be48084d23e7b1c3d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 15:22:02 +0100
+Subject: ASoC: cs35l56: Remove SoundWire Clock Divider workaround for CS35L63
+
+From: Stefan Binding <sbinding@opensource.cirrus.com>
+
+[ Upstream commit 8d13d1bdb59d0a2c526869ee571ec51a3a887463 ]
+
+Production silicon for CS35L63 has some small differences compared to
+pre-production silicon. Remove the SoundWire clock workaround as it is
+no longer necessary. We don't want to do tricks with low-level clocking
+controls if we don't need to.
+
+Fixes: 978858791ced ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")
+
+Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250820142209.127575-4-sbinding@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/cs35l56-sdw.c | 69 ----------------------------------
+ sound/soc/codecs/cs35l56.h | 3 --
+ 2 files changed, 72 deletions(-)
+
+diff --git a/sound/soc/codecs/cs35l56-sdw.c b/sound/soc/codecs/cs35l56-sdw.c
+index fa9693af3722..d7fa12d287e0 100644
+--- a/sound/soc/codecs/cs35l56-sdw.c
++++ b/sound/soc/codecs/cs35l56-sdw.c
+@@ -394,74 +394,6 @@ static int cs35l56_sdw_update_status(struct sdw_slave *peripheral,
+ return 0;
+ }
+
+-static int cs35l63_sdw_kick_divider(struct cs35l56_private *cs35l56,
+- struct sdw_slave *peripheral)
+-{
+- unsigned int curr_scale_reg, next_scale_reg;
+- int curr_scale, next_scale, ret;
+-
+- if (!cs35l56->base.init_done)
+- return 0;
+-
+- if (peripheral->bus->params.curr_bank) {
+- curr_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B1;
+- next_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B0;
+- } else {
+- curr_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B0;
+- next_scale_reg = SDW_SCP_BUSCLOCK_SCALE_B1;
+- }
+-
+- /*
+- * Current clock scale value must be different to new value.
+- * Modify current to guarantee this. If next still has the dummy
+- * value we wrote when it was current, the core code has not set
+- * a new scale so restore its original good value
+- */
+- curr_scale = sdw_read_no_pm(peripheral, curr_scale_reg);
+- if (curr_scale < 0) {
+- dev_err(cs35l56->base.dev, "Failed to read current clock scale: %d\n", curr_scale);
+- return curr_scale;
+- }
+-
+- next_scale = sdw_read_no_pm(peripheral, next_scale_reg);
+- if (next_scale < 0) {
+- dev_err(cs35l56->base.dev, "Failed to read next clock scale: %d\n", next_scale);
+- return next_scale;
+- }
+-
+- if (next_scale == CS35L56_SDW_INVALID_BUS_SCALE) {
+- next_scale = cs35l56->old_sdw_clock_scale;
+- ret = sdw_write_no_pm(peripheral, next_scale_reg, next_scale);
+- if (ret < 0) {
+- dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n",
+- ret);
+- return ret;
+- }
+- }
+-
+- cs35l56->old_sdw_clock_scale = curr_scale;
+- ret = sdw_write_no_pm(peripheral, curr_scale_reg, CS35L56_SDW_INVALID_BUS_SCALE);
+- if (ret < 0) {
+- dev_err(cs35l56->base.dev, "Failed to modify current clock scale: %d\n", ret);
+- return ret;
+- }
+-
+- dev_dbg(cs35l56->base.dev, "Next bus scale: %#x\n", next_scale);
+-
+- return 0;
+-}
+-
+-static int cs35l56_sdw_bus_config(struct sdw_slave *peripheral,
+- struct sdw_bus_params *params)
+-{
+- struct cs35l56_private *cs35l56 = dev_get_drvdata(&peripheral->dev);
+-
+- if ((cs35l56->base.type == 0x63) && (cs35l56->base.rev < 0xa1))
+- return cs35l63_sdw_kick_divider(cs35l56, peripheral);
+-
+- return 0;
+-}
+-
+ static int __maybe_unused cs35l56_sdw_clk_stop(struct sdw_slave *peripheral,
+ enum sdw_clk_stop_mode mode,
+ enum sdw_clk_stop_type type)
+@@ -477,7 +409,6 @@ static const struct sdw_slave_ops cs35l56_sdw_ops = {
+ .read_prop = cs35l56_sdw_read_prop,
+ .interrupt_callback = cs35l56_sdw_interrupt,
+ .update_status = cs35l56_sdw_update_status,
+- .bus_config = cs35l56_sdw_bus_config,
+ #ifdef DEBUG
+ .clk_stop = cs35l56_sdw_clk_stop,
+ #endif
+diff --git a/sound/soc/codecs/cs35l56.h b/sound/soc/codecs/cs35l56.h
+index bd77a57249d7..40a1800a4585 100644
+--- a/sound/soc/codecs/cs35l56.h
++++ b/sound/soc/codecs/cs35l56.h
+@@ -20,8 +20,6 @@
+ #define CS35L56_SDW_GEN_INT_MASK_1 0xc1
+ #define CS35L56_SDW_INT_MASK_CODEC_IRQ BIT(0)
+
+-#define CS35L56_SDW_INVALID_BUS_SCALE 0xf
+-
+ #define CS35L56_RX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE)
+ #define CS35L56_TX_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE \
+ | SNDRV_PCM_FMTBIT_S32_LE)
+@@ -52,7 +50,6 @@ struct cs35l56_private {
+ u8 asp_slot_count;
+ bool tdm_mode;
+ bool sysclk_set;
+- u8 old_sdw_clock_scale;
+ u8 sdw_link_num;
+ u8 sdw_unique_id;
+ };
+--
+2.50.1
+
--- /dev/null
+From 789d33790beb9a04bbb0ce804b0805997fdde0ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 15:22:00 +0100
+Subject: ASoC: cs35l56: Update Firmware Addresses for CS35L63 for production
+ silicon
+
+From: Stefan Binding <sbinding@opensource.cirrus.com>
+
+[ Upstream commit f135fb24ef29335b94921077588cae445bc7f099 ]
+
+Production silicon for CS35L63 has some small differences compared to
+pre-production silicon. Update firmware addresses, which are different.
+
+No product was ever released with pre-production silicon so there is no
+need for the driver to include support for it.
+
+Fixes: 978858791ced ("ASoC: cs35l56: Add initial support for CS35L63 for I2C and SoundWire")
+
+Signed-off-by: Stefan Binding <sbinding@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250820142209.127575-2-sbinding@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/sound/cs35l56.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
+index e17c4cadd04d..f44aabde805e 100644
+--- a/include/sound/cs35l56.h
++++ b/include/sound/cs35l56.h
+@@ -107,8 +107,8 @@
+ #define CS35L56_DSP1_PMEM_5114 0x3804FE8
+
+ #define CS35L63_DSP1_FW_VER CS35L56_DSP1_FW_VER
+-#define CS35L63_DSP1_HALO_STATE 0x280396C
+-#define CS35L63_DSP1_PM_CUR_STATE 0x28042C8
++#define CS35L63_DSP1_HALO_STATE 0x2803C04
++#define CS35L63_DSP1_PM_CUR_STATE 0x2804518
+ #define CS35L63_PROTECTION_STATUS 0x340009C
+ #define CS35L63_TRANSDUCER_ACTUAL_PS 0x34000F4
+ #define CS35L63_MAIN_RENDER_USER_MUTE 0x3400020
+--
+2.50.1
+
--- /dev/null
+From 5e391b63065a966a1f07697e1041314cd0c877b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 21:17:37 +0800
+Subject: blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 2d82f3bd8910eb65e30bb2a3c9b945bfb3b6d661 ]
+
+Commit 5989bfe6ac6b ("block: restore two stage elevator switch while
+running nr_hw_queue update") reintroduced a lockdep warning by calling
+blk_mq_freeze_queue_nomemsave() before switching the I/O scheduler.
+
+The function blk_mq_elv_switch_none() calls elevator_change_done().
+Running this while the queue is frozen causes a lockdep warning.
+
+Fix this by reordering the operations: first, switch the I/O scheduler
+to 'none', and then freeze the queue. This ensures that elevator_change_done()
+is not called on an already frozen queue. And this way is safe because
+elevator_set_none() does freeze queue before switching to none.
+
+Also we still have to rely on blk_mq_elv_switch_back() for switching
+back, and it has to cover unfrozen queue case.
+
+Cc: Nilay Shroff <nilay@linux.ibm.com>
+Cc: Yu Kuai <yukuai3@huawei.com>
+Fixes: 5989bfe6ac6b ("block: restore two stage elevator switch while running nr_hw_queue update")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
+Link: https://lore.kernel.org/r/20250815131737.331692-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 4cb2f5ca8656..355db0abe44b 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -5031,6 +5031,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ unsigned int memflags;
+ int i;
+ struct xarray elv_tbl, et_tbl;
++ bool queues_frozen = false;
+
+ lockdep_assert_held(&set->tag_list_lock);
+
+@@ -5054,9 +5055,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ blk_mq_sysfs_unregister_hctxs(q);
+ }
+
+- list_for_each_entry(q, &set->tag_list, tag_set_list)
+- blk_mq_freeze_queue_nomemsave(q);
+-
+ /*
+ * Switch IO scheduler to 'none', cleaning up the data associated
+ * with the previous scheduler. We will switch back once we are done
+@@ -5066,6 +5064,9 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ if (blk_mq_elv_switch_none(q, &elv_tbl))
+ goto switch_back;
+
++ list_for_each_entry(q, &set->tag_list, tag_set_list)
++ blk_mq_freeze_queue_nomemsave(q);
++ queues_frozen = true;
+ if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
+ goto switch_back;
+
+@@ -5089,8 +5090,12 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ }
+ switch_back:
+ /* The blk_mq_elv_switch_back unfreezes queue for us. */
+- list_for_each_entry(q, &set->tag_list, tag_set_list)
++ list_for_each_entry(q, &set->tag_list, tag_set_list) {
++ /* switch_back expects queue to be frozen */
++ if (!queues_frozen)
++ blk_mq_freeze_queue_nomemsave(q);
+ blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
++ }
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_sysfs_register_hctxs(q);
+--
+2.50.1
+
--- /dev/null
+From 56cbcf83ed6e0c105ea4f3168e0f0667e94e000c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 13:54:59 +0530
+Subject: block: avoid cpu_hotplug_lock depedency on freeze_lock
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 370ac285f23aecae40600851fb4a1a9e75e50973 ]
+
+A recent lockdep[1] splat observed while running blktest block/005
+reveals a potential deadlock caused by the cpu_hotplug_lock dependency
+on ->freeze_lock. This dependency was introduced by commit 033b667a823e
+("block: blk-rq-qos: guard rq-qos helpers by static key").
+
+That change added a static key to avoid fetching q->rq_qos when
+neither blk-wbt nor blk-iolatency is configured. The static key
+dynamically patches kernel text to a NOP when disabled, eliminating
+overhead of fetching q->rq_qos in the I/O hot path. However, enabling
+a static key at runtime requires acquiring both cpu_hotplug_lock and
+jump_label_mutex. When this happens after the queue has already been
+frozen (i.e., while holding ->freeze_lock), it creates a locking
+dependency from cpu_hotplug_lock to ->freeze_lock, which leads to a
+potential deadlock reported by lockdep [1].
+
+To resolve this, replace the static key mechanism with q->queue_flags:
+QUEUE_FLAG_QOS_ENABLED. This flag is evaluated in the fast path before
+accessing q->rq_qos. If the flag is set, we proceed to fetch q->rq_qos;
+otherwise, the access is skipped.
+
+Since q->queue_flags is commonly accessed in IO hotpath and resides in
+the first cacheline of struct request_queue, checking it imposes minimal
+overhead while eliminating the deadlock risk.
+
+This change avoids the lockdep splat without introducing performance
+regressions.
+
+[1] https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/
+
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Closes: https://lore.kernel.org/linux-block/4fdm37so3o4xricdgfosgmohn63aa7wj3ua4e5vpihoamwg3ui@fq42f5q5t5ic/
+Fixes: 033b667a823e ("block: blk-rq-qos: guard rq-qos helpers by static key")
+Tested-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250814082612.500845-4-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-debugfs.c | 1 +
+ block/blk-rq-qos.c | 9 ++++---
+ block/blk-rq-qos.h | 54 ++++++++++++++++++++++++------------------
+ include/linux/blkdev.h | 1 +
+ 4 files changed, 37 insertions(+), 28 deletions(-)
+
+diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
+index 29b3540dd180..bdcb27ab5606 100644
+--- a/block/blk-mq-debugfs.c
++++ b/block/blk-mq-debugfs.c
+@@ -95,6 +95,7 @@ static const char *const blk_queue_flag_name[] = {
+ QUEUE_FLAG_NAME(SQ_SCHED),
+ QUEUE_FLAG_NAME(DISABLE_WBT_DEF),
+ QUEUE_FLAG_NAME(NO_ELV_SWITCH),
++ QUEUE_FLAG_NAME(QOS_ENABLED),
+ };
+ #undef QUEUE_FLAG_NAME
+
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index b1e24bb85ad2..654478dfbc20 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -2,8 +2,6 @@
+
+ #include "blk-rq-qos.h"
+
+-__read_mostly DEFINE_STATIC_KEY_FALSE(block_rq_qos);
+-
+ /*
+ * Increment 'v', if 'v' is below 'below'. Returns true if we succeeded,
+ * false if 'v' + 1 would be bigger than 'below'.
+@@ -319,8 +317,8 @@ void rq_qos_exit(struct request_queue *q)
+ struct rq_qos *rqos = q->rq_qos;
+ q->rq_qos = rqos->next;
+ rqos->ops->exit(rqos);
+- static_branch_dec(&block_rq_qos);
+ }
++ blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
+ mutex_unlock(&q->rq_qos_mutex);
+ }
+
+@@ -346,7 +344,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
+ goto ebusy;
+ rqos->next = q->rq_qos;
+ q->rq_qos = rqos;
+- static_branch_inc(&block_rq_qos);
++ blk_queue_flag_set(QUEUE_FLAG_QOS_ENABLED, q);
+
+ blk_mq_unfreeze_queue(q, memflags);
+
+@@ -374,10 +372,11 @@ void rq_qos_del(struct rq_qos *rqos)
+ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ if (*cur == rqos) {
+ *cur = rqos->next;
+- static_branch_dec(&block_rq_qos);
+ break;
+ }
+ }
++ if (!q->rq_qos)
++ blk_queue_flag_clear(QUEUE_FLAG_QOS_ENABLED, q);
+ blk_mq_unfreeze_queue(q, memflags);
+
+ mutex_lock(&q->debugfs_mutex);
+diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
+index 28125fc49eff..1fe22000a379 100644
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -12,7 +12,6 @@
+ #include "blk-mq-debugfs.h"
+
+ struct blk_mq_debugfs_attr;
+-extern struct static_key_false block_rq_qos;
+
+ enum rq_qos_id {
+ RQ_QOS_WBT,
+@@ -113,49 +112,55 @@ void __rq_qos_queue_depth_changed(struct rq_qos *rqos);
+
+ static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos)
+ __rq_qos_cleanup(q->rq_qos, bio);
+ }
+
+ static inline void rq_qos_done(struct request_queue *q, struct request *rq)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos &&
+- !blk_rq_is_passthrough(rq))
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos && !blk_rq_is_passthrough(rq))
+ __rq_qos_done(q->rq_qos, rq);
+ }
+
+ static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos)
+ __rq_qos_issue(q->rq_qos, rq);
+ }
+
+ static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos)
+ __rq_qos_requeue(q->rq_qos, rq);
+ }
+
+ static inline void rq_qos_done_bio(struct bio *bio)
+ {
+- if (static_branch_unlikely(&block_rq_qos) &&
+- bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
+- bio_flagged(bio, BIO_QOS_MERGED))) {
+- struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+-
+- /*
+- * If a bio has BIO_QOS_xxx set, it implicitly implies that
+- * q->rq_qos is present. So, we skip re-checking q->rq_qos
+- * here as an extra optimization and directly call
+- * __rq_qos_done_bio().
+- */
+- __rq_qos_done_bio(q->rq_qos, bio);
+- }
++ struct request_queue *q;
++
++ if (!bio->bi_bdev || (!bio_flagged(bio, BIO_QOS_THROTTLED) &&
++ !bio_flagged(bio, BIO_QOS_MERGED)))
++ return;
++
++ q = bdev_get_queue(bio->bi_bdev);
++
++ /*
++ * If a bio has BIO_QOS_xxx set, it implicitly implies that
++ * q->rq_qos is present. So, we skip re-checking q->rq_qos
++ * here as an extra optimization and directly call
++ * __rq_qos_done_bio().
++ */
++ __rq_qos_done_bio(q->rq_qos, bio);
+ }
+
+ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos) {
+ bio_set_flag(bio, BIO_QOS_THROTTLED);
+ __rq_qos_throttle(q->rq_qos, bio);
+ }
+@@ -164,14 +169,16 @@ static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
+ static inline void rq_qos_track(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos)
+ __rq_qos_track(q->rq_qos, rq, bio);
+ }
+
+ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
+ struct bio *bio)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos) {
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos) {
+ bio_set_flag(bio, BIO_QOS_MERGED);
+ __rq_qos_merge(q->rq_qos, rq, bio);
+ }
+@@ -179,7 +186,8 @@ static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
+
+ static inline void rq_qos_queue_depth_changed(struct request_queue *q)
+ {
+- if (static_branch_unlikely(&block_rq_qos) && q->rq_qos)
++ if (unlikely(test_bit(QUEUE_FLAG_QOS_ENABLED, &q->queue_flags)) &&
++ q->rq_qos)
+ __rq_qos_queue_depth_changed(q->rq_qos);
+ }
+
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 620345ce3aaa..3921188c9e13 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -652,6 +652,7 @@ enum {
+ QUEUE_FLAG_SQ_SCHED, /* single queue style io dispatch */
+ QUEUE_FLAG_DISABLE_WBT_DEF, /* for sched to disable/enable wbt */
+ QUEUE_FLAG_NO_ELV_SWITCH, /* can't switch elevator any more */
++ QUEUE_FLAG_QOS_ENABLED, /* qos is enabled */
+ QUEUE_FLAG_MAX
+ };
+
+--
+2.50.1
+
--- /dev/null
+From 9e224f95dabf3fc2afd72a4edcfa0d3e6acce61a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 13:54:58 +0530
+Subject: block: decrement block_rq_qos static key in rq_qos_del()
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit ade1beea1c27657712aa8f594226d461639382ff ]
+
+rq_qos_add() increments the block_rq_qos static key when a QoS
+policy is attached. When a QoS policy is removed via rq_qos_del(),
+we must symmetrically decrement the static key. If this removal drops
+the last QoS policy from the queue (q->rq_qos becomes NULL), the
+static branch can be disabled and the jump label patched to a NOP,
+avoiding overhead on the hot path.
+
+This change ensures rq_qos_add()/rq_qos_del() keep the
+block_rq_qos static key balanced and prevents leaving the branch
+permanently enabled after the last policy is removed.
+
+Fixes: 033b667a823e ("block: blk-rq-qos: guard rq-qos helpers by static key")
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250814082612.500845-3-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-rq-qos.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index 848591fb3c57..b1e24bb85ad2 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -374,6 +374,7 @@ void rq_qos_del(struct rq_qos *rqos)
+ for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+ if (*cur == rqos) {
+ *cur = rqos->next;
++ static_branch_dec(&block_rq_qos);
+ break;
+ }
+ }
+--
+2.50.1
+
--- /dev/null
+From 789546b2b64b9771fa06757e8bc0ff7fef32ba4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:16:08 +0530
+Subject: block: fix lockdep warning caused by lock dependency in
+ elv_iosched_store
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit f5a6604f7a4405450e4a1f54e5430f47290c500f ]
+
+Recent lockdep reports [1] have revealed a potential deadlock caused by a
+lock dependency between the percpu allocator lock and the elevator lock.
+This issue can be avoided by ensuring that the allocation and release of
+scheduler tags (sched_tags) are performed outside the elevator lock.
+Furthermore, the queue does not need to remain frozen during these
+operations.
+
+To address this, move all sched_tags allocations and deallocations outside
+of both the ->elevator_lock and the ->freeze_lock. Since the lifetime of
+the elevator queue and its associated sched_tags is closely tied, the
+allocated sched_tags are now stored in the elevator queue structure. Then,
+during the actual elevator switch (which runs under ->freeze_lock and
+->elevator_lock), the pre-allocated sched_tags are assigned to the
+appropriate q->hctx. Once the elevator switch is complete and the locks
+are released, the old elevator queue and its associated sched_tags are
+freed.
+
+This commit specifically addresses the allocation/deallocation of sched_
+tags during elevator switching. Note that sched_tags may also be allocated
+in other contexts, such as during nr_hw_queues updates. Supporting that
+use case will require batch allocation/deallocation, which will be handled
+in a follow-up patch.
+
+This restructuring ensures that sched_tags memory management occurs
+entirely outside of the ->elevator_lock and ->freeze_lock context,
+eliminating the lock dependency problem seen during scheduler updates.
+
+[1] https://lore.kernel.org/all/0659ea8d-a463-47c8-9180-43c719e106eb@linux.ibm.com/
+
+Reported-by: Stefan Haberland <sth@linux.ibm.com>
+Closes: https://lore.kernel.org/all/0659ea8d-a463-47c8-9180-43c719e106eb@linux.ibm.com/
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Link: https://lore.kernel.org/r/20250730074614.2537382-3-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 2d82f3bd8910 ("blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 155 +++++++++++++++++++++++--------------------
+ block/blk-mq-sched.h | 8 ++-
+ block/elevator.c | 40 +++++++++--
+ block/elevator.h | 14 +++-
+ 4 files changed, 136 insertions(+), 81 deletions(-)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 359e0704e09b..2d6d1ebdd8fb 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -374,64 +374,17 @@ bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
+ }
+ EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
+
+-static int blk_mq_sched_alloc_map_and_rqs(struct request_queue *q,
+- struct blk_mq_hw_ctx *hctx,
+- unsigned int hctx_idx)
+-{
+- if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+- hctx->sched_tags = q->sched_shared_tags;
+- return 0;
+- }
+-
+- hctx->sched_tags = blk_mq_alloc_map_and_rqs(q->tag_set, hctx_idx,
+- q->nr_requests);
+-
+- if (!hctx->sched_tags)
+- return -ENOMEM;
+- return 0;
+-}
+-
+-static void blk_mq_exit_sched_shared_tags(struct request_queue *queue)
+-{
+- blk_mq_free_rq_map(queue->sched_shared_tags);
+- queue->sched_shared_tags = NULL;
+-}
+-
+ /* called in queue's release handler, tagset has gone away */
+ static void blk_mq_sched_tags_teardown(struct request_queue *q, unsigned int flags)
+ {
+ struct blk_mq_hw_ctx *hctx;
+ unsigned long i;
+
+- queue_for_each_hw_ctx(q, hctx, i) {
+- if (hctx->sched_tags) {
+- if (!blk_mq_is_shared_tags(flags))
+- blk_mq_free_rq_map(hctx->sched_tags);
+- hctx->sched_tags = NULL;
+- }
+- }
++ queue_for_each_hw_ctx(q, hctx, i)
++ hctx->sched_tags = NULL;
+
+ if (blk_mq_is_shared_tags(flags))
+- blk_mq_exit_sched_shared_tags(q);
+-}
+-
+-static int blk_mq_init_sched_shared_tags(struct request_queue *queue)
+-{
+- struct blk_mq_tag_set *set = queue->tag_set;
+-
+- /*
+- * Set initial depth at max so that we don't need to reallocate for
+- * updating nr_requests.
+- */
+- queue->sched_shared_tags = blk_mq_alloc_map_and_rqs(set,
+- BLK_MQ_NO_HCTX_IDX,
+- MAX_SCHED_RQ);
+- if (!queue->sched_shared_tags)
+- return -ENOMEM;
+-
+- blk_mq_tag_update_sched_shared_tags(queue);
+-
+- return 0;
++ q->sched_shared_tags = NULL;
+ }
+
+ void blk_mq_sched_reg_debugfs(struct request_queue *q)
+@@ -458,8 +411,75 @@ void blk_mq_sched_unreg_debugfs(struct request_queue *q)
+ mutex_unlock(&q->debugfs_mutex);
+ }
+
++void blk_mq_free_sched_tags(struct elevator_tags *et,
++ struct blk_mq_tag_set *set)
++{
++ unsigned long i;
++
++ /* Shared tags are stored at index 0 in @tags. */
++ if (blk_mq_is_shared_tags(set->flags))
++ blk_mq_free_map_and_rqs(set, et->tags[0], BLK_MQ_NO_HCTX_IDX);
++ else {
++ for (i = 0; i < et->nr_hw_queues; i++)
++ blk_mq_free_map_and_rqs(set, et->tags[i], i);
++ }
++
++ kfree(et);
++}
++
++struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
++ unsigned int nr_hw_queues)
++{
++ unsigned int nr_tags;
++ int i;
++ struct elevator_tags *et;
++ gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
++
++ if (blk_mq_is_shared_tags(set->flags))
++ nr_tags = 1;
++ else
++ nr_tags = nr_hw_queues;
++
++ et = kmalloc(sizeof(struct elevator_tags) +
++ nr_tags * sizeof(struct blk_mq_tags *), gfp);
++ if (!et)
++ return NULL;
++ /*
++ * Default to double of smaller one between hw queue_depth and
++ * 128, since we don't split into sync/async like the old code
++ * did. Additionally, this is a per-hw queue depth.
++ */
++ et->nr_requests = 2 * min_t(unsigned int, set->queue_depth,
++ BLKDEV_DEFAULT_RQ);
++ et->nr_hw_queues = nr_hw_queues;
++
++ if (blk_mq_is_shared_tags(set->flags)) {
++ /* Shared tags are stored at index 0 in @tags. */
++ et->tags[0] = blk_mq_alloc_map_and_rqs(set, BLK_MQ_NO_HCTX_IDX,
++ MAX_SCHED_RQ);
++ if (!et->tags[0])
++ goto out;
++ } else {
++ for (i = 0; i < et->nr_hw_queues; i++) {
++ et->tags[i] = blk_mq_alloc_map_and_rqs(set, i,
++ et->nr_requests);
++ if (!et->tags[i])
++ goto out_unwind;
++ }
++ }
++
++ return et;
++out_unwind:
++ while (--i >= 0)
++ blk_mq_free_map_and_rqs(set, et->tags[i], i);
++out:
++ kfree(et);
++ return NULL;
++}
++
+ /* caller must have a reference to @e, will grab another one if successful */
+-int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
++int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
++ struct elevator_tags *et)
+ {
+ unsigned int flags = q->tag_set->flags;
+ struct blk_mq_hw_ctx *hctx;
+@@ -467,40 +487,33 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ unsigned long i;
+ int ret;
+
+- /*
+- * Default to double of smaller one between hw queue_depth and 128,
+- * since we don't split into sync/async like the old code did.
+- * Additionally, this is a per-hw queue depth.
+- */
+- q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
+- BLKDEV_DEFAULT_RQ);
+-
+- eq = elevator_alloc(q, e);
++ eq = elevator_alloc(q, e, et);
+ if (!eq)
+ return -ENOMEM;
+
++ q->nr_requests = et->nr_requests;
++
+ if (blk_mq_is_shared_tags(flags)) {
+- ret = blk_mq_init_sched_shared_tags(q);
+- if (ret)
+- goto err_put_elevator;
++ /* Shared tags are stored at index 0 in @et->tags. */
++ q->sched_shared_tags = et->tags[0];
++ blk_mq_tag_update_sched_shared_tags(q);
+ }
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+- ret = blk_mq_sched_alloc_map_and_rqs(q, hctx, i);
+- if (ret)
+- goto err_free_map_and_rqs;
++ if (blk_mq_is_shared_tags(flags))
++ hctx->sched_tags = q->sched_shared_tags;
++ else
++ hctx->sched_tags = et->tags[i];
+ }
+
+ ret = e->ops.init_sched(q, eq);
+ if (ret)
+- goto err_free_map_and_rqs;
++ goto out;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ if (e->ops.init_hctx) {
+ ret = e->ops.init_hctx(hctx, i);
+ if (ret) {
+- eq = q->elevator;
+- blk_mq_sched_free_rqs(q);
+ blk_mq_exit_sched(q, eq);
+ kobject_put(&eq->kobj);
+ return ret;
+@@ -509,10 +522,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ }
+ return 0;
+
+-err_free_map_and_rqs:
+- blk_mq_sched_free_rqs(q);
++out:
+ blk_mq_sched_tags_teardown(q, flags);
+-err_put_elevator:
+ kobject_put(&eq->kobj);
+ q->elevator = NULL;
+ return ret;
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 1326526bb733..0cde00cd1c47 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -18,10 +18,16 @@ void __blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx);
+
+ void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
+
+-int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e);
++int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
++ struct elevator_tags *et);
+ void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e);
+ void blk_mq_sched_free_rqs(struct request_queue *q);
+
++struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
++ unsigned int nr_hw_queues);
++void blk_mq_free_sched_tags(struct elevator_tags *et,
++ struct blk_mq_tag_set *set);
++
+ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+ {
+ if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
+diff --git a/block/elevator.c b/block/elevator.c
+index 939b0c590fbe..e9dc837b7b70 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -54,6 +54,8 @@ struct elv_change_ctx {
+ struct elevator_queue *old;
+ /* for registering new elevator */
+ struct elevator_queue *new;
++ /* holds sched tags data */
++ struct elevator_tags *et;
+ };
+
+ static DEFINE_SPINLOCK(elv_list_lock);
+@@ -132,7 +134,7 @@ static struct elevator_type *elevator_find_get(const char *name)
+ static const struct kobj_type elv_ktype;
+
+ struct elevator_queue *elevator_alloc(struct request_queue *q,
+- struct elevator_type *e)
++ struct elevator_type *e, struct elevator_tags *et)
+ {
+ struct elevator_queue *eq;
+
+@@ -145,6 +147,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
+ kobject_init(&eq->kobj, &elv_ktype);
+ mutex_init(&eq->sysfs_lock);
+ hash_init(eq->hash);
++ eq->et = et;
+
+ return eq;
+ }
+@@ -165,7 +168,6 @@ static void elevator_exit(struct request_queue *q)
+ lockdep_assert_held(&q->elevator_lock);
+
+ ioc_clear_queue(q);
+- blk_mq_sched_free_rqs(q);
+
+ mutex_lock(&e->sysfs_lock);
+ blk_mq_exit_sched(q, e);
+@@ -591,7 +593,7 @@ static int elevator_switch(struct request_queue *q, struct elv_change_ctx *ctx)
+ }
+
+ if (new_e) {
+- ret = blk_mq_init_sched(q, new_e);
++ ret = blk_mq_init_sched(q, new_e, ctx->et);
+ if (ret)
+ goto out_unfreeze;
+ ctx->new = q->elevator;
+@@ -626,8 +628,10 @@ static void elv_exit_and_release(struct request_queue *q)
+ elevator_exit(q);
+ mutex_unlock(&q->elevator_lock);
+ blk_mq_unfreeze_queue(q, memflags);
+- if (e)
++ if (e) {
++ blk_mq_free_sched_tags(e->et, q->tag_set);
+ kobject_put(&e->kobj);
++ }
+ }
+
+ static int elevator_change_done(struct request_queue *q,
+@@ -640,6 +644,7 @@ static int elevator_change_done(struct request_queue *q,
+ &ctx->old->flags);
+
+ elv_unregister_queue(q, ctx->old);
++ blk_mq_free_sched_tags(ctx->old->et, q->tag_set);
+ kobject_put(&ctx->old->kobj);
+ if (enable_wbt)
+ wbt_enable_default(q->disk);
+@@ -658,9 +663,16 @@ static int elevator_change_done(struct request_queue *q,
+ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ {
+ unsigned int memflags;
++ struct blk_mq_tag_set *set = q->tag_set;
+ int ret = 0;
+
+- lockdep_assert_held(&q->tag_set->update_nr_hwq_lock);
++ lockdep_assert_held(&set->update_nr_hwq_lock);
++
++ if (strncmp(ctx->name, "none", 4)) {
++ ctx->et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
++ if (!ctx->et)
++ return -ENOMEM;
++ }
+
+ memflags = blk_mq_freeze_queue(q);
+ /*
+@@ -680,6 +692,11 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ blk_mq_unfreeze_queue(q, memflags);
+ if (!ret)
+ ret = elevator_change_done(q, ctx);
++ /*
++ * Free sched tags if it's allocated but we couldn't switch elevator.
++ */
++ if (ctx->et && !ctx->new)
++ blk_mq_free_sched_tags(ctx->et, set);
+
+ return ret;
+ }
+@@ -690,6 +707,7 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ */
+ void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
+ {
++ struct blk_mq_tag_set *set = q->tag_set;
+ struct elv_change_ctx ctx = {};
+ int ret = -ENODEV;
+
+@@ -697,15 +715,25 @@ void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
+
+ if (e && !blk_queue_dying(q) && blk_queue_registered(q)) {
+ ctx.name = e->elevator_name;
+-
++ ctx.et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
++ if (!ctx.et) {
++ WARN_ON_ONCE(1);
++ goto unfreeze;
++ }
+ mutex_lock(&q->elevator_lock);
+ /* force to reattach elevator after nr_hw_queue is updated */
+ ret = elevator_switch(q, &ctx);
+ mutex_unlock(&q->elevator_lock);
+ }
++unfreeze:
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, &ctx));
++ /*
++ * Free sched tags if it's allocated but we couldn't switch elevator.
++ */
++ if (ctx.et && !ctx.new)
++ blk_mq_free_sched_tags(ctx.et, set);
+ }
+
+ /*
+diff --git a/block/elevator.h b/block/elevator.h
+index a4de5f9ad790..adc5c157e17e 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -23,6 +23,15 @@ enum elv_merge {
+ struct blk_mq_alloc_data;
+ struct blk_mq_hw_ctx;
+
++struct elevator_tags {
++ /* num. of hardware queues for which tags are allocated */
++ unsigned int nr_hw_queues;
++ /* depth used while allocating tags */
++ unsigned int nr_requests;
++ /* shared tag is stored at index 0 */
++ struct blk_mq_tags *tags[];
++};
++
+ struct elevator_mq_ops {
+ int (*init_sched)(struct request_queue *, struct elevator_queue *);
+ void (*exit_sched)(struct elevator_queue *);
+@@ -113,6 +122,7 @@ struct request *elv_rqhash_find(struct request_queue *q, sector_t offset);
+ struct elevator_queue
+ {
+ struct elevator_type *type;
++ struct elevator_tags *et;
+ void *elevator_data;
+ struct kobject kobj;
+ struct mutex sysfs_lock;
+@@ -152,8 +162,8 @@ ssize_t elv_iosched_show(struct gendisk *disk, char *page);
+ ssize_t elv_iosched_store(struct gendisk *disk, const char *page, size_t count);
+
+ extern bool elv_bio_merge_ok(struct request *, struct bio *);
+-extern struct elevator_queue *elevator_alloc(struct request_queue *,
+- struct elevator_type *);
++struct elevator_queue *elevator_alloc(struct request_queue *,
++ struct elevator_type *, struct elevator_tags *);
+
+ /*
+ * Helper functions.
+--
+2.50.1
+
--- /dev/null
+From e5ee18930aa36e2247322113829f6d93f0125740 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:16:09 +0530
+Subject: block: fix potential deadlock while running nr_hw_queue update
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 04225d13aef11b2a539014def5e47d8c21fd74a5 ]
+
+Move scheduler tags (sched_tags) allocation and deallocation outside
+both the ->elevator_lock and ->freeze_lock when updating nr_hw_queues.
+This change breaks the dependency chain from the percpu allocator lock
+to the elevator lock, helping to prevent potential deadlocks, as
+observed in the reported lockdep splat[1].
+
+This commit introduces batch allocation and deallocation helpers for
+sched_tags, which are now used from within __blk_mq_update_nr_hw_queues
+routine while iterating through the tagset.
+
+With this change, all sched_tags memory management is handled entirely
+outside the ->elevator_lock and the ->freeze_lock context, thereby
+eliminating the lock dependency that could otherwise manifest during
+nr_hw_queues updates.
+
+[1] https://lore.kernel.org/all/0659ea8d-a463-47c8-9180-43c719e106eb@linux.ibm.com/
+
+Reported-by: Stefan Haberland <sth@linux.ibm.com>
+Closes: https://lore.kernel.org/all/0659ea8d-a463-47c8-9180-43c719e106eb@linux.ibm.com/
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Link: https://lore.kernel.org/r/20250730074614.2537382-4-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 2d82f3bd8910 ("blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq-sched.c | 65 ++++++++++++++++++++++++++++++++++++++++++++
+ block/blk-mq-sched.h | 4 +++
+ block/blk-mq.c | 16 +++++++----
+ block/blk.h | 4 ++-
+ block/elevator.c | 15 ++++------
+ 5 files changed, 89 insertions(+), 15 deletions(-)
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 2d6d1ebdd8fb..e2ce4a28e6c9 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -427,6 +427,32 @@ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ kfree(et);
+ }
+
++void blk_mq_free_sched_tags_batch(struct xarray *et_table,
++ struct blk_mq_tag_set *set)
++{
++ struct request_queue *q;
++ struct elevator_tags *et;
++
++ lockdep_assert_held_write(&set->update_nr_hwq_lock);
++
++ list_for_each_entry(q, &set->tag_list, tag_set_list) {
++ /*
++ * Accessing q->elevator without holding q->elevator_lock is
++ * safe because we're holding here set->update_nr_hwq_lock in
++ * the writer context. So, scheduler update/switch code (which
++ * acquires the same lock but in the reader context) can't run
++ * concurrently.
++ */
++ if (q->elevator) {
++ et = xa_load(et_table, q->id);
++ if (unlikely(!et))
++ WARN_ON_ONCE(1);
++ else
++ blk_mq_free_sched_tags(et, set);
++ }
++ }
++}
++
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues)
+ {
+@@ -477,6 +503,45 @@ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ return NULL;
+ }
+
++int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
++ struct blk_mq_tag_set *set, unsigned int nr_hw_queues)
++{
++ struct request_queue *q;
++ struct elevator_tags *et;
++ gfp_t gfp = GFP_NOIO | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
++
++ lockdep_assert_held_write(&set->update_nr_hwq_lock);
++
++ list_for_each_entry(q, &set->tag_list, tag_set_list) {
++ /*
++ * Accessing q->elevator without holding q->elevator_lock is
++ * safe because we're holding here set->update_nr_hwq_lock in
++ * the writer context. So, scheduler update/switch code (which
++ * acquires the same lock but in the reader context) can't run
++ * concurrently.
++ */
++ if (q->elevator) {
++ et = blk_mq_alloc_sched_tags(set, nr_hw_queues);
++ if (!et)
++ goto out_unwind;
++ if (xa_insert(et_table, q->id, et, gfp))
++ goto out_free_tags;
++ }
++ }
++ return 0;
++out_free_tags:
++ blk_mq_free_sched_tags(et, set);
++out_unwind:
++ list_for_each_entry_continue_reverse(q, &set->tag_list, tag_set_list) {
++ if (q->elevator) {
++ et = xa_load(et_table, q->id);
++ if (et)
++ blk_mq_free_sched_tags(et, set);
++ }
++ }
++ return -ENOMEM;
++}
++
+ /* caller must have a reference to @e, will grab another one if successful */
+ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e,
+ struct elevator_tags *et)
+diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
+index 0cde00cd1c47..b554e1d55950 100644
+--- a/block/blk-mq-sched.h
++++ b/block/blk-mq-sched.h
+@@ -25,8 +25,12 @@ void blk_mq_sched_free_rqs(struct request_queue *q);
+
+ struct elevator_tags *blk_mq_alloc_sched_tags(struct blk_mq_tag_set *set,
+ unsigned int nr_hw_queues);
++int blk_mq_alloc_sched_tags_batch(struct xarray *et_table,
++ struct blk_mq_tag_set *set, unsigned int nr_hw_queues);
+ void blk_mq_free_sched_tags(struct elevator_tags *et,
+ struct blk_mq_tag_set *set);
++void blk_mq_free_sched_tags_batch(struct xarray *et_table,
++ struct blk_mq_tag_set *set);
+
+ static inline void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
+ {
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index 32d11305d51b..4cb2f5ca8656 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4972,12 +4972,13 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr)
+ * Switch back to the elevator type stored in the xarray.
+ */
+ static void blk_mq_elv_switch_back(struct request_queue *q,
+- struct xarray *elv_tbl)
++ struct xarray *elv_tbl, struct xarray *et_tbl)
+ {
+ struct elevator_type *e = xa_load(elv_tbl, q->id);
++ struct elevator_tags *t = xa_load(et_tbl, q->id);
+
+ /* The elv_update_nr_hw_queues unfreezes the queue. */
+- elv_update_nr_hw_queues(q, e);
++ elv_update_nr_hw_queues(q, e, t);
+
+ /* Drop the reference acquired in blk_mq_elv_switch_none. */
+ if (e)
+@@ -5029,7 +5030,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ int prev_nr_hw_queues = set->nr_hw_queues;
+ unsigned int memflags;
+ int i;
+- struct xarray elv_tbl;
++ struct xarray elv_tbl, et_tbl;
+
+ lockdep_assert_held(&set->tag_list_lock);
+
+@@ -5042,6 +5043,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+
+ memflags = memalloc_noio_save();
+
++ xa_init(&et_tbl);
++ if (blk_mq_alloc_sched_tags_batch(&et_tbl, set, nr_hw_queues) < 0)
++ goto out_memalloc_restore;
++
+ xa_init(&elv_tbl);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+@@ -5085,7 +5090,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ switch_back:
+ /* The blk_mq_elv_switch_back unfreezes queue for us. */
+ list_for_each_entry(q, &set->tag_list, tag_set_list)
+- blk_mq_elv_switch_back(q, &elv_tbl);
++ blk_mq_elv_switch_back(q, &elv_tbl, &et_tbl);
+
+ list_for_each_entry(q, &set->tag_list, tag_set_list) {
+ blk_mq_sysfs_register_hctxs(q);
+@@ -5096,7 +5101,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
+ }
+
+ xa_destroy(&elv_tbl);
+-
++ xa_destroy(&et_tbl);
++out_memalloc_restore:
+ memalloc_noio_restore(memflags);
+
+ /* Free the excess tags when nr_hw_queues shrink. */
+diff --git a/block/blk.h b/block/blk.h
+index 4746a7704856..5d9ca8c95193 100644
+--- a/block/blk.h
++++ b/block/blk.h
+@@ -12,6 +12,7 @@
+ #include "blk-crypto-internal.h"
+
+ struct elevator_type;
++struct elevator_tags;
+
+ #define BLK_DEV_MAX_SECTORS (LLONG_MAX >> 9)
+ #define BLK_MIN_SEGMENT_SIZE 4096
+@@ -322,7 +323,8 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
+
+ bool blk_insert_flush(struct request *rq);
+
+-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e);
++void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
++ struct elevator_tags *t);
+ void elevator_set_default(struct request_queue *q);
+ void elevator_set_none(struct request_queue *q);
+
+diff --git a/block/elevator.c b/block/elevator.c
+index e9dc837b7b70..fe96c6f4753c 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -705,7 +705,8 @@ static int elevator_change(struct request_queue *q, struct elv_change_ctx *ctx)
+ * The I/O scheduler depends on the number of hardware queues, this forces a
+ * reattachment when nr_hw_queues changes.
+ */
+-void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
++void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e,
++ struct elevator_tags *t)
+ {
+ struct blk_mq_tag_set *set = q->tag_set;
+ struct elv_change_ctx ctx = {};
+@@ -715,25 +716,21 @@ void elv_update_nr_hw_queues(struct request_queue *q, struct elevator_type *e)
+
+ if (e && !blk_queue_dying(q) && blk_queue_registered(q)) {
+ ctx.name = e->elevator_name;
+- ctx.et = blk_mq_alloc_sched_tags(set, set->nr_hw_queues);
+- if (!ctx.et) {
+- WARN_ON_ONCE(1);
+- goto unfreeze;
+- }
++ ctx.et = t;
++
+ mutex_lock(&q->elevator_lock);
+ /* force to reattach elevator after nr_hw_queue is updated */
+ ret = elevator_switch(q, &ctx);
+ mutex_unlock(&q->elevator_lock);
+ }
+-unfreeze:
+ blk_mq_unfreeze_queue_nomemrestore(q);
+ if (!ret)
+ WARN_ON_ONCE(elevator_change_done(q, &ctx));
+ /*
+ * Free sched tags if it's allocated but we couldn't switch elevator.
+ */
+- if (ctx.et && !ctx.new)
+- blk_mq_free_sched_tags(ctx.et, set);
++ if (t && !ctx.new)
++ blk_mq_free_sched_tags(t, set);
+ }
+
+ /*
+--
+2.50.1
+
--- /dev/null
+From 945b9b6f75d747cbe4129f9a661355849a80ef3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:16:07 +0530
+Subject: block: move elevator queue allocation logic into blk_mq_init_sched
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 49811586be373e26a3ab52f54e0dfa663c02fddd ]
+
+In preparation for allocating sched_tags before freezing the request
+queue and acquiring ->elevator_lock, move the elevator queue allocation
+logic from the elevator ops ->init_sched callback into blk_mq_init_sched.
+As elevator_alloc is now only invoked from block layer core, we don't
+need to export it, so unexport elevator_alloc function.
+
+This refactoring provides a centralized location for elevator queue
+initialization, which makes it easier to store pre-allocated sched_tags
+in the struct elevator_queue during later changes.
+
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Link: https://lore.kernel.org/r/20250730074614.2537382-2-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 2d82f3bd8910 ("blk-mq: fix lockdep warning in __blk_mq_update_nr_hw_queues")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/bfq-iosched.c | 13 +++----------
+ block/blk-mq-sched.c | 11 ++++++++---
+ block/elevator.c | 1 -
+ block/elevator.h | 2 +-
+ block/kyber-iosched.c | 11 ++---------
+ block/mq-deadline.c | 14 ++------------
+ 6 files changed, 16 insertions(+), 36 deletions(-)
+
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index d68da9e92e1e..9483c529c958 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -7229,22 +7229,16 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+ root_group->sched_data.bfq_class_idle_last_service = jiffies;
+ }
+
+-static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
++static int bfq_init_queue(struct request_queue *q, struct elevator_queue *eq)
+ {
+ struct bfq_data *bfqd;
+- struct elevator_queue *eq;
+ unsigned int i;
+ struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges;
+
+- eq = elevator_alloc(q, e);
+- if (!eq)
+- return -ENOMEM;
+-
+ bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
+- if (!bfqd) {
+- kobject_put(&eq->kobj);
++ if (!bfqd)
+ return -ENOMEM;
+- }
++
+ eq->elevator_data = bfqd;
+
+ spin_lock_irq(&q->queue_lock);
+@@ -7402,7 +7396,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+
+ out_free:
+ kfree(bfqd);
+- kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+
+diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
+index 55a0fd105147..359e0704e09b 100644
+--- a/block/blk-mq-sched.c
++++ b/block/blk-mq-sched.c
+@@ -475,10 +475,14 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth,
+ BLKDEV_DEFAULT_RQ);
+
++ eq = elevator_alloc(q, e);
++ if (!eq)
++ return -ENOMEM;
++
+ if (blk_mq_is_shared_tags(flags)) {
+ ret = blk_mq_init_sched_shared_tags(q);
+ if (ret)
+- return ret;
++ goto err_put_elevator;
+ }
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+@@ -487,7 +491,7 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ goto err_free_map_and_rqs;
+ }
+
+- ret = e->ops.init_sched(q, e);
++ ret = e->ops.init_sched(q, eq);
+ if (ret)
+ goto err_free_map_and_rqs;
+
+@@ -508,7 +512,8 @@ int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e)
+ err_free_map_and_rqs:
+ blk_mq_sched_free_rqs(q);
+ blk_mq_sched_tags_teardown(q, flags);
+-
++err_put_elevator:
++ kobject_put(&eq->kobj);
+ q->elevator = NULL;
+ return ret;
+ }
+diff --git a/block/elevator.c b/block/elevator.c
+index 88f8f36bed98..939b0c590fbe 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -148,7 +148,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
+
+ return eq;
+ }
+-EXPORT_SYMBOL(elevator_alloc);
+
+ static void elevator_release(struct kobject *kobj)
+ {
+diff --git a/block/elevator.h b/block/elevator.h
+index a07ce773a38f..a4de5f9ad790 100644
+--- a/block/elevator.h
++++ b/block/elevator.h
+@@ -24,7 +24,7 @@ struct blk_mq_alloc_data;
+ struct blk_mq_hw_ctx;
+
+ struct elevator_mq_ops {
+- int (*init_sched)(struct request_queue *, struct elevator_type *);
++ int (*init_sched)(struct request_queue *, struct elevator_queue *);
+ void (*exit_sched)(struct elevator_queue *);
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
+index bfd9a40bb33d..70cbc7b2deb4 100644
+--- a/block/kyber-iosched.c
++++ b/block/kyber-iosched.c
+@@ -399,20 +399,13 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
+ return ERR_PTR(ret);
+ }
+
+-static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
++static int kyber_init_sched(struct request_queue *q, struct elevator_queue *eq)
+ {
+ struct kyber_queue_data *kqd;
+- struct elevator_queue *eq;
+-
+- eq = elevator_alloc(q, e);
+- if (!eq)
+- return -ENOMEM;
+
+ kqd = kyber_queue_data_alloc(q);
+- if (IS_ERR(kqd)) {
+- kobject_put(&eq->kobj);
++ if (IS_ERR(kqd))
+ return PTR_ERR(kqd);
+- }
+
+ blk_stat_enable_accounting(q);
+
+diff --git a/block/mq-deadline.c b/block/mq-deadline.c
+index 9ab6c6256695..b9b7cdf1d3c9 100644
+--- a/block/mq-deadline.c
++++ b/block/mq-deadline.c
+@@ -554,20 +554,14 @@ static void dd_exit_sched(struct elevator_queue *e)
+ /*
+ * initialize elevator private data (deadline_data).
+ */
+-static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
++static int dd_init_sched(struct request_queue *q, struct elevator_queue *eq)
+ {
+ struct deadline_data *dd;
+- struct elevator_queue *eq;
+ enum dd_prio prio;
+- int ret = -ENOMEM;
+-
+- eq = elevator_alloc(q, e);
+- if (!eq)
+- return ret;
+
+ dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
+ if (!dd)
+- goto put_eq;
++ return -ENOMEM;
+
+ eq->elevator_data = dd;
+
+@@ -594,10 +588,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
+
+ q->elevator = eq;
+ return 0;
+-
+-put_eq:
+- kobject_put(&eq->kobj);
+- return ret;
+ }
+
+ /*
+--
+2.50.1
+
--- /dev/null
+From 64da3038271741b128e7df4d63c7d4821e81c947 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 13:54:57 +0530
+Subject: block: skip q->rq_qos check in rq_qos_done_bio()
+
+From: Nilay Shroff <nilay@linux.ibm.com>
+
+[ Upstream commit 275332877e2fa9d6efa7402b1e897f6c6ee695bb ]
+
+If a bio has BIO_QOS_THROTTLED or BIO_QOS_MERGED set,
+it implicitly guarantees that q->rq_qos is present.
+Avoid re-checking q->rq_qos in this case and call
+__rq_qos_done_bio() directly as a minor optimization.
+
+Suggested-by: Yu Kuai <yukuai1@huaweicloud.com>
+
+Signed-off-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: Ming Lei <ming.lei@redhat.com>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Link: https://lore.kernel.org/r/20250814082612.500845-2-nilay@linux.ibm.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: 370ac285f23a ("block: avoid cpu_hotplug_lock depedency on freeze_lock")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-rq-qos.h | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
+index 39749f4066fb..28125fc49eff 100644
+--- a/block/blk-rq-qos.h
++++ b/block/blk-rq-qos.h
+@@ -142,8 +142,14 @@ static inline void rq_qos_done_bio(struct bio *bio)
+ bio->bi_bdev && (bio_flagged(bio, BIO_QOS_THROTTLED) ||
+ bio_flagged(bio, BIO_QOS_MERGED))) {
+ struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+- if (q->rq_qos)
+- __rq_qos_done_bio(q->rq_qos, bio);
++
++ /*
++ * If a bio has BIO_QOS_xxx set, it implicitly implies that
++ * q->rq_qos is present. So, we skip re-checking q->rq_qos
++ * here as an extra optimization and directly call
++ * __rq_qos_done_bio().
++ */
++ __rq_qos_done_bio(q->rq_qos, bio);
+ }
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 25d39162d6b3bb4787c8af8247da975e80a50641 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Jul 2025 18:52:47 +0800
+Subject: Bluetooth: Add PA_LINK to distinguish BIG sync and PA sync
+ connections
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit a7bcffc673de219af2698fbb90627016233de67b ]
+
+Currently, BIS_LINK is used for both BIG sync and PA sync connections,
+which makes it impossible to distinguish them when searching for a PA
+sync connection.
+
+Adding PA_LINK will make the distinction clearer and simplify future
+extensions for PA-related features.
+
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Stable-dep-of: 9d4b01a0bf8d ("Bluetooth: hci_core: Fix not accounting for BIS/CIS/PA links separately")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci.h | 1 +
+ include/net/bluetooth/hci_core.h | 10 +++++++---
+ net/bluetooth/hci_conn.c | 14 +++++++++-----
+ net/bluetooth/hci_core.c | 27 +++++++++++++++------------
+ net/bluetooth/hci_event.c | 7 ++++---
+ net/bluetooth/hci_sync.c | 10 +++++-----
+ net/bluetooth/iso.c | 6 ++++--
+ net/bluetooth/mgmt.c | 1 +
+ 8 files changed, 46 insertions(+), 30 deletions(-)
+
+diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
+index 8fa829873134..7d1ba92b71f6 100644
+--- a/include/net/bluetooth/hci.h
++++ b/include/net/bluetooth/hci.h
+@@ -562,6 +562,7 @@ enum {
+ #define LE_LINK 0x80
+ #define CIS_LINK 0x82
+ #define BIS_LINK 0x83
++#define PA_LINK 0x84
+ #define INVALID_LINK 0xff
+
+ /* LMP features */
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 2fcd62fdbc87..459f26d63451 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1015,6 +1015,7 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ h->iso_num++;
+ break;
+ }
+@@ -1042,6 +1043,7 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ h->iso_num--;
+ break;
+ }
+@@ -1060,6 +1062,7 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+ return h->sco_num;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ return h->iso_num;
+ default:
+ return 0;
+@@ -1142,7 +1145,7 @@ hci_conn_hash_lookup_create_pa_sync(struct hci_dev *hdev)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+- if (c->type != BIS_LINK)
++ if (c->type != PA_LINK)
+ continue;
+
+ if (!test_bit(HCI_CONN_CREATE_PA_SYNC, &c->flags))
+@@ -1337,7 +1340,7 @@ hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev,
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+- if (c->type != BIS_LINK)
++ if (c->type != PA_LINK)
+ continue;
+
+ if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) {
+@@ -1407,7 +1410,7 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle)
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+- if (c->type != BIS_LINK)
++ if (c->type != PA_LINK)
+ continue;
+
+ /* Ignore the listen hcon, we are looking
+@@ -2038,6 +2041,7 @@ static inline int hci_proto_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr,
+
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ return iso_connect_ind(hdev, bdaddr, flags);
+
+ default:
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 2d4cbc483e77..6a064a6b0e43 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -786,7 +786,7 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, struct hci_conn *c
+ d->sync_handle = conn->sync_handle;
+
+ if (test_and_clear_bit(HCI_CONN_PA_SYNC, &conn->flags)) {
+- hci_conn_hash_list_flag(hdev, find_bis, BIS_LINK,
++ hci_conn_hash_list_flag(hdev, find_bis, PA_LINK,
+ HCI_CONN_PA_SYNC, d);
+
+ if (!d->count)
+@@ -915,6 +915,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ if (hdev->iso_mtu)
+ /* Dedicated ISO Buffer exists */
+ break;
+@@ -980,6 +981,7 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ /* conn->src should reflect the local identity address */
+ hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
+
+@@ -1034,7 +1036,6 @@ static struct hci_conn *__hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t
+ }
+
+ hci_conn_init_sysfs(conn);
+-
+ return conn;
+ }
+
+@@ -1078,6 +1079,7 @@ static void hci_conn_cleanup_child(struct hci_conn *conn, u8 reason)
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ if ((conn->state != BT_CONNECTED &&
+ !test_bit(HCI_CONN_CREATE_CIS, &conn->flags)) ||
+ test_bit(HCI_CONN_BIG_CREATED, &conn->flags))
+@@ -1153,7 +1155,8 @@ void hci_conn_del(struct hci_conn *conn)
+ } else {
+ /* Unacked ISO frames */
+ if (conn->type == CIS_LINK ||
+- conn->type == BIS_LINK) {
++ conn->type == BIS_LINK ||
++ conn->type == PA_LINK) {
+ if (hdev->iso_pkts)
+ hdev->iso_cnt += conn->sent;
+ else if (hdev->le_pkts)
+@@ -2082,7 +2085,7 @@ struct hci_conn *hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst,
+
+ bt_dev_dbg(hdev, "dst %pMR type %d sid %d", dst, dst_type, sid);
+
+- conn = hci_conn_add_unset(hdev, BIS_LINK, dst, HCI_ROLE_SLAVE);
++ conn = hci_conn_add_unset(hdev, PA_LINK, dst, HCI_ROLE_SLAVE);
+ if (IS_ERR(conn))
+ return conn;
+
+@@ -2247,7 +2250,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ * the start periodic advertising and create BIG commands have
+ * been queued
+ */
+- hci_conn_hash_list_state(hdev, bis_mark_per_adv, BIS_LINK,
++ hci_conn_hash_list_state(hdev, bis_mark_per_adv, PA_LINK,
+ BT_BOUND, &data);
+
+ /* Queue start periodic advertising and create BIG */
+@@ -2981,6 +2984,7 @@ void hci_conn_tx_queue(struct hci_conn *conn, struct sk_buff *skb)
+ switch (conn->type) {
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ case ACL_LINK:
+ case LE_LINK:
+ break;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 441cb1700f99..0aa8a591ce42 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -2938,12 +2938,14 @@ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
+ case HCI_ACLDATA_PKT:
+ /* Detect if ISO packet has been sent as ACL */
+ if (hci_conn_num(hdev, CIS_LINK) ||
+- hci_conn_num(hdev, BIS_LINK)) {
++ hci_conn_num(hdev, BIS_LINK) ||
++ hci_conn_num(hdev, PA_LINK)) {
+ __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
+ __u8 type;
+
+ type = hci_conn_lookup_type(hdev, hci_handle(handle));
+- if (type == CIS_LINK || type == BIS_LINK)
++ if (type == CIS_LINK || type == BIS_LINK ||
++ type == PA_LINK)
+ hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
+ }
+ break;
+@@ -3398,6 +3400,7 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
+ break;
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ cnt = hdev->iso_mtu ? hdev->iso_cnt :
+ hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
+ break;
+@@ -3411,7 +3414,7 @@ static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
+ }
+
+ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+- __u8 type2, int *quote)
++ int *quote)
+ {
+ struct hci_conn_hash *h = &hdev->conn_hash;
+ struct hci_conn *conn = NULL, *c;
+@@ -3423,7 +3426,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(c, &h->list, list) {
+- if ((c->type != type && c->type != type2) ||
++ if (c->type != type ||
+ skb_queue_empty(&c->data_q))
+ continue;
+
+@@ -3627,7 +3630,7 @@ static void hci_sched_sco(struct hci_dev *hdev, __u8 type)
+ else
+ cnt = &hdev->sco_cnt;
+
+- while (*cnt && (conn = hci_low_sent(hdev, type, type, "e))) {
++ while (*cnt && (conn = hci_low_sent(hdev, type, "e))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+ hci_send_conn_frame(hdev, conn, skb);
+@@ -3746,8 +3749,8 @@ static void hci_sched_le(struct hci_dev *hdev)
+ hci_prio_recalculate(hdev, LE_LINK);
+ }
+
+-/* Schedule CIS */
+-static void hci_sched_iso(struct hci_dev *hdev)
++/* Schedule iso */
++static void hci_sched_iso(struct hci_dev *hdev, __u8 type)
+ {
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+@@ -3755,14 +3758,12 @@ static void hci_sched_iso(struct hci_dev *hdev)
+
+ BT_DBG("%s", hdev->name);
+
+- if (!hci_conn_num(hdev, CIS_LINK) &&
+- !hci_conn_num(hdev, BIS_LINK))
++ if (!hci_conn_num(hdev, type))
+ return;
+
+ cnt = hdev->iso_pkts ? &hdev->iso_cnt :
+ hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+- while (*cnt && (conn = hci_low_sent(hdev, CIS_LINK, BIS_LINK,
+- "e))) {
++ while (*cnt && (conn = hci_low_sent(hdev, type, "e))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+ hci_send_conn_frame(hdev, conn, skb);
+@@ -3787,7 +3788,9 @@ static void hci_tx_work(struct work_struct *work)
+ /* Schedule queues and send stuff to HCI driver */
+ hci_sched_sco(hdev, SCO_LINK);
+ hci_sched_sco(hdev, ESCO_LINK);
+- hci_sched_iso(hdev);
++ hci_sched_iso(hdev, CIS_LINK);
++ hci_sched_iso(hdev, BIS_LINK);
++ hci_sched_iso(hdev, PA_LINK);
+ hci_sched_acl(hdev);
+ hci_sched_le(hdev);
+ }
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index f93509007e92..5ef54853bc5e 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -4432,6 +4432,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data,
+
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ if (hdev->iso_pkts) {
+ hdev->iso_cnt += count;
+ if (hdev->iso_cnt > hdev->iso_pkts)
+@@ -6381,7 +6382,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ conn->sync_handle = le16_to_cpu(ev->handle);
+ conn->sid = HCI_SID_INVALID;
+
+- mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, BIS_LINK,
++ mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, PA_LINK,
+ &flags);
+ if (!(mask & HCI_LM_ACCEPT)) {
+ hci_le_pa_term_sync(hdev, ev->handle);
+@@ -6392,7 +6393,7 @@ static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data,
+ goto unlock;
+
+ /* Add connection to indicate PA sync event */
+- pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY,
++ pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
+
+ if (IS_ERR(pa_sync))
+@@ -6423,7 +6424,7 @@ static void hci_le_per_adv_report_evt(struct hci_dev *hdev, void *data,
+
+ hci_dev_lock(hdev);
+
+- mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, BIS_LINK, &flags);
++ mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, PA_LINK, &flags);
+ if (!(mask & HCI_LM_ACCEPT))
+ goto unlock;
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index a1b063fde286..115dc1cd99ce 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -2929,7 +2929,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+ if (sent) {
+ struct hci_conn *conn;
+
+- conn = hci_conn_hash_lookup_ba(hdev, BIS_LINK,
++ conn = hci_conn_hash_lookup_ba(hdev, PA_LINK,
+ &sent->bdaddr);
+ if (conn) {
+ struct bt_iso_qos *qos = &conn->iso_qos;
+@@ -5493,7 +5493,7 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ {
+ struct hci_cp_disconnect cp;
+
+- if (conn->type == BIS_LINK) {
++ if (conn->type == BIS_LINK || conn->type == PA_LINK) {
+ /* This is a BIS connection, hci_conn_del will
+ * do the necessary cleanup.
+ */
+@@ -5562,7 +5562,7 @@ static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ return HCI_ERROR_LOCAL_HOST_TERM;
+ }
+
+- if (conn->type == BIS_LINK) {
++ if (conn->type == BIS_LINK || conn->type == PA_LINK) {
+ /* There is no way to cancel a BIS without terminating the BIG
+ * which is done later on connection cleanup.
+ */
+@@ -5627,7 +5627,7 @@ static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+ if (conn->type == CIS_LINK)
+ return hci_le_reject_cis_sync(hdev, conn, reason);
+
+- if (conn->type == BIS_LINK)
++ if (conn->type == BIS_LINK || conn->type == PA_LINK)
+ return -EINVAL;
+
+ if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
+@@ -6992,7 +6992,7 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+ goto unlock;
+
+ /* Add connection to indicate PA sync error */
+- pa_sync = hci_conn_add_unset(hdev, BIS_LINK, BDADDR_ANY,
++ pa_sync = hci_conn_add_unset(hdev, PA_LINK, BDADDR_ANY,
+ HCI_ROLE_SLAVE);
+
+ if (IS_ERR(pa_sync))
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index f6fad466d16d..14a4215352d5 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2226,7 +2226,8 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+
+ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
+- if (hcon->type != CIS_LINK && hcon->type != BIS_LINK) {
++ if (hcon->type != CIS_LINK && hcon->type != BIS_LINK &&
++ hcon->type != PA_LINK) {
+ if (hcon->type != LE_LINK)
+ return;
+
+@@ -2267,7 +2268,8 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+
+ static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
+- if (hcon->type != CIS_LINK && hcon->type != BIS_LINK)
++ if (hcon->type != CIS_LINK && hcon->type != BIS_LINK &&
++ hcon->type != PA_LINK)
+ return;
+
+ BT_DBG("hcon %p reason %d", hcon, reason);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index d4405d3d9bc1..3166f5fb876b 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -3237,6 +3237,7 @@ static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
+ switch (link_type) {
+ case CIS_LINK:
+ case BIS_LINK:
++ case PA_LINK:
+ case LE_LINK:
+ switch (addr_type) {
+ case ADDR_LE_DEV_PUBLIC:
+--
+2.50.1
+
--- /dev/null
+From 0dd88ba3823493458128f62462f845fbf3d831af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 16:51:17 +0800
+Subject: Bluetooth: btmtk: Fix wait_on_bit_timeout interruption during
+ shutdown
+
+From: Jiande Lu <jiande.lu@mediatek.com>
+
+[ Upstream commit 099799fa9b76c5c02b49e07005a85117a25b01ea ]
+
+During the shutdown process, an interrupt occurs that
+prematurely terminates the wait for the expected event.
+This change replaces TASK_INTERRUPTIBLE with
+TASK_UNINTERRUPTIBLE in the wait_on_bit_timeout call to ensure
+the shutdown process completes as intended without being
+interrupted by signals.
+
+Fixes: d019930b0049 ("Bluetooth: btmtk: move btusb_mtk_hci_wmt_sync to btmtk.c")
+Signed-off-by: Jiande Lu <jiande.lu@mediatek.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btmtk.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c
+index 4390fd571dbd..a8c520dc09e1 100644
+--- a/drivers/bluetooth/btmtk.c
++++ b/drivers/bluetooth/btmtk.c
+@@ -642,12 +642,7 @@ static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev,
+ * WMT command.
+ */
+ err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT,
+- TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT);
+- if (err == -EINTR) {
+- bt_dev_err(hdev, "Execution of wmt command interrupted");
+- clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags);
+- goto err_free_wc;
+- }
++ TASK_UNINTERRUPTIBLE, HCI_INIT_TIMEOUT);
+
+ if (err) {
+ bt_dev_err(hdev, "Execution of wmt command timed out");
+--
+2.50.1
+
--- /dev/null
+From cd0645c05c16a0f3c40fc0752d90afd1997e8829 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 22:14:51 +0300
+Subject: Bluetooth: hci_conn: do return error from hci_enhanced_setup_sync()
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+[ Upstream commit 0eaf7c7e85da7495c0e03a99375707fc954f5e7b ]
+
+The commit e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to
+hci_sync") missed to update the *return* statement under the *case* of
+BT_CODEC_TRANSPARENT in hci_enhanced_setup_sync(), which led to returning
+success (0) instead of the negative error code (-EINVAL). However, the
+result of hci_enhanced_setup_sync() seems to be ignored anyway, since NULL
+gets passed to hci_cmd_sync_queue() as the last argument in that case and
+the only function interested in that result is specified by that argument.
+
+Fixes: e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to hci_sync")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index f5cd935490ad..2d4cbc483e77 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -339,7 +339,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ case BT_CODEC_TRANSPARENT:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+- return false;
++ return -EINVAL;
++
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x03;
+ cp.rx_coding_format.id = 0x03;
+--
+2.50.1
+
--- /dev/null
+From da0bc1aa5fc0557452fddfbad1717da88b789977 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 11:57:19 -0400
+Subject: Bluetooth: hci_core: Fix not accounting for BIS/CIS/PA links
+ separately
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 9d4b01a0bf8d2163ae129c9c537cb0753ad5a2aa ]
+
+This fixes the likes of hci_conn_num(CIS_LINK) returning the total of
+ISO connection which includes BIS_LINK as well, so this splits the
+iso_num into each link type and introduces hci_iso_num that can be used
+in places where the total number of ISO connection still needs to be
+used.
+
+Fixes: 23205562ffc8 ("Bluetooth: separate CIS_LINK and BIS_LINK link types")
+Fixes: a7bcffc673de ("Bluetooth: Add PA_LINK to distinguish BIG sync and PA sync connections")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci_core.h | 30 +++++++++++++++++++++++++-----
+ 1 file changed, 25 insertions(+), 5 deletions(-)
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index 459f26d63451..439bc124ce70 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -129,7 +129,9 @@ struct hci_conn_hash {
+ struct list_head list;
+ unsigned int acl_num;
+ unsigned int sco_num;
+- unsigned int iso_num;
++ unsigned int cis_num;
++ unsigned int bis_num;
++ unsigned int pa_num;
+ unsigned int le_num;
+ unsigned int le_num_peripheral;
+ };
+@@ -1014,9 +1016,13 @@ static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c)
+ h->sco_num++;
+ break;
+ case CIS_LINK:
++ h->cis_num++;
++ break;
+ case BIS_LINK:
++ h->bis_num++;
++ break;
+ case PA_LINK:
+- h->iso_num++;
++ h->pa_num++;
+ break;
+ }
+ }
+@@ -1042,9 +1048,13 @@ static inline void hci_conn_hash_del(struct hci_dev *hdev, struct hci_conn *c)
+ h->sco_num--;
+ break;
+ case CIS_LINK:
++ h->cis_num--;
++ break;
+ case BIS_LINK:
++ h->bis_num--;
++ break;
+ case PA_LINK:
+- h->iso_num--;
++ h->pa_num--;
+ break;
+ }
+ }
+@@ -1061,9 +1071,11 @@ static inline unsigned int hci_conn_num(struct hci_dev *hdev, __u8 type)
+ case ESCO_LINK:
+ return h->sco_num;
+ case CIS_LINK:
++ return h->cis_num;
+ case BIS_LINK:
++ return h->bis_num;
+ case PA_LINK:
+- return h->iso_num;
++ return h->pa_num;
+ default:
+ return 0;
+ }
+@@ -1073,7 +1085,15 @@ static inline unsigned int hci_conn_count(struct hci_dev *hdev)
+ {
+ struct hci_conn_hash *c = &hdev->conn_hash;
+
+- return c->acl_num + c->sco_num + c->le_num + c->iso_num;
++ return c->acl_num + c->sco_num + c->le_num + c->cis_num + c->bis_num +
++ c->pa_num;
++}
++
++static inline unsigned int hci_iso_count(struct hci_dev *hdev)
++{
++ struct hci_conn_hash *c = &hdev->conn_hash;
++
++ return c->cis_num + c->bis_num;
+ }
+
+ static inline bool hci_conn_valid(struct hci_dev *hdev, struct hci_conn *conn)
+--
+2.50.1
+
--- /dev/null
+From 7040115aa6d8e2253c041ae903151f6f36ddd697 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 09:54:05 -0400
+Subject: Bluetooth: hci_core: Fix using {cis,bis}_capable for current settings
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 709788b154caf042874d765628ffa860f0bb0d1e ]
+
+{cis,bis}_capable only indicates the controller supports the feature
+since it doesn't check that LE is enabled so it shall not be used for
+current setting, instead this introduces {cis,bis}_enabled macros that
+can be used to indicate that these features are currently enabled.
+
+Fixes: 26afbd826ee3 ("Bluetooth: Add initial implementation of CIS connections")
+Fixes: eca0ae4aea66 ("Bluetooth: Add initial implementation of BIS connections")
+Fixes: ae7533613133 ("Bluetooth: Check for ISO support in controller")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/bluetooth.h | 4 ++--
+ include/net/bluetooth/hci_core.h | 13 ++++++++++++-
+ net/bluetooth/hci_sync.c | 4 ++--
+ net/bluetooth/iso.c | 14 +++++++-------
+ net/bluetooth/mgmt.c | 10 +++++-----
+ 5 files changed, 28 insertions(+), 17 deletions(-)
+
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index 114299bd8b98..b847cdd2c9d3 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -638,7 +638,7 @@ static inline void sco_exit(void)
+ #if IS_ENABLED(CONFIG_BT_LE)
+ int iso_init(void);
+ int iso_exit(void);
+-bool iso_enabled(void);
++bool iso_inited(void);
+ #else
+ static inline int iso_init(void)
+ {
+@@ -650,7 +650,7 @@ static inline int iso_exit(void)
+ return 0;
+ }
+
+-static inline bool iso_enabled(void)
++static inline bool iso_inited(void)
+ {
+ return false;
+ }
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index f22881bf1b39..e77cb840deee 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1932,6 +1932,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ !hci_dev_test_flag(dev, HCI_RPA_EXPIRED))
+ #define adv_rpa_valid(adv) (bacmp(&adv->random_addr, BDADDR_ANY) && \
+ !adv->rpa_expired)
++#define le_enabled(dev) (lmp_le_capable(dev) && \
++ hci_dev_test_flag(dev, HCI_LE_ENABLED))
+
+ #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+@@ -1998,14 +2000,23 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+
+ /* CIS Master/Slave and BIS support */
+ #define iso_capable(dev) (cis_capable(dev) || bis_capable(dev))
++#define iso_enabled(dev) (le_enabled(dev) && iso_capable(dev))
+ #define cis_capable(dev) \
+ (cis_central_capable(dev) || cis_peripheral_capable(dev))
++#define cis_enabled(dev) (le_enabled(dev) && cis_capable(dev))
+ #define cis_central_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_CENTRAL)
++#define cis_central_enabled(dev) \
++ (le_enabled(dev) && cis_central_capable(dev))
+ #define cis_peripheral_capable(dev) \
+ ((dev)->le_features[3] & HCI_LE_CIS_PERIPHERAL)
++#define cis_peripheral_enabled(dev) \
++ (le_enabled(dev) && cis_peripheral_capable(dev))
+ #define bis_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_BROADCASTER)
+-#define sync_recv_capable(dev) ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
++#define bis_enabled(dev) (le_enabled(dev) && bis_capable(dev))
++#define sync_recv_capable(dev) \
++ ((dev)->le_features[3] & HCI_LE_ISO_SYNC_RECEIVER)
++#define sync_recv_enabled(dev) (le_enabled(dev) && sync_recv_capable(dev))
+
+ #define mws_transport_config_capable(dev) (((dev)->commands[30] & 0x08) && \
+ (!hci_test_quirk((dev), HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG)))
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 6c3218bac116..892eca21c6c4 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -4531,14 +4531,14 @@ static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
+ {
+ struct hci_cp_le_set_host_feature cp;
+
+- if (!cis_capable(hdev))
++ if (!iso_capable(hdev))
+ return 0;
+
+ memset(&cp, 0, sizeof(cp));
+
+ /* Connected Isochronous Channels (Host Support) */
+ cp.bit_number = 32;
+- cp.bit_value = 1;
++ cp.bit_value = iso_enabled(hdev) ? 0x01 : 0x00;
+
+ return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
+ sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 3c2c98eecc62..f6fad466d16d 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -2455,11 +2455,11 @@ static const struct net_proto_family iso_sock_family_ops = {
+ .create = iso_sock_create,
+ };
+
+-static bool iso_inited;
++static bool inited;
+
+-bool iso_enabled(void)
++bool iso_inited(void)
+ {
+- return iso_inited;
++ return inited;
+ }
+
+ int iso_init(void)
+@@ -2468,7 +2468,7 @@ int iso_init(void)
+
+ BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr));
+
+- if (iso_inited)
++ if (inited)
+ return -EALREADY;
+
+ err = proto_register(&iso_proto, 0);
+@@ -2496,7 +2496,7 @@ int iso_init(void)
+ iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs,
+ NULL, &iso_debugfs_fops);
+
+- iso_inited = true;
++ inited = true;
+
+ return 0;
+
+@@ -2507,7 +2507,7 @@ int iso_init(void)
+
+ int iso_exit(void)
+ {
+- if (!iso_inited)
++ if (!inited)
+ return -EALREADY;
+
+ bt_procfs_cleanup(&init_net, "iso");
+@@ -2521,7 +2521,7 @@ int iso_exit(void)
+
+ proto_unregister(&iso_proto);
+
+- iso_inited = false;
++ inited = false;
+
+ return 0;
+ }
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 63dba0503653..7a75309e6fd4 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -922,16 +922,16 @@ static u32 get_current_settings(struct hci_dev *hdev)
+ if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
+ settings |= MGMT_SETTING_WIDEBAND_SPEECH;
+
+- if (cis_central_capable(hdev))
++ if (cis_central_enabled(hdev))
+ settings |= MGMT_SETTING_CIS_CENTRAL;
+
+- if (cis_peripheral_capable(hdev))
++ if (cis_peripheral_enabled(hdev))
+ settings |= MGMT_SETTING_CIS_PERIPHERAL;
+
+- if (bis_capable(hdev))
++ if (bis_enabled(hdev))
+ settings |= MGMT_SETTING_ISO_BROADCASTER;
+
+- if (sync_recv_capable(hdev))
++ if (sync_recv_enabled(hdev))
+ settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
+
+ if (ll_privacy_capable(hdev))
+@@ -4512,7 +4512,7 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
+ }
+
+ if (IS_ENABLED(CONFIG_BT_LE)) {
+- flags = iso_enabled() ? BIT(0) : 0;
++ flags = iso_inited() ? BIT(0) : 0;
+ memcpy(rp->features[idx].uuid, iso_socket_uuid, 16);
+ rp->features[idx].flags = cpu_to_le32(flags);
+ idx++;
+--
+2.50.1
+
--- /dev/null
+From 47803d17594cec3b5984a88ad0d877ac0a49fda7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:05:03 -0400
+Subject: Bluetooth: hci_core: Fix using ll_privacy_capable for current
+ settings
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 3dcf7175f2c04bd3a7d50db3fa42a0bd933b6e23 ]
+
+ll_privacy_capable only indicates that the controller supports the
+feature but it doesn't check that LE is enabled, so it ends up being
+marked as active in the current settings when it shouldn't.
+
+Fixes: ad383c2c65a5 ("Bluetooth: hci_sync: Enable advertising when LL privacy is enabled")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci_core.h | 1 +
+ net/bluetooth/mgmt.c | 2 +-
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e77cb840deee..2fcd62fdbc87 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -1951,6 +1951,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+
+ #define ll_privacy_capable(dev) ((dev)->le_features[0] & HCI_LE_LL_PRIVACY)
++#define ll_privacy_enabled(dev) (le_enabled(dev) && ll_privacy_capable(dev))
+
+ #define privacy_mode_capable(dev) (ll_privacy_capable(dev) && \
+ ((dev)->commands[39] & 0x04))
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 7a75309e6fd4..d4405d3d9bc1 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -934,7 +934,7 @@ static u32 get_current_settings(struct hci_dev *hdev)
+ if (sync_recv_enabled(hdev))
+ settings |= MGMT_SETTING_ISO_SYNC_RECEIVER;
+
+- if (ll_privacy_capable(hdev))
++ if (ll_privacy_enabled(hdev))
+ settings |= MGMT_SETTING_LL_PRIVACY;
+
+ return settings;
+--
+2.50.1
+
--- /dev/null
+From fd4f7b40edac55dbfe87ec632570f5c35ba50b42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 Aug 2025 11:36:20 +0300
+Subject: Bluetooth: hci_event: fix MTU for BN == 0 in CIS Established
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 0b3725dbf61b51e7c663834811b3691157ae17d6 ]
+
+BN == 0x00 in CIS Established means no isochronous data for the
+corresponding direction (Core v6.1 pp. 2394). In this case SDU MTU
+should be 0.
+
+However, the specification does not say the Max_PDU_C_To_P or P_To_C are
+then zero. Intel AX210 in Framed CIS mode sets nonzero Max_PDU for
+direction with zero BN. This causes failure later when we try to LE
+Setup ISO Data Path for disabled direction, which is disallowed (Core
+v6.1 pp. 2750).
+
+Fix by setting SDU MTU to 0 if BN == 0.
+
+Fixes: 2be22f1941d5f ("Bluetooth: hci_event: Fix parsing of CIS Established Event")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_event.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index b83995898da0..f93509007e92 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6744,8 +6744,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.in.phy = ev->c_phy;
+ qos->ucast.out.phy = ev->p_phy;
+ break;
+@@ -6759,8 +6759,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.out.phy = ev->c_phy;
+ qos->ucast.in.phy = ev->p_phy;
+ break;
+--
+2.50.1
+
--- /dev/null
+From 7a39dd678186d1192fbdc164a0ce0160c920bd0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 16:43:18 -0400
+Subject: Bluetooth: hci_sync: Fix scan state after PA Sync has been
+ established
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit ca88be1a2725a42f8dbad579181611d9dcca8e88 ]
+
+Passive scanning is used to program the address of the peer to be
+synchronized, so once HCI_EV_LE_PA_SYNC_ESTABLISHED is received it
+needs to be updated after clearing HCI_PA_SYNC then call
+hci_update_passive_scan_sync to return it to its original state.
+
+Fixes: 6d0417e4e1cf ("Bluetooth: hci_conn: Fix not setting conn_timeout for Broadcast Receiver")
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 7938c004071c..6c3218bac116 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -6985,8 +6985,6 @@ static void create_pa_complete(struct hci_dev *hdev, void *data, int err)
+
+ hci_dev_lock(hdev);
+
+- hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+-
+ if (!hci_conn_valid(hdev, conn))
+ clear_bit(HCI_CONN_CREATE_PA_SYNC, &conn->flags);
+
+@@ -7080,6 +7078,11 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
++ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
++
++ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
++ hci_update_passive_scan_sync(hdev);
++
+ return err;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 9c8bcce0f7ec580ce9dcaddac1410bc3486a2810 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 15:56:03 +0800
+Subject: Bluetooth: hci_sync: Prevent unintended PA sync when SID is 0xFF
+
+From: Yang Li <yang.li@amlogic.com>
+
+[ Upstream commit 4d19cd228bbe8ff84a63fe7b11bc756b4b4370c7 ]
+
+After LE Extended Scan times out, conn->sid remains 0xFF,
+so the PA sync creation process should be aborted.
+
+Btmon snippet from PA sync with SID=0xFF:
+
+< HCI Command: LE Set Extended.. (0x08|0x0042) plen 6 #74726 [hci0] 863.107927
+ Extended scan: Enabled (0x01)
+ Filter duplicates: Enabled (0x01)
+ Duration: 0 msec (0x0000)
+ Period: 0.00 sec (0x0000)
+> HCI Event: Command Complete (0x0e) plen 4 #74727 [hci0] 863.109389
+ LE Set Extended Scan Enable (0x08|0x0042) ncmd 1
+ Status: Success (0x00)
+< HCI Command: LE Periodic Ad.. (0x08|0x0044) plen 14 #74728 [hci0] 865.141168
+ Options: 0x0000
+ Use advertising SID, Advertiser Address Type and address
+ Reporting initially enabled
+ SID: 0xff
+ Adv address type: Random (0x01)
+ Adv address: 0D:D7:2C:E7:42:46 (Non-Resolvable)
+ Skip: 0x0000
+ Sync timeout: 20000 msec (0x07d0)
+ Sync CTE type: 0x0000
+> HCI Event: Command Status (0x0f) plen 4 #74729 [hci0] 865.143223
+ LE Periodic Advertising Create Sync (0x08|0x0044) ncmd 1
+ Status: Success (0x00)
+
+Fixes: e2d471b7806b ("Bluetooth: ISO: Fix not using SID from adv report")
+Signed-off-by: Yang Li <yang.li@amlogic.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_sync.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
+index 892eca21c6c4..a1b063fde286 100644
+--- a/net/bluetooth/hci_sync.c
++++ b/net/bluetooth/hci_sync.c
+@@ -7045,10 +7045,13 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ /* SID has not been set listen for HCI_EV_LE_EXT_ADV_REPORT to update
+ * it.
+ */
+- if (conn->sid == HCI_SID_INVALID)
+- __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
+- HCI_EV_LE_EXT_ADV_REPORT,
+- conn->conn_timeout, NULL);
++ if (conn->sid == HCI_SID_INVALID) {
++ err = __hci_cmd_sync_status_sk(hdev, HCI_OP_NOP, 0, NULL,
++ HCI_EV_LE_EXT_ADV_REPORT,
++ conn->conn_timeout, NULL);
++ if (err == -ETIMEDOUT)
++ goto done;
++ }
+
+ memset(&cp, 0, sizeof(cp));
+ cp.options = qos->bcast.options;
+@@ -7078,6 +7081,7 @@ static int hci_le_pa_create_sync(struct hci_dev *hdev, void *data)
+ __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC_CANCEL,
+ 0, NULL, HCI_CMD_TIMEOUT);
+
++done:
+ hci_dev_clear_flag(hdev, HCI_PA_SYNC);
+
+ /* Update passive scan since HCI_PA_SYNC flag has been cleared */
+--
+2.50.1
+
--- /dev/null
+From 9fdedc79405e40b7a8ab78fea0792d54f3d9c76c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Aug 2025 11:38:50 -0700
+Subject: bnxt_en: Fix lockdep warning during rmmod
+
+From: Michael Chan <michael.chan@broadcom.com>
+
+[ Upstream commit 4611d88a37cfc18cbabc6978aaf7325d1ae3f53a ]
+
+The commit under the Fixes tag added a netdev_assert_locked() in
+bnxt_free_ntp_fltrs(). The lock should be held during normal run-time
+but the assert will be triggered (see below) during bnxt_remove_one()
+which should not need the lock. The netdev is already unregistered by
+then. Fix it by calling netdev_assert_locked_or_invisible() which will
+not assert if the netdev is unregistered.
+
+WARNING: CPU: 5 PID: 2241 at ./include/net/netdev_lock.h:17 bnxt_free_ntp_fltrs+0xf8/0x100 [bnxt_en]
+Modules linked in: rpcrdma rdma_cm iw_cm ib_cm configfs ib_core bnxt_en(-) bridge stp llc x86_pkg_temp_thermal xfs tg3 [last unloaded: bnxt_re]
+CPU: 5 UID: 0 PID: 2241 Comm: rmmod Tainted: G S W 6.16.0 #2 PREEMPT(voluntary)
+Tainted: [S]=CPU_OUT_OF_SPEC, [W]=WARN
+Hardware name: Dell Inc. PowerEdge R730/072T6D, BIOS 2.4.3 01/17/2017
+RIP: 0010:bnxt_free_ntp_fltrs+0xf8/0x100 [bnxt_en]
+Code: 41 5c 41 5d 41 5e 41 5f c3 cc cc cc cc 48 8b 47 60 be ff ff ff ff 48 8d b8 28 0c 00 00 e8 d0 cf 41 c3 85 c0 0f 85 2e ff ff ff <0f> 0b e9 27 ff ff ff 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90
+RSP: 0018:ffffa92082387da0 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: ffff9e5b593d8000 RCX: 0000000000000001
+RDX: 0000000000000001 RSI: ffffffff83dc9a70 RDI: ffffffff83e1a1cf
+RBP: ffff9e5b593d8c80 R08: 0000000000000000 R09: ffffffff8373a2b3
+R10: 000000008100009f R11: 0000000000000001 R12: 0000000000000001
+R13: ffffffffc01c4478 R14: dead000000000122 R15: dead000000000100
+FS: 00007f3a8a52c740(0000) GS:ffff9e631ad1c000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000055bb289419c8 CR3: 000000011274e001 CR4: 00000000003706f0
+Call Trace:
+ <TASK>
+ bnxt_remove_one+0x57/0x180 [bnxt_en]
+ pci_device_remove+0x39/0xc0
+ device_release_driver_internal+0xa5/0x130
+ driver_detach+0x42/0x90
+ bus_remove_driver+0x61/0xc0
+ pci_unregister_driver+0x38/0x90
+ bnxt_exit+0xc/0x7d0 [bnxt_en]
+
+Fixes: 004b5008016a ("eth: bnxt: remove most dependencies on RTNL")
+Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
+Signed-off-by: Michael Chan <michael.chan@broadcom.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250816183850.4125033-1-michael.chan@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 25681c2343fb..ec8752c298e6 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -5325,7 +5325,7 @@ static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool all)
+ {
+ int i;
+
+- netdev_assert_locked(bp->dev);
++ netdev_assert_locked_or_invisible(bp->dev);
+
+ /* Under netdev instance lock and all our NAPIs have been disabled.
+ * It's safe to delete the hash table.
+--
+2.50.1
+
--- /dev/null
+From 031283998a2fc532c2afa9ff7bd06d617b46b683 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:59 +0000
+Subject: bonding: send LACPDUs periodically in passive mode after receiving
+ partner's LACPDU
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 0599640a21e98f0d6a3e9ff85c0a687c90a8103b ]
+
+When `lacp_active` is set to `off`, the bond operates in passive mode, meaning
+it only "speaks when spoken to." However, the current kernel implementation
+only sends an LACPDU in response when the partner's state changes.
+
+As a result, once LACP negotiation succeeds, the actor stops sending LACPDUs
+until the partner times out and sends an "expired" LACPDU. This causes
+continuous LACP state flapping.
+
+According to IEEE 802.1AX-2014, 6.4.13 Periodic Transmission machine. The
+values of Partner_Oper_Port_State.LACP_Activity and
+Actor_Oper_Port_State.LACP_Activity determine whether periodic transmissions
+take place. If either or both parameters are set to Active LACP, then periodic
+transmissions occur; if both are set to Passive LACP, then periodic
+transmissions do not occur.
+
+To comply with this, we remove the `!bond->params.lacp_active` check in
+`ad_periodic_machine()`. Instead, we initialize the actor's port's
+`LACP_STATE_LACP_ACTIVITY` state based on `lacp_active` setting.
+
+Additionally, we avoid setting the partner's state to
+`LACP_STATE_LACP_ACTIVITY` in the EXPIRED state, since we should not assume
+the partner is active by default.
+
+This ensures that in passive mode, the bond starts sending periodic LACPDUs
+after receiving one from the partner, and avoids flapping due to inactivity.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 42 +++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index a51305423d28..4c2560ae8866 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -95,13 +95,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr);
+ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+ static void ad_tx_machine(struct port *port);
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
++static void ad_periodic_machine(struct port *port);
+ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
+ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ bool *update_slave_arr);
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+-static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+ static void ad_enable_collecting(struct port *port);
+ static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
+@@ -1296,10 +1296,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
+- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->sm_vars &= ~AD_PORT_MATCHED;
++ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
++ * machine state diagram, the statue should be
++ * Partner_Oper_Port_State.Synchronization = FALSE;
++ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
++ * start current_while_timer(Short Timeout);
++ * Actor_Oper_Port_State.Expired = TRUE;
++ */
++ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
+ port->sm_vars |= AD_PORT_CHURNED;
+@@ -1405,11 +1411,10 @@ static void ad_tx_machine(struct port *port)
+ /**
+ * ad_periodic_machine - handle a port's periodic state machine
+ * @port: the port we're looking at
+- * @bond_params: bond parameters we will use
+ *
+ * Turn ntt flag on priodically to perform periodic transmission of lacpdu's.
+ */
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
++static void ad_periodic_machine(struct port *port)
+ {
+ periodic_states_t last_state;
+
+@@ -1418,8 +1423,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
+
+ /* check if port was reinitialized */
+ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+- !bond_params->lacp_active) {
++ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
+ port->sm_periodic_state = AD_NO_PERIODIC;
+ }
+ /* check if state machine should change state */
+@@ -1943,16 +1947,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
+ /**
+ * ad_initialize_port - initialize a given port's parameters
+ * @port: the port we're looking at
+- * @lacp_fast: boolean. whether fast periodic should be used
++ * @bond_params: bond parameters we will use
+ */
+-static void ad_initialize_port(struct port *port, int lacp_fast)
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
+ {
+ static const struct port_params tmpl = {
+ .system_priority = 0xffff,
+ .key = 1,
+ .port_number = 1,
+ .port_priority = 0xff,
+- .port_state = 1,
++ .port_state = 0,
+ };
+ static const struct lacpdu lacpdu = {
+ .subtype = 0x01,
+@@ -1970,12 +1974,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ port->actor_port_priority = 0xff;
+ port->actor_port_aggregator_identifier = 0;
+ port->ntt = false;
+- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
+- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
++ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
++ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
++ if (bond_params->lacp_active) {
++ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ }
+
+- if (lacp_fast)
++ if (bond_params->lacp_fast)
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
+
+ memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
+@@ -2187,7 +2193,7 @@ void bond_3ad_bind_slave(struct slave *slave)
+ /* port initialization */
+ port = &(SLAVE_AD_INFO(slave)->port);
+
+- ad_initialize_port(port, bond->params.lacp_fast);
++ ad_initialize_port(port, &bond->params);
+
+ port->slave = slave;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
+@@ -2499,7 +2505,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ }
+
+ ad_rx_machine(NULL, port);
+- ad_periodic_machine(port, &bond->params);
++ ad_periodic_machine(port);
+ ad_port_selection_logic(port, &update_slave_arr);
+ ad_mux_machine(port, &update_slave_arr);
+ ad_tx_machine(port);
+--
+2.50.1
+
--- /dev/null
+From 217bd833f3037fdd4f8b5f2515a6f5677f2670a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:58 +0000
+Subject: bonding: update LACP activity flag after setting lacp_active
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit b64d035f77b1f02ab449393342264b44950a75ae ]
+
+The port's actor_oper_port_state activity flag should be updated immediately
+after changing the lacp_active option to reflect the current mode correctly.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 25 +++++++++++++++++++++++++
+ drivers/net/bonding/bond_options.c | 1 +
+ include/net/bond_3ad.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index c6807e473ab7..a51305423d28 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2869,6 +2869,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_3ad_update_lacp_active - change the lacp active
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++ struct port *port = NULL;
++ struct list_head *iter;
++ struct slave *slave;
++ int lacp_active;
++
++ lacp_active = bond->params.lacp_active;
++ spin_lock_bh(&bond->mode_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ port = &(SLAVE_AD_INFO(slave)->port);
++ if (lacp_active)
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ else
++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++ }
++ spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 91893c29b899..28c53f1b1382 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1637,6 +1637,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
++ bond_3ad_update_lacp_active(bond);
+
+ return 0;
+ }
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 2053cd8e788a..dba369a2cf27 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -307,6 +307,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+--
+2.50.1
+
--- /dev/null
+From 53fbafaa780faf6291106d667a420d5b93313b22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:29 -0400
+Subject: cgroup/cpuset: Fix a partition error with CPU hotplug
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 150e298ae0ccbecff2357a72fbabd80f8849ea6e ]
+
+It was found during testing that an invalid leaf partition with an
+empty effective exclusive CPU list can become a valid empty partition
+with no CPU after an offline/online operation of an unrelated CPU. An
+empty partition root is allowed in the special case that it has no
+task in its cgroup and has distributed out all its CPUs to its child
+partitions. That is certainly not the case here.
+
+The problem is in the cpumask_subsets() test in the hotplug case
+(update with no new mask) of update_parent_effective_cpumask() as it
+also returns true if the effective exclusive CPU list is empty. Fix that
+by adding the cpumask_empty() test to root out this exception case.
+Also add the cpumask_empty() test in cpuset_hotplug_update_tasks()
+to avoid calling update_parent_effective_cpumask() for this special case.
+
+Fixes: 0c7f293efc87 ("cgroup/cpuset: Add cpuset.cpus.exclusive.effective for v2")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 77396bab071a..f9d7799c5c94 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -1843,7 +1843,7 @@ static int update_parent_effective_cpumask(struct cpuset *cs, int cmd,
+ if (is_partition_valid(cs))
+ adding = cpumask_and(tmp->addmask,
+ xcpus, parent->effective_xcpus);
+- } else if (is_partition_invalid(cs) &&
++ } else if (is_partition_invalid(cs) && !cpumask_empty(xcpus) &&
+ cpumask_subset(xcpus, parent->effective_xcpus)) {
+ struct cgroup_subsys_state *css;
+ struct cpuset *child;
+@@ -3870,9 +3870,10 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp)
+ partcmd = partcmd_invalidate;
+ /*
+ * On the other hand, an invalid partition root may be transitioned
+- * back to a regular one.
++ * back to a regular one with a non-empty effective xcpus.
+ */
+- else if (is_partition_valid(parent) && is_partition_invalid(cs))
++ else if (is_partition_valid(parent) && is_partition_invalid(cs) &&
++ !cpumask_empty(cs->effective_xcpus))
+ partcmd = partcmd_update;
+
+ if (partcmd >= 0) {
+--
+2.50.1
+
--- /dev/null
+From a2cefb5619ed621910f891e9d291f9189d5d849b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index 3bc4301466f3..77396bab071a 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -280,7 +280,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From 943eaf1a97750d4f520158d3943e60df17d23229 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 16:27:36 +0100
+Subject: cifs: Fix oops due to uninitialised variable
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 453a6d2a68e54a483d67233c6e1e24c4095ee4be ]
+
+Fix smb3_init_transform_rq() to initialise buffer to NULL before calling
+netfs_alloc_folioq_buffer() as netfs assumes it can append to the buffer it
+is given. Setting it to NULL means it should start a fresh buffer, but the
+value is currently undefined.
+
+Fixes: a2906d3316fc ("cifs: Switch crypto buffer to use a folio_queue rather than an xarray")
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Steve French <sfrench@samba.org>
+cc: Paulo Alcantara <pc@manguebit.org>
+cc: linux-cifs@vger.kernel.org
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2ops.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 4bb065a6fbaa..d3e09b10dea4 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -4496,7 +4496,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
+ for (int i = 1; i < num_rqst; i++) {
+ struct smb_rqst *old = &old_rq[i - 1];
+ struct smb_rqst *new = &new_rq[i];
+- struct folio_queue *buffer;
++ struct folio_queue *buffer = NULL;
+ size_t size = iov_iter_count(&old->rq_iter);
+
+ orig_len += smb_rqst_len(server, old);
+--
+2.50.1
+
--- /dev/null
+From 4b1865dfe53060d0eddb86f5922fb6497e5ea44e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:36:41 -0500
+Subject: drm/amd/display: Add null pointer check in
+ mod_hdcp_hdcp1_create_session()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 7a2ca2ea64b1b63c8baa94a8f5deb70b2248d119 ]
+
+The function mod_hdcp_hdcp1_create_session() calls the function
+get_first_active_display(), but does not check its return value.
+The return value is a null pointer if the display list is empty.
+This will lead to a null pointer dereference.
+
+Add a null pointer check for get_first_active_display() and return
+MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function return null.
+
+This is similar to the commit c3e9826a2202
+("drm/amd/display: Add null pointer check for get_first_active_display()").
+
+Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 5e43eb3cd731649c4f8b9134f857be62a416c893)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index e58e7b93810b..6b7db8ec9a53 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
+--
+2.50.1
+
--- /dev/null
+From b2a5b7b101379c86d50a3405a0dd2503fbbb1d86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:47 +0200
+Subject: drm/amd/display: Adjust DCE 8-10 clock, don't overclock by 15%
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit 1fc931be2f47fde23ca5aff6f19421375c312fb2 ]
+
+Adjust the nominal (and performance) clocks for DCE 8-10,
+and set them to 625 MHz, which is the value used by the legacy
+display code in amdgpu_atombios_get_clock_info.
+
+This was tested with Hawaii, Tonga and Fiji.
+These GPUs can output 4K 60Hz (10-bit depth) at 625 MHz.
+
+The extra 15% clock was added as a workaround for a Polaris issue
+which uses DCE 11, and should not have been used on DCE 8-10 which
+are already hardcoded to the highest possible display clock.
+Unfortunately, the extra 15% was mistakenly copied and kept
+even on code paths which don't affect Polaris.
+
+This commit fixes that and also adds a check to make sure
+not to exceed the maximum DCE 8-10 display clock.
+
+Fixes: 8cd61c313d8b ("drm/amd/display: Raise dispclk value for Polaris")
+Fixes: dc88b4a684d2 ("drm/amd/display: make clk mgr soc specific")
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 1ae45b5d4f371af8ae51a3827d0ec9fe27eeb867)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+index e846e4920393..dbd6ef1b60a0 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+@@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
+ /* ClocksStateLow */
+ { .display_clk_khz = 352000, .pixel_clk_khz = 330000},
+ /* ClocksStateNominal */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
++{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 },
+ /* ClocksStatePerformance */
+-{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };
++{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } };
+
+ int dentist_get_divider_from_did(int did)
+ {
+@@ -403,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base,
+ {
+ struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dm_pp_power_level_change_request level_change_req;
+- int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
+-
+- /*TODO: W/A for dal3 linux, investigate why this works */
+- if (!clk_mgr_dce->dfs_bypass_active)
+- patched_disp_clk = patched_disp_clk * 115 / 100;
++ const int max_disp_clk =
++ clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz;
++ int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz);
+
+ level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context);
+ /* get max clock state from PPLIB */
+--
+2.50.1
+
--- /dev/null
+From cd3cb16c771c9de6e85293a2bdf170a0164981aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 11:43:51 +0200
+Subject: drm/amd/display: Don't print errors for nonexistent connectors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Timur Kristóf <timur.kristof@gmail.com>
+
+[ Upstream commit f14ee2e7a86c5e57295b48b8e198cae7189b3b93 ]
+
+When getting the number of connectors, the VBIOS reports
+the number of valid indices, but it doesn't say which indices
+are valid, and not every valid index has an actual connector.
+If we don't find a connector on an index, that is not an error.
+
+Considering these are not actual errors, don't litter the logs.
+
+Fixes: 60df5628144b ("drm/amd/display: handle invalid connector indices")
+Signed-off-by: Timur Kristóf <timur.kristof@gmail.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Rodrigo Siqueira <siqueira@igalia.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 249d4bc5f1935f04bb45b3b63c0f8922565124f7)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/bios/bios_parser.c | 5 +----
+ drivers/gpu/drm/amd/display/dc/core/dc.c | 15 ++++++++++++++-
+ 2 files changed, 15 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+index 67f08495b7e6..154fd2c18e88 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+@@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id(
+ return object_id;
+ }
+
+- if (tbl->ucNumberOfObjects <= i) {
+- dm_error("Can't find connector id %d in connector table of size %d.\n",
+- i, tbl->ucNumberOfObjects);
++ if (tbl->ucNumberOfObjects <= i)
+ return object_id;
+- }
+
+ id = le16_to_cpu(tbl->asObjects[i].usObjectID);
+ object_id = object_id_from_bios_object_id(id);
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index aab1f8c9d717..eb76611a42a5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -217,11 +217,24 @@ static bool create_links(
+ connectors_num,
+ num_virtual_links);
+
+- // condition loop on link_count to allow skipping invalid indices
++ /* When getting the number of connectors, the VBIOS reports the number of valid indices,
++ * but it doesn't say which indices are valid, and not every index has an actual connector.
++ * So, if we don't find a connector on an index, that is not an error.
++ *
++ * - There is no guarantee that the first N indices will be valid
++ * - VBIOS may report a higher amount of valid indices than there are actual connectors
++ * - Some VBIOS have valid configurations for more connectors than there actually are
++ * on the card. This may be because the manufacturer used the same VBIOS for different
++ * variants of the same card.
++ */
+ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) {
++ struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i);
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
++ if (connector_id.id == CONNECTOR_ID_UNKNOWN)
++ continue;
++
+ DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count);
+
+ link_init_params.ctx = dc->ctx;
+--
+2.50.1
+
--- /dev/null
+From 4dd96508e9f284a26094440811582e0fcdccc85c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:34 +0800
+Subject: drm/hisilicon/hibmc: fix dp and vga cannot show together
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 3271faf42d135bcf569c3ff6af55c21858eec212 ]
+
+If VGA and DP connected together, there will be only one can get crtc.
+Add encoder possible_clones to support two connectors enable.
+
+Fixes: 3c7623fb5bb6 ("drm/hisilicon/hibmc: Enable this hot plug detect of irq feature")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-8-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index ac552c339671..289304500ab0 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ static int hibmc_kms_init(struct hibmc_drm_private *priv)
+ {
+ struct drm_device *dev = &priv->dev;
++ struct drm_encoder *encoder;
++ u32 clone_mask = 0;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+@@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
+ return ret;
+ }
+
++ drm_for_each_encoder(encoder, dev)
++ clone_mask |= drm_encoder_mask(encoder);
++
++ drm_for_each_encoder(encoder, dev)
++ encoder->possible_clones = clone_mask;
++
+ return 0;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 75a2a0e0ed2e1c46107a5a39e69fe01aa929f714 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:30 +0800
+Subject: drm/hisilicon/hibmc: fix irq_request()'s irq name variable is local
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 8bed4ec42a4e0dc8113172696ff076d1eb6d8bcb ]
+
+The local variable is passed in request_irq (), and there will be use
+after free problem, which will make request_irq failed. Using the global
+irq name instead of it to fix.
+
+Fixes: b11bc1ae4658 ("drm/hisilicon/hibmc: Add MSI irq getting and requesting for HPD")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-4-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 768b97f9e74a..4cdcc34070ee 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -32,7 +32,7 @@
+
+ DEFINE_DRM_GEM_FOPS(hibmc_fops);
+
+-static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" };
++static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" };
+
+ static irqreturn_t hibmc_interrupt(int irq, void *arg)
+ {
+@@ -277,7 +277,6 @@ static void hibmc_unload(struct drm_device *dev)
+ static int hibmc_msi_init(struct drm_device *dev)
+ {
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+- char name[32] = {0};
+ int valid_irq_num;
+ int irq;
+ int ret;
+@@ -292,9 +291,6 @@ static int hibmc_msi_init(struct drm_device *dev)
+ valid_irq_num = ret;
+
+ for (int i = 0; i < valid_irq_num; i++) {
+- snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s",
+- dev->driver->name, pci_name(pdev), g_irqs_names_map[i]);
+-
+ irq = pci_irq_vector(pdev, i);
+
+ if (i)
+@@ -302,10 +298,10 @@ static int hibmc_msi_init(struct drm_device *dev)
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ hibmc_dp_interrupt,
+ hibmc_dp_hpd_isr,
+- IRQF_SHARED, name, dev);
++ IRQF_SHARED, g_irqs_names_map[i], dev);
+ else
+ ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt,
+- IRQF_SHARED, name, dev);
++ IRQF_SHARED, g_irqs_names_map[i], dev);
+ if (ret) {
+ drm_err(dev, "install irq failed: %d\n", ret);
+ return ret;
+--
+2.50.1
+
--- /dev/null
+From 47b84b20a9bb499891572216ca47888ca548d5f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:32 +0800
+Subject: drm/hisilicon/hibmc: fix rare monitors cannot display problem
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 9f98b429ba67d430b873e06bcfb90afa22888978 ]
+
+In some case, the dp link training success at 8.1Gbps, but the sink's
+maximum supported rate is less than 8.1G. So change the default 8.1Gbps
+link rate to the rate that reads from devices' capabilities.
+
+Fixes: 54063d86e036 ("drm/hisilicon/hibmc: add dp link moduel in hibmc drivers")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-6-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+index 74f7832ea53e..0726cb5b736e 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c
+@@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp)
+ return hibmc_dp_link_reduce_rate(dp);
+ }
+
++static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp)
++{
++ dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
++ if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate)
++ dp->link.cap.link_rate = DP_LINK_BW_8_1;
++
++ dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
++ if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX)
++ dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX;
++}
++
+ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
+ {
+ struct hibmc_dp_link *link = &dp->link;
+@@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp)
+ if (ret)
+ drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret);
+
+- dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE];
+- dp->link.cap.lanes = 0x2;
++ hibmc_dp_update_caps(dp);
+
+ ret = hibmc_dp_get_serdes_rate_cfg(dp);
+ if (ret < 0)
+--
+2.50.1
+
--- /dev/null
+From f3b62e6daeae078aa7387a14d47dfbde0c0aedb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:31 +0800
+Subject: drm/hisilicon/hibmc: fix the hibmc loaded failed bug
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 93a08f856fcc5aaeeecad01f71bef3088588216a ]
+
+When hibmc loaded failed, the driver use hibmc_unload to free the
+resource, but the mutexes in mode.config are not init, which will
+access an NULL pointer. Just change goto statement to return, because
+hibnc_hw_init() doesn't need to free anything.
+
+Fixes: b3df5e65cc03 ("drm/hibmc: Drop drm_vblank_cleanup")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 4cdcc34070ee..ac552c339671 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -319,13 +319,13 @@ static int hibmc_load(struct drm_device *dev)
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+- goto err;
++ return ret;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+- goto err;
++ return ret;
+ }
+
+ ret = hibmc_kms_init(priv);
+--
+2.50.1
+
--- /dev/null
+From afab2c92beefba0fb2f9c0f4bf90e7f567b1c4b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:28 +0800
+Subject: drm/hisilicon/hibmc: fix the i2c device resource leak when vdac init
+ failed
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit e5f48bfa2ae0806d5f51fb8061afc619a73599a7 ]
+
+Currently the driver missed to clean the i2c adapter when vdac init failed.
+It may cause resource leak.
+
+Fixes: a0d078d06e516 ("drm/hisilicon: Features to support reading resolutions from EDID")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-2-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h | 1 +
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c | 5 +++++
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 11 ++++++++---
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+index 274feabe7df0..ca8502e2760c 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+@@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv);
+ int hibmc_vdac_init(struct hibmc_drm_private *priv);
+
+ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector);
++void hibmc_ddc_del(struct hibmc_vdac *vdac);
+
+ int hibmc_dp_init(struct hibmc_drm_private *priv);
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+index 99b3b77b5445..44860011855e 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
+@@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac)
+
+ return i2c_bit_add_bus(&vdac->adapter);
+ }
++
++void hibmc_ddc_del(struct hibmc_vdac *vdac)
++{
++ i2c_del_adapter(&vdac->adapter);
++}
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+index e8a527ede854..841e81f47b68 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+@@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector)
+ {
+ struct hibmc_vdac *vdac = to_hibmc_vdac(connector);
+
+- i2c_del_adapter(&vdac->adapter);
++ hibmc_ddc_del(vdac);
+ drm_connector_cleanup(connector);
+ }
+
+@@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL);
+ if (ret) {
+ drm_err(dev, "failed to init encoder: %d\n", ret);
+- return ret;
++ goto err;
+ }
+
+ drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
+@@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ &vdac->adapter);
+ if (ret) {
+ drm_err(dev, "failed to init connector: %d\n", ret);
+- return ret;
++ goto err;
+ }
+
+ drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
+@@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return 0;
++
++err:
++ hibmc_ddc_del(vdac);
++
++ return ret;
+ }
+--
+2.50.1
+
--- /dev/null
+From 62f859adaa05cc1e617b8b227e4fbd492ba8492a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 19:50:27 +0800
+Subject: drm/nouveau/nvif: Fix potential memory leak in nvif_vmm_ctor().
+
+From: Fanhua Li <lifanhua5@huawei.com>
+
+[ Upstream commit bb8aeaa3191b617c6faf8ae937252e059673b7ea ]
+
+When the nvif_vmm_type is invalid, we will return error directly
+without freeing the args in nvif_vmm_ctor(), which leading a memory
+leak. Fix it by setting the ret -EINVAL and goto done.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/all/202312040659.4pJpMafN-lkp@intel.com/
+Fixes: 6b252cf42281 ("drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm")
+Signed-off-by: Fanhua Li <lifanhua5@huawei.com>
+Link: https://lore.kernel.org/r/20250728115027.50878-1-lifanhua5@huawei.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nvif/vmm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
+index 99296f03371a..07c1ebc2a941 100644
+--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
++++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
+@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ default:
+ WARN_ON(1);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto done;
+ }
+
+ memcpy(args->data, argv, argc);
+--
+2.50.1
+
--- /dev/null
+From d660f517ddea4f6aca8f73a6d71d7a775e2e6cc9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 18:54:41 +0200
+Subject: drm: nova-drm: fix 32-bit arm build
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit db2e7bcee11cd57f95fef3c6cbb562d0577eb84a ]
+
+In 32-bit arm, the build fails with:
+
+ error[E0308]: mismatched types
+ --> drivers/gpu/drm/nova/file.rs:42:28
+ |
+ 42 | getparam.set_value(value);
+ | --------- ^^^^^ expected `u64`, found `u32`
+ | |
+ | arguments to this method are incorrect
+ |
+ note: method defined here
+ --> drivers/gpu/drm/nova/uapi.rs:29:12
+ |
+ 29 | pub fn set_value(&self, v: u64) {
+ | ^^^^^^^^^ ------
+ help: you can convert a `u32` to a `u64`
+ |
+ 42 | getparam.set_value(value.into());
+ | +++++++
+
+The reason is that `Getparam::set_value` takes a `u64` (from the UAPI),
+but `pci::Device::resource_len()` returns a `resource_size_t`, which is a
+`phys_addr_t`, which may be 32- or 64-bit.
+
+Thus add an `into()` call to support the 32-bit case, while allowing the
+Clippy lint that complains in the 64-bit case where the type is the same.
+
+Fixes: cdeaeb9dd762 ("drm: nova-drm: add initial driver skeleton")
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Reviewed-by: Christian Schrefl <chrisi.schrefl@gmail.com>
+Link: https://lore.kernel.org/r/20250724165441.2105632-1-ojeda@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nova/file.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs
+index 7e59a34b830d..4fe62cf98a23 100644
+--- a/drivers/gpu/drm/nova/file.rs
++++ b/drivers/gpu/drm/nova/file.rs
+@@ -39,7 +39,8 @@ impl File {
+ _ => return Err(EINVAL),
+ };
+
+- getparam.set_value(value);
++ #[allow(clippy::useless_conversion)]
++ getparam.set_value(value.into());
+
+ Ok(0)
+ }
+--
+2.50.1
+
--- /dev/null
+From 00aa486c34d7f2f296dc7ab15222436278bbf778 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jun 2025 14:38:19 +0200
+Subject: drm/panic: Add a u64 divide by 10 for arm32
+
+From: Jocelyn Falempe <jfalempe@redhat.com>
+
+[ Upstream commit 9af8f2b469c0438620832f3729a3c5c03853b56b ]
+
+On 32bits ARM, u64 divided by a constant is not optimized to a
+multiply by inverse by the compiler [1].
+So do the multiply by inverse explicitly for this architecture.
+
+Link: https://github.com/llvm/llvm-project/issues/37280 [1]
+Reported-by: Andrei Lalaev <andrey.lalaev@gmail.com>
+Closes: https://lore.kernel.org/dri-devel/c0a2771c-f3f5-4d4c-aa82-d673b3c5cb46@gmail.com/
+Fixes: 675008f196ca ("drm/panic: Use a decimal fifo to avoid u64 by u64 divide")
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_panic_qr.rs | 22 +++++++++++++++++++++-
+ 1 file changed, 21 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs
+index 18492daae4b3..b9cc64458437 100644
+--- a/drivers/gpu/drm/drm_panic_qr.rs
++++ b/drivers/gpu/drm/drm_panic_qr.rs
+@@ -381,6 +381,26 @@ struct DecFifo {
+ len: usize,
+ }
+
++// On arm32 architecture, dividing an `u64` by a constant will generate a call
++// to `__aeabi_uldivmod` which is not present in the kernel.
++// So use the multiply by inverse method for this architecture.
++fn div10(val: u64) -> u64 {
++ if cfg!(target_arch = "arm") {
++ let val_h = val >> 32;
++ let val_l = val & 0xFFFFFFFF;
++ let b_h: u64 = 0x66666666;
++ let b_l: u64 = 0x66666667;
++
++ let tmp1 = val_h * b_l + ((val_l * b_l) >> 32);
++ let tmp2 = val_l * b_h + (tmp1 & 0xffffffff);
++ let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32);
++
++ tmp3 >> 2
++ } else {
++ val / 10
++ }
++}
++
+ impl DecFifo {
+ fn push(&mut self, data: u64, len: usize) {
+ let mut chunk = data;
+@@ -389,7 +409,7 @@ impl DecFifo {
+ }
+ for i in 0..len {
+ self.decimals[i] = (chunk % 10) as u8;
+- chunk /= 10;
++ chunk = div10(chunk);
+ }
+ self.len += len;
+ }
+--
+2.50.1
+
--- /dev/null
+From 6b797da495407bb40ce5a77bbc07d3b6bb549131 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Jun 2025 10:37:04 +0200
+Subject: drm/tests: Do not use drm_fb_blit() in format-helper tests
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 5a4856e0e38109ba994f369962f054ecb445c098 ]
+
+Export additional helpers from the format-helper library and open-code
+drm_fb_blit() in tests. Prepares for the removal of drm_fb_blit(). Only
+sysfb drivers use drm_fb_blit(). The function will soon be removed from
+format helpers and be refactored within sysfb helpers.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Reviewed-by: José Expósito <jose.exposito89@gmail.com>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Link: https://lore.kernel.org/r/20250616083846.221396-2-tzimmermann@suse.de
+Stable-dep-of: 05663d88fd0b ("drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_format_helper.c | 108 ++++++++++++++++--
+ drivers/gpu/drm/drm_format_internal.h | 8 ++
+ .../gpu/drm/tests/drm_format_helper_test.c | 108 +++---------------
+ include/drm/drm_format_helper.h | 9 ++
+ 4 files changed, 131 insertions(+), 102 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
+index d36e6cacc575..73b5a80771cc 100644
+--- a/drivers/gpu/drm/drm_format_helper.c
++++ b/drivers/gpu/drm/drm_format_helper.c
+@@ -857,11 +857,33 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
+ }
+
+-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+- const struct iosys_map *src,
+- const struct drm_framebuffer *fb,
+- const struct drm_rect *clip,
+- struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
++ * @dst: Array of ABGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
++ * natively. It sets an opaque alpha channel as part of the conversion.
++ */
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
+ {
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+@@ -870,17 +892,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_abgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
+
+ static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
+ }
+
+-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+- const struct iosys_map *src,
+- const struct drm_framebuffer *fb,
+- const struct drm_rect *clip,
+- struct drm_format_conv_state *state)
++/**
++ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
++ * @dst: Array of XBGR8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
+ {
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+@@ -889,6 +934,49 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_xbgr8888_line);
+ }
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
++
++static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
++{
++ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
++}
++
++/**
++ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
++ * @dst: Array of BGRX8888 destination buffers
++ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
++ * within @dst; can be NULL if scanlines are stored next to each other.
++ * @src: Array of XRGB8888 source buffer
++ * @fb: DRM framebuffer
++ * @clip: Clip rectangle area to copy
++ * @state: Transform and conversion state
++ *
++ * This function copies parts of a framebuffer to display memory and converts the
++ * color format during the process. The parameters @dst, @dst_pitch and @src refer
++ * to arrays. Each array must have at least as many entries as there are planes in
++ * @fb's format. Each entry stores the value for the format's respective color plane
++ * at the same index.
++ *
++ * This function does not apply clipping on @dst (i.e. the destination is at the
++ * top-left corner).
++ *
++ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
++ * natively.
++ */
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src,
++ const struct drm_framebuffer *fb,
++ const struct drm_rect *clip,
++ struct drm_format_conv_state *state)
++{
++ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
++ 4,
++ };
++
++ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
++ drm_fb_xrgb8888_to_bgrx8888_line);
++}
++EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
+
+ static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
+ {
+diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
+index 9f857bfa368d..0aa458b8a3e0 100644
+--- a/drivers/gpu/drm/drm_format_internal.h
++++ b/drivers/gpu/drm/drm_format_internal.h
+@@ -111,6 +111,14 @@ static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
+ ((pix & 0x000000ff) << 16);
+ }
+
++static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
++{
++ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
++ ((pix & 0x00ff0000) >> 8) |
++ ((pix & 0x0000ff00) << 8) |
++ ((pix & 0x000000ff) << 24);
++}
++
+ static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
+ {
+ return GENMASK(31, 24) | /* fill alpha bits */
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 2a3d80b27cae..8b62adbd4dfa 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -748,14 +748,9 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+ buf = dst.vaddr;
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, ¶ms->clip,
++ &fmtcnv_state, false);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
++ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, ¶ms->clip,
++ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, ¶ms->clip,
+ &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
+- ¶ms->clip, &fmtcnv_state);
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
++ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
+- ¶ms->clip, &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
+ buf = dst.vaddr; /* restore original value of buf */
+ memset(buf, 0, dst_size);
+
+- int blit_result;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
+- &src, &fb, ¶ms->clip, &fmtcnv_state);
++ drm_fb_swab(&dst, dst_pitch, &src, &fb, ¶ms->clip, false, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr;
+ memset(buf, 0, dst_size);
+
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
++ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr;
+@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
+ mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
+ fb.format = &mock_format;
+
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
++ drm_fb_swab(&dst, dst_pitch, &src, &fb, ¶ms->clip, false, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
+ const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ NULL : &result->dst_pitch;
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
+ const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
+ NULL : &result->dst_pitch;
+
+- int blit_result = 0;
+-
+- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, ¶ms->clip,
+- &fmtcnv_state);
+-
++ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+-
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+@@ -1910,12 +1838,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
+ memset(buf[i], 0, dst_size[i]);
+ }
+
+- int blit_result;
+-
+- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, ¶ms->clip,
+- &fmtcnv_state);
++ drm_fb_memcpy(dst, dst_pitches, src, &fb, ¶ms->clip);
+
+- KUNIT_EXPECT_FALSE(test, blit_result);
+ for (size_t i = 0; i < fb.format->num_planes; i++) {
+ expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
+ KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
+diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
+index d8539174ca11..49a2e09155d1 100644
+--- a/include/drm/drm_format_helper.h
++++ b/include/drm/drm_format_helper.h
+@@ -102,6 +102,15 @@ void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pi
+ void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
++void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
++ const struct iosys_map *src, const struct drm_framebuffer *fb,
++ const struct drm_rect *clip, struct drm_format_conv_state *state);
+ void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+--
+2.50.1
+
--- /dev/null
+From 493b2d7bb3412b9e6efce551b69c05fd2e360eaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Jun 2025 11:00:54 +0200
+Subject: drm/tests: Fix drm_test_fb_xrgb8888_to_xrgb2101010() on big-endian
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit 05663d88fd0b8ee1c54ab2d5fb36f9b6a3ed37f7 ]
+
+Fix failures on big-endian architectures on tests cases
+single_pixel_source_buffer, single_pixel_clip_rectangle,
+well_known_colors and destination_pitch.
+
+Fixes: 15bda1f8de5d ("drm/tests: Add calls to drm_fb_blit() on supported format conversion tests")
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250630090054.353246-2-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_format_helper_test.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 8b62adbd4dfa..e17643c408bf 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -1040,6 +1040,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ memset(buf, 0, dst_size);
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
++ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From b46fe2eb2e9a459bdeeb346e30a7adf5eb4fefd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 30 Jun 2025 11:00:53 +0200
+Subject: drm/tests: Fix endian warning
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit d28b9d2925b4f773adb21b1fc20260ddc370fb13 ]
+
+When compiling with sparse enabled, this warning is thrown:
+
+ warning: incorrect type in argument 2 (different base types)
+ expected restricted __le32 const [usertype] *buf
+ got unsigned int [usertype] *[assigned] buf
+
+Add a cast to fix it.
+
+Fixes: 453114319699 ("drm/format-helper: Add KUnit tests for drm_fb_xrgb8888_to_xrgb2101010()")
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250630090054.353246-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tests/drm_format_helper_test.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
+index 35cd3405d045..2a3d80b27cae 100644
+--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
++++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
+@@ -1071,7 +1071,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+ NULL : &result->dst_pitch;
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, ¶ms->clip, &fmtcnv_state);
+- buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
++ buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
+
+ buf = dst.vaddr; /* restore original value of buf */
+--
+2.50.1
+
--- /dev/null
+From 70e0cecc6e0a782d164dd7eff39831ca0be3cbc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 12:43:57 +0200
+Subject: drm/xe: Assign ioctl xe file handler to vm in xe_vm_create
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Piotr Piórkowski <piotr.piorkowski@intel.com>
+
+[ Upstream commit 658a1c8e0a66d0777e0e37a11ba19f27a81e77f4 ]
+
+In several code paths, such as xe_pt_create(), the vm->xef field is used
+to determine whether a VM originates from userspace or the kernel.
+
+Previously, this handler was only assigned in xe_vm_create_ioctl(),
+after the VM was created by xe_vm_create(). However, xe_vm_create()
+triggers page table creation, and that function assumes vm->xef should
+be already set. This could lead to incorrect origin detection.
+
+To fix this problem and ensure consistency in the initialization of
+the VM object, let's move the assignment of this handler to
+xe_vm_create.
+
+v2:
+ - take reference to the xe file object only when xef is not NULL
+ - release the reference to the xe file object on the error path (Matthew)
+
+Fixes: 7f387e6012b6 ("drm/xe: add XE_BO_FLAG_PINNED_LATE_RESTORE")
+Signed-off-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250811104358.2064150-2-piotr.piorkowski@intel.com
+Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
+(cherry picked from commit 9337166fa1d80f7bb7c7d3a8f901f21c348c0f2a)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_migrate.c | 2 +-
+ drivers/gpu/drm/xe/xe_pxp_submit.c | 2 +-
+ drivers/gpu/drm/xe/xe_vm.c | 11 ++++++-----
+ drivers/gpu/drm/xe/xe_vm.h | 2 +-
+ 4 files changed, 9 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
+index 1e3fd139dfcb..0a481190f3e6 100644
+--- a/drivers/gpu/drm/xe/xe_migrate.c
++++ b/drivers/gpu/drm/xe/xe_migrate.c
+@@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
+
+ /* Special layout, prepared below.. */
+ vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
+- XE_VM_FLAG_SET_TILE_ID(tile));
++ XE_VM_FLAG_SET_TILE_ID(tile), NULL);
+ if (IS_ERR(vm))
+ return ERR_CAST(vm);
+
+diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c
+index d92ec0f515b0..ca95f2a4d4ef 100644
+--- a/drivers/gpu/drm/xe/xe_pxp_submit.c
++++ b/drivers/gpu/drm/xe/xe_pxp_submit.c
+@@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt,
+ xe_assert(xe, hwe);
+
+ /* PXP instructions must be issued from PPGTT */
+- vm = xe_vm_create(xe, XE_VM_FLAG_GSC);
++ vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 861577746929..7251f23b919c 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1612,7 +1612,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm)
+ }
+ }
+
+-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
++struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
+ {
+ struct drm_gem_object *vm_resv_obj;
+ struct xe_vm *vm;
+@@ -1633,9 +1633,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+ vm->xe = xe;
+
+ vm->size = 1ull << xe->info.va_bits;
+-
+ vm->flags = flags;
+
++ if (xef)
++ vm->xef = xe_file_get(xef);
+ /**
+ * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be
+ * manipulated under the PXP mutex. However, the PXP mutex can be taken
+@@ -1786,6 +1787,8 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
+ for_each_tile(tile, xe, id)
+ xe_range_fence_tree_fini(&vm->rftree[id]);
+ ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move);
++ if (vm->xef)
++ xe_file_put(vm->xef);
+ kfree(vm);
+ if (flags & XE_VM_FLAG_LR_MODE)
+ xe_pm_runtime_put(xe);
+@@ -2069,7 +2072,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)
+ flags |= XE_VM_FLAG_FAULT_MODE;
+
+- vm = xe_vm_create(xe, flags);
++ vm = xe_vm_create(xe, flags, xef);
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+@@ -2085,8 +2088,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ vm->usm.asid = asid;
+ }
+
+- vm->xef = xe_file_get(xef);
+-
+ /* Record BO memory for VM pagetable created against client */
+ for_each_tile(tile, xe, id)
+ if (vm->pt_root[id])
+diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
+index 494af6bdc646..0158ec0ae3b2 100644
+--- a/drivers/gpu/drm/xe/xe_vm.h
++++ b/drivers/gpu/drm/xe/xe_vm.h
+@@ -26,7 +26,7 @@ struct xe_sync_entry;
+ struct xe_svm_range;
+ struct drm_exec;
+
+-struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags);
++struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef);
+
+ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id);
+ int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node);
+--
+2.50.1
+
--- /dev/null
+From 1d2f90c75d030c63dbb43cbb2144e1827b3ceb3c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 12:12:30 +0200
+Subject: drm/xe: Fix vm_bind_ioctl double free bug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Christoph Manszewski <christoph.manszewski@intel.com>
+
+[ Upstream commit 111fb43a557726079a67ce3ab51f602ddbf7097e ]
+
+If the argument check during an array bind fails, the bind_ops are freed
+twice as seen below. Fix this by setting bind_ops to NULL after freeing.
+
+==================================================================
+BUG: KASAN: double-free in xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+Free of addr ffff88813bb9b800 by task xe_vm/14198
+
+CPU: 5 UID: 0 PID: 14198 Comm: xe_vm Not tainted 6.16.0-xe-eudebug-cmanszew+ #520 PREEMPT(full)
+Hardware name: Intel Corporation Alder Lake Client Platform/AlderLake-P DDR5 RVP, BIOS ADLPFWI1.R00.2411.A02.2110081023 10/08/2021
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x82/0xd0
+ print_report+0xcb/0x610
+ ? __virt_addr_valid+0x19a/0x300
+ ? xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+ kasan_report_invalid_free+0xc8/0xf0
+ ? xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+ ? xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+ check_slab_allocation+0x102/0x130
+ kfree+0x10d/0x440
+ ? should_fail_ex+0x57/0x2f0
+ ? xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+ xe_vm_bind_ioctl+0x1b2/0x21f0 [xe]
+ ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
+ ? __lock_acquire+0xab9/0x27f0
+ ? lock_acquire+0x165/0x300
+ ? drm_dev_enter+0x53/0xe0 [drm]
+ ? find_held_lock+0x2b/0x80
+ ? drm_dev_exit+0x30/0x50 [drm]
+ ? drm_ioctl_kernel+0x128/0x1c0 [drm]
+ drm_ioctl_kernel+0x128/0x1c0 [drm]
+ ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
+ ? find_held_lock+0x2b/0x80
+ ? __pfx_drm_ioctl_kernel+0x10/0x10 [drm]
+ ? should_fail_ex+0x57/0x2f0
+ ? __pfx_xe_vm_bind_ioctl+0x10/0x10 [xe]
+ drm_ioctl+0x352/0x620 [drm]
+ ? __pfx_drm_ioctl+0x10/0x10 [drm]
+ ? __pfx_rpm_resume+0x10/0x10
+ ? do_raw_spin_lock+0x11a/0x1b0
+ ? find_held_lock+0x2b/0x80
+ ? __pm_runtime_resume+0x61/0xc0
+ ? rcu_is_watching+0x20/0x50
+ ? trace_irq_enable.constprop.0+0xac/0xe0
+ xe_drm_ioctl+0x91/0xc0 [xe]
+ __x64_sys_ioctl+0xb2/0x100
+ ? rcu_is_watching+0x20/0x50
+ do_syscall_64+0x68/0x2e0
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fa9acb24ded
+
+Fixes: b43e864af0d4 ("drm/xe/uapi: Add DRM_XE_VM_BIND_FLAG_CPU_ADDR_MIRROR")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
+Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Christoph Manszewski <christoph.manszewski@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://lore.kernel.org/r/20250813101231.196632-2-christoph.manszewski@intel.com
+(cherry picked from commit a01b704527c28a2fd43a17a85f8996b75ec8492a)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 3135de124c18..3b11b1d52bee 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -3200,6 +3200,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
+ free_bind_ops:
+ if (args->num_binds > 1)
+ kvfree(*bind_ops);
++ *bind_ops = NULL;
+ return err;
+ }
+
+@@ -3305,7 +3306,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
+ struct xe_exec_queue *q = NULL;
+ u32 num_syncs, num_ufence = 0;
+ struct xe_sync_entry *syncs = NULL;
+- struct drm_xe_vm_bind_op *bind_ops;
++ struct drm_xe_vm_bind_op *bind_ops = NULL;
+ struct xe_vma_ops vops;
+ struct dma_fence *fence;
+ int err;
+--
+2.50.1
+
--- /dev/null
+From 49dcf22d5dbed2f82351c0b1f87a600b09dd741f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 12:43:58 +0200
+Subject: drm/xe: Move ASID allocation and user PT BO tracking into
+ xe_vm_create
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Piotr Piórkowski <piotr.piorkowski@intel.com>
+
+[ Upstream commit 8a30114073639fd97f2c7390abbc34fb8711327a ]
+
+Currently, ASID assignment for user VMs and page-table BO accounting for
+client memory tracking are performed in xe_vm_create_ioctl.
+To consolidate VM object initialization, move this logic to
+xe_vm_create.
+
+v2:
+ - removed unnecessary duplicate BO tracking code
+ - using the local variable xef to verify whether the VM is being created
+ by userspace
+
+Fixes: 658a1c8e0a66 ("drm/xe: Assign ioctl xe file handler to vm in xe_vm_create")
+Suggested-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Piotr Piórkowski <piotr.piorkowski@intel.com>
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://lore.kernel.org/r/20250811104358.2064150-3-piotr.piorkowski@intel.com
+Signed-off-by: Michał Winiarski <michal.winiarski@intel.com>
+(cherry picked from commit 30e0c3f43a414616e0b6ca76cf7f7b2cd387e1d4)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+[Rodrigo: Added fixes tag]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 34 +++++++++++++++-------------------
+ 1 file changed, 15 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 7251f23b919c..3135de124c18 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -1767,6 +1767,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef)
+ if (number_tiles > 1)
+ vm->composite_fence_ctx = dma_fence_context_alloc(1);
+
++ if (xef && xe->info.has_asid) {
++ u32 asid;
++
++ down_write(&xe->usm.lock);
++ err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
++ XA_LIMIT(1, XE_MAX_ASID - 1),
++ &xe->usm.next_asid, GFP_KERNEL);
++ up_write(&xe->usm.lock);
++ if (err < 0)
++ goto err_unlock_close;
++
++ vm->usm.asid = asid;
++ }
++
+ trace_xe_vm_create(vm);
+
+ return vm;
+@@ -2034,9 +2048,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct xe_device *xe = to_xe_device(dev);
+ struct xe_file *xef = to_xe_file(file);
+ struct drm_xe_vm_create *args = data;
+- struct xe_tile *tile;
+ struct xe_vm *vm;
+- u32 id, asid;
++ u32 id;
+ int err;
+ u32 flags = 0;
+
+@@ -2076,23 +2089,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
+ if (IS_ERR(vm))
+ return PTR_ERR(vm);
+
+- if (xe->info.has_asid) {
+- down_write(&xe->usm.lock);
+- err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm,
+- XA_LIMIT(1, XE_MAX_ASID - 1),
+- &xe->usm.next_asid, GFP_KERNEL);
+- up_write(&xe->usm.lock);
+- if (err < 0)
+- goto err_close_and_put;
+-
+- vm->usm.asid = asid;
+- }
+-
+- /* Record BO memory for VM pagetable created against client */
+- for_each_tile(tile, xe, id)
+- if (vm->pt_root[id])
+- xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo);
+-
+ #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM)
+ /* Warning: Security issue - never enable by default */
+ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE);
+--
+2.50.1
+
--- /dev/null
+From 23b6f090864f338d3c13d18c406275a08aee0e98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 14:12:45 -0700
+Subject: gve: prevent ethtool ops after shutdown
+
+From: Jordan Rhee <jordanrhee@google.com>
+
+[ Upstream commit 75a9a46d67f46d608205888f9b34e315c1786345 ]
+
+A crash can occur if an ethtool operation is invoked
+after shutdown() is called.
+
+shutdown() is invoked during system shutdown to stop DMA operations
+without performing expensive deallocations. It is discouraged to
+unregister the netdev in this path, so the device may still be visible
+to userspace and kernel helpers.
+
+In gve, shutdown() tears down most internal data structures. If an
+ethtool operation is dispatched after shutdown(), it will dereference
+freed or NULL pointers, leading to a kernel panic. While graceful
+shutdown normally quiesces userspace before invoking the reboot
+syscall, forced shutdowns (as observed on GCP VMs) can still trigger
+this path.
+
+Fix by calling netif_device_detach() in shutdown().
+This marks the device as detached so the ethtool ioctl handler
+will skip dispatching operations to the driver.
+
+Fixes: 974365e51861 ("gve: Implement suspend/resume/shutdown")
+Signed-off-by: Jordan Rhee <jordanrhee@google.com>
+Signed-off-by: Jeroen de Borst <jeroendb@google.com>
+Link: https://patch.msgid.link/20250818211245.1156919-1-jeroendb@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index d1aeb722d48f..36a6d766b638 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2726,6 +2726,8 @@ static void gve_shutdown(struct pci_dev *pdev)
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_running(priv->dev);
+
++ netif_device_detach(netdev);
++
+ rtnl_lock();
+ netdev_lock(netdev);
+ if (was_up && gve_close(priv->dev)) {
+--
+2.50.1
+
--- /dev/null
+From ae48c29fca685f50c979be1322585ab84a90415f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:59 -0700
+Subject: igc: fix disabling L1.2 PCI-E link substate on I226 on init
+
+From: ValdikSS <iam@valdikss.org.ru>
+
+[ Upstream commit 1468c1f97cf32418e34dbb40b784ed9333b9e123 ]
+
+Device ID comparison in igc_is_device_id_i226 is performed before
+the ID is set, resulting in always failing check on init.
+
+Before the patch:
+* L1.2 is not disabled on init
+* L1.2 is properly disabled after suspend-resume cycle
+
+With the patch:
+* L1.2 is properly disabled both on init and after suspend-resume
+
+How to test:
+Connect to the 1G link with 300+ mbit/s Internet speed, and run
+the download speed test, such as:
+
+ curl -o /dev/null http://speedtest.selectel.ru/1GB
+
+Without L1.2 disabled, the speed would be no more than ~200 mbit/s.
+With L1.2 disabled, the speed would reach 1 gbit/s.
+Note: it's required that the latency between your host and the remote
+be around 3-5 ms, the test inside LAN (<1 ms latency) won't trigger the
+issue.
+
+Link: https://lore.kernel.org/intel-wired-lan/15248b4f-3271-42dd-8e35-02bfc92b25e1@intel.com
+Fixes: 0325143b59c6 ("igc: disable L1.2 PCI-E link substate to avoid performance issue")
+Signed-off-by: ValdikSS <iam@valdikss.org.ru>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-6-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 031c332f66c4..1b4465d6b2b7 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7115,6 +7115,13 @@ static int igc_probe(struct pci_dev *pdev,
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -7141,13 +7148,6 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+--
+2.50.1
+
--- /dev/null
+From c2edc0523d5d7997284024e98abd5283f43ddb05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 9c17dfa76703..7add9bcf45dc 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3596,7 +3596,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3622,7 +3622,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From a580147b3b0c25af0de6597826ac6cff03c1c1bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:20:51 +0530
+Subject: iosys-map: Fix undefined behavior in iosys_map_clear()
+
+From: Nitin Gote <nitin.r.gote@intel.com>
+
+[ Upstream commit 5634c8cb298a7146b4e38873473e280b50e27a2c ]
+
+The current iosys_map_clear() implementation reads the potentially
+uninitialized 'is_iomem' boolean field to decide which union member
+to clear. This causes undefined behavior when called on uninitialized
+structures, as 'is_iomem' may contain garbage values like 0xFF.
+
+UBSAN detects this as:
+ UBSAN: invalid-load in include/linux/iosys-map.h:267
+ load of value 255 is not a valid value for type '_Bool'
+
+Fix by unconditionally clearing the entire structure with memset(),
+eliminating the need to read uninitialized data and ensuring all
+fields are set to known good values.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14639
+Fixes: 01fd30da0474 ("dma-buf: Add struct dma-buf-map for storing struct dma_buf.vaddr_ptr")
+Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250718105051.2709487-1-nitin.r.gote@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/iosys-map.h | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
+index 4696abfd311c..3e85afe794c0 100644
+--- a/include/linux/iosys-map.h
++++ b/include/linux/iosys-map.h
+@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
+ */
+ static inline void iosys_map_clear(struct iosys_map *map)
+ {
+- if (map->is_iomem) {
+- map->vaddr_iomem = NULL;
+- map->is_iomem = false;
+- } else {
+- map->vaddr = NULL;
+- }
++ memset(map, 0, sizeof(*map));
+ }
+
+ /**
+--
+2.50.1
+
--- /dev/null
+From 7b84c8de5a7e1268fe609b43dcd2e03ce01f10a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 5dae892bbc73..fd58426f222b 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -305,6 +305,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From 65b117c33eb906de7404eccb92a87a564df79078 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index ac58964b2f08..7b941505a9d0 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -398,7 +398,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -433,6 +433,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From a12c3e80444939c7b8f6c8d9c839cc9b09869c6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:51:15 +0800
+Subject: LoongArch: KVM: Use kvm_get_vcpu_by_id() instead of kvm_get_vcpu()
+
+From: Song Gao <gaosong@loongson.cn>
+
+[ Upstream commit 0dfd9ea7bf80fabe11f5b775d762a5cd168cdf41 ]
+
+Since using kvm_get_vcpu() may fail to retrieve the vCPU context,
+kvm_get_vcpu_by_id() should be used instead.
+
+Fixes: 8e3054261bc3 ("LoongArch: KVM: Add IPI user mode read and write function")
+Fixes: 3956a52bc05b ("LoongArch: KVM: Add EIOINTC read and write functions")
+Reviewed-by: Yanteng Si <siyanteng@cqsoftware.com.cn>
+Signed-off-by: Song Gao <gaosong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kvm/intc/eiointc.c | 7 ++++++-
+ arch/loongarch/kvm/intc/ipi.c | 2 +-
+ 2 files changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
+index 3cf9894999da..0207cfe1dbd6 100644
+--- a/arch/loongarch/kvm/intc/eiointc.c
++++ b/arch/loongarch/kvm/intc/eiointc.c
+@@ -45,7 +45,12 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
+ }
+
+ cpu = s->sw_coremap[irq];
+- vcpu = kvm_get_vcpu(s->kvm, cpu);
++ vcpu = kvm_get_vcpu_by_id(s->kvm, cpu);
++ if (unlikely(vcpu == NULL)) {
++ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
++ return;
++ }
++
+ if (level) {
+ /* if not enable return false */
+ if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
+diff --git a/arch/loongarch/kvm/intc/ipi.c b/arch/loongarch/kvm/intc/ipi.c
+index 6a13d2f44575..4859e320e3a1 100644
+--- a/arch/loongarch/kvm/intc/ipi.c
++++ b/arch/loongarch/kvm/intc/ipi.c
+@@ -318,7 +318,7 @@ static int kvm_ipi_regs_access(struct kvm_device *dev,
+ cpu = (attr->attr >> 16) & 0x3ff;
+ addr = attr->attr & 0xff;
+
+- vcpu = kvm_get_vcpu(dev->kvm, cpu);
++ vcpu = kvm_get_vcpu_by_id(dev->kvm, cpu);
+ if (unlikely(vcpu == NULL)) {
+ kvm_err("%s: invalid target cpu: %d\n", __func__, cpu);
+ return -EINVAL;
+--
+2.50.1
+
--- /dev/null
+From 04109acbed205a234461ae4315f40bba792a9e22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Jul 2025 09:26:32 +0800
+Subject: LoongArch: KVM: Use standard bitops API with eiointc
+
+From: Bibo Mao <maobibo@loongson.cn>
+
+[ Upstream commit d23bd878f6ea9cff93104159356e012a8b2bbfaf ]
+
+Standard bitops APIs such test_bit() is used here, rather than manually
+calculating the offset and mask. Also use non-atomic API __set_bit() and
+__clear_bit() rather than set_bit() and clear_bit(), since the global
+spinlock is held already.
+
+Signed-off-by: Bibo Mao <maobibo@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Stable-dep-of: 0dfd9ea7bf80 ("LoongArch: KVM: Use kvm_get_vcpu_by_id() instead of kvm_get_vcpu()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kvm/intc/eiointc.c | 27 +++++++++++----------------
+ 1 file changed, 11 insertions(+), 16 deletions(-)
+
+diff --git a/arch/loongarch/kvm/intc/eiointc.c b/arch/loongarch/kvm/intc/eiointc.c
+index a75f865d6fb9..3cf9894999da 100644
+--- a/arch/loongarch/kvm/intc/eiointc.c
++++ b/arch/loongarch/kvm/intc/eiointc.c
+@@ -9,7 +9,7 @@
+
+ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
+ {
+- int ipnum, cpu, cpuid, irq_index, irq_mask, irq;
++ int ipnum, cpu, cpuid, irq;
+ struct kvm_vcpu *vcpu;
+
+ for (irq = 0; irq < EIOINTC_IRQS; irq++) {
+@@ -18,8 +18,6 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
+ ipnum = count_trailing_zeros(ipnum);
+ ipnum = (ipnum >= 0 && ipnum < 4) ? ipnum : 0;
+ }
+- irq_index = irq / 32;
+- irq_mask = BIT(irq & 0x1f);
+
+ cpuid = s->coremap.reg_u8[irq];
+ vcpu = kvm_get_vcpu_by_cpuid(s->kvm, cpuid);
+@@ -27,16 +25,16 @@ static void eiointc_set_sw_coreisr(struct loongarch_eiointc *s)
+ continue;
+
+ cpu = vcpu->vcpu_id;
+- if (!!(s->coreisr.reg_u32[cpu][irq_index] & irq_mask))
+- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
++ if (test_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]))
++ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ else
+- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
++ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ }
+ }
+
+ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
+ {
+- int ipnum, cpu, found, irq_index, irq_mask;
++ int ipnum, cpu, found;
+ struct kvm_vcpu *vcpu;
+ struct kvm_interrupt vcpu_irq;
+
+@@ -48,19 +46,16 @@ static void eiointc_update_irq(struct loongarch_eiointc *s, int irq, int level)
+
+ cpu = s->sw_coremap[irq];
+ vcpu = kvm_get_vcpu(s->kvm, cpu);
+- irq_index = irq / 32;
+- irq_mask = BIT(irq & 0x1f);
+-
+ if (level) {
+ /* if not enable return false */
+- if (((s->enable.reg_u32[irq_index]) & irq_mask) == 0)
++ if (!test_bit(irq, (unsigned long *)s->enable.reg_u32))
+ return;
+- s->coreisr.reg_u32[cpu][irq_index] |= irq_mask;
++ __set_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+- set_bit(irq, s->sw_coreisr[cpu][ipnum]);
++ __set_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ } else {
+- s->coreisr.reg_u32[cpu][irq_index] &= ~irq_mask;
+- clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
++ __clear_bit(irq, (unsigned long *)s->coreisr.reg_u32[cpu]);
++ __clear_bit(irq, s->sw_coreisr[cpu][ipnum]);
+ found = find_first_bit(s->sw_coreisr[cpu][ipnum], EIOINTC_IRQS);
+ }
+
+@@ -110,8 +105,8 @@ void eiointc_set_irq(struct loongarch_eiointc *s, int irq, int level)
+ unsigned long flags;
+ unsigned long *isr = (unsigned long *)s->isr.reg_u8;
+
+- level ? set_bit(irq, isr) : clear_bit(irq, isr);
+ spin_lock_irqsave(&s->lock, flags);
++ level ? __set_bit(irq, isr) : __clear_bit(irq, isr);
+ eiointc_update_irq(s, irq, level);
+ spin_unlock_irqrestore(&s->lock, flags);
+ }
+--
+2.50.1
+
--- /dev/null
+From afdf18e1c1fcc3656476604cc1e0ffcb55343ac9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Optimize module load time by optimizing PLT/GOT counting
+
+From: Kanglong Wang <wangkanglong@loongson.cn>
+
+[ Upstream commit 63dbd8fb2af3a89466538599a9acb2d11ef65c06 ]
+
+When enabling CONFIG_KASAN, CONFIG_PREEMPT_VOLUNTARY_BUILD and
+CONFIG_PREEMPT_VOLUNTARY at the same time, there will be soft deadlock,
+the relevant logs are as follows:
+
+rcu: INFO: rcu_sched self-detected stall on CPU
+...
+Call Trace:
+[<900000000024f9e4>] show_stack+0x5c/0x180
+[<90000000002482f4>] dump_stack_lvl+0x94/0xbc
+[<9000000000224544>] rcu_dump_cpu_stacks+0x1fc/0x280
+[<900000000037ac80>] rcu_sched_clock_irq+0x720/0xf88
+[<9000000000396c34>] update_process_times+0xb4/0x150
+[<90000000003b2474>] tick_nohz_handler+0xf4/0x250
+[<9000000000397e28>] __hrtimer_run_queues+0x1d0/0x428
+[<9000000000399b2c>] hrtimer_interrupt+0x214/0x538
+[<9000000000253634>] constant_timer_interrupt+0x64/0x80
+[<9000000000349938>] __handle_irq_event_percpu+0x78/0x1a0
+[<9000000000349a78>] handle_irq_event_percpu+0x18/0x88
+[<9000000000354c00>] handle_percpu_irq+0x90/0xf0
+[<9000000000348c74>] handle_irq_desc+0x94/0xb8
+[<9000000001012b28>] handle_cpu_irq+0x68/0xa0
+[<9000000001def8c0>] handle_loongarch_irq+0x30/0x48
+[<9000000001def958>] do_vint+0x80/0xd0
+[<9000000000268a0c>] kasan_mem_to_shadow.part.0+0x2c/0x2a0
+[<90000000006344f4>] __asan_load8+0x4c/0x120
+[<900000000025c0d0>] module_frob_arch_sections+0x5c8/0x6b8
+[<90000000003895f0>] load_module+0x9e0/0x2958
+[<900000000038b770>] __do_sys_init_module+0x208/0x2d0
+[<9000000001df0c34>] do_syscall+0x94/0x190
+[<900000000024d6fc>] handle_syscall+0xbc/0x158
+
+After analysis, this is because the slow speed of loading the amdgpu
+module leads to the long time occupation of the cpu and then the soft
+deadlock.
+
+When loading a module, module_frob_arch_sections() tries to figure out
+the number of PLTs/GOTs that will be needed to handle all the RELAs. It
+will call the count_max_entries() to find in an out-of-order date which
+counting algorithm has O(n^2) complexity.
+
+To make it faster, we sort the relocation list by info and addend. That
+way, to check for a duplicate relocation, it just needs to compare with
+the previous entry. This reduces the complexity of the algorithm to O(n
+ log n), as done in commit d4e0340919fb ("arm64/module: Optimize module
+load time by optimizing PLT counting"). This gives sinificant reduction
+in module load time for modules with large number of relocations.
+
+After applying this patch, the soft deadlock problem has been solved,
+and the kernel starts normally without "Call Trace".
+
+Using the default configuration to test some modules, the results are as
+follows:
+
+Module Size
+ip_tables 36K
+fat 143K
+radeon 2.5MB
+amdgpu 16MB
+
+Without this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 54 1221/84
+amdgpu 1411 4525/1098
+
+With this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 22 1221/84
+amdgpu 45 4525/1098
+
+Fixes: fcdfe9d22bed ("LoongArch: Add ELF and module support")
+Signed-off-by: Kanglong Wang <wangkanglong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/module-sections.c | 36 ++++++++++++-------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
+index e2f30ff9afde..a43ba7f9f987 100644
+--- a/arch/loongarch/kernel/module-sections.c
++++ b/arch/loongarch/kernel/module-sections.c
+@@ -8,6 +8,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleloader.h>
+ #include <linux/ftrace.h>
++#include <linux/sort.h>
+
+ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+ {
+@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
+ return (Elf_Addr)&plt[nr];
+ }
+
+-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+-{
+- return x->r_info == y->r_info && x->r_addend == y->r_addend;
+-}
++#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
+
+-static bool duplicate_rela(const Elf_Rela *rela, int idx)
++static int compare_rela(const void *x, const void *y)
+ {
+- int i;
++ int ret;
++ const Elf_Rela *rela_x = x, *rela_y = y;
+
+- for (i = 0; i < idx; i++) {
+- if (is_rela_equal(&rela[i], &rela[idx]))
+- return true;
+- }
++ ret = cmp_3way(rela_x->r_info, rela_y->r_info);
++ if (ret == 0)
++ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
+
+- return false;
++ return ret;
+ }
+
+ static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+ {
+- unsigned int i, type;
++ unsigned int i;
++
++ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
+
+ for (i = 0; i < num; i++) {
+- type = ELF_R_TYPE(relas[i].r_info);
+- switch (type) {
++ if (i && !compare_rela(&relas[i-1], &relas[i]))
++ continue;
++
++ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ case R_LARCH_B26:
+- if (!duplicate_rela(relas, i))
+- (*plts)++;
++ (*plts)++;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+- if (!duplicate_rela(relas, i))
+- (*gots)++;
++ (*gots)++;
+ break;
+ default:
+ break; /* Do nothing. */
+--
+2.50.1
+
--- /dev/null
+From 44138dbf449a9d08f2ad18b9b3f693d5dbfef32b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:15 +0800
+Subject: LoongArch: Pass annotate-tablejump option if LTO is enabled
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit 5dfea6644d201bfeffaa7e0d79d62309856613b7 ]
+
+When compiling with LLVM and CONFIG_LTO_CLANG is set, there exist many
+objtool warnings "sibling call from callable instruction with modified
+stack frame".
+
+For this special case, the related object file shows that there is no
+generated relocation section '.rela.discard.tablejump_annotate' for the
+table jump instruction jirl, thus objtool can not know that what is the
+actual destination address.
+
+It needs to do something on the LLVM side to make sure that there is the
+relocation section '.rela.discard.tablejump_annotate' if LTO is enabled,
+but in order to maintain compatibility for the current LLVM compiler,
+this can be done in the kernel Makefile for now. Ensure it is aware of
+linker with LTO, '--loongarch-annotate-tablejump' needs to be passed via
+'-mllvm' to ld.lld.
+
+Note that it should also pass the compiler option -mannotate-tablejump
+rather than only pass '-mllvm --loongarch-annotate-tablejump' to ld.lld
+if LTO is enabled, otherwise there are no jump info for some table jump
+instructions.
+
+Fixes: e20ab7d454ee ("LoongArch: Enable jump table for objtool")
+Closes: https://lore.kernel.org/loongarch/20250731175655.GA1455142@ax162/
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Co-developed-by: WANG Rui <wangrui@loongson.cn>
+Signed-off-by: WANG Rui <wangrui@loongson.cn>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/Makefile | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/arch/loongarch/Makefile b/arch/loongarch/Makefile
+index b0703a4e02a2..a3a9759414f4 100644
+--- a/arch/loongarch/Makefile
++++ b/arch/loongarch/Makefile
+@@ -102,7 +102,13 @@ KBUILD_CFLAGS += $(call cc-option,-mthin-add-sub) $(call cc-option,-Wa$(comma)
+
+ ifdef CONFIG_OBJTOOL
+ ifdef CONFIG_CC_HAS_ANNOTATE_TABLEJUMP
++# The annotate-tablejump option can not be passed to LLVM backend when LTO is enabled.
++# Ensure it is aware of linker with LTO, '--loongarch-annotate-tablejump' also needs to
++# be passed via '-mllvm' to ld.lld.
+ KBUILD_CFLAGS += -mannotate-tablejump
++ifdef CONFIG_LTO_CLANG
++KBUILD_LDFLAGS += -mllvm --loongarch-annotate-tablejump
++endif
+ else
+ KBUILD_CFLAGS += -fno-jump-tables # keep compatibility with older compilers
+ endif
+--
+2.50.1
+
--- /dev/null
+From fd698512da62bf447cfb98ed0177661827feba99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Aug 2025 08:25:33 +0800
+Subject: md: add helper rdev_needs_recovery()
+
+From: Zheng Qixing <zhengqixing@huawei.com>
+
+[ Upstream commit cb0780ad4333040a98e10f014b593ef738a3f31e ]
+
+Add a helper for checking if an rdev needs recovery.
+
+Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
+Link: https://lore.kernel.org/linux-raid/20250816002534.1754356-2-zhengqixing@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Stable-dep-of: b7ee30f0efd1 ("md: fix sync_action incorrect display during resync")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 23 ++++++++++++-----------
+ 1 file changed, 12 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 80470bcf4383..0348b5f3adc5 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4822,6 +4822,15 @@ metadata_store(struct mddev *mddev, const char *buf, size_t len)
+ static struct md_sysfs_entry md_metadata =
+ __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
+
++static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
++{
++ return rdev->raid_disk >= 0 &&
++ !test_bit(Journal, &rdev->flags) &&
++ !test_bit(Faulty, &rdev->flags) &&
++ !test_bit(In_sync, &rdev->flags) &&
++ rdev->recovery_offset < sectors;
++}
++
+ enum sync_action md_sync_action(struct mddev *mddev)
+ {
+ unsigned long recovery = mddev->recovery;
+@@ -8959,11 +8968,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+ start = MaxSector;
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+- if (rdev->raid_disk >= 0 &&
+- !test_bit(Journal, &rdev->flags) &&
+- !test_bit(Faulty, &rdev->flags) &&
+- !test_bit(In_sync, &rdev->flags) &&
+- rdev->recovery_offset < start)
++ if (rdev_needs_recovery(rdev, start))
+ start = rdev->recovery_offset;
+ rcu_read_unlock();
+
+@@ -9322,12 +9327,8 @@ void md_do_sync(struct md_thread *thread)
+ test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) {
+ rcu_read_lock();
+ rdev_for_each_rcu(rdev, mddev)
+- if (rdev->raid_disk >= 0 &&
+- mddev->delta_disks >= 0 &&
+- !test_bit(Journal, &rdev->flags) &&
+- !test_bit(Faulty, &rdev->flags) &&
+- !test_bit(In_sync, &rdev->flags) &&
+- rdev->recovery_offset < mddev->curr_resync)
++ if (mddev->delta_disks >= 0 &&
++ rdev_needs_recovery(rdev, mddev->curr_resync))
+ rdev->recovery_offset = mddev->curr_resync;
+ rcu_read_unlock();
+ }
+--
+2.50.1
+
--- /dev/null
+From 414c43d71e7ed410bd667dd44342ded8aae4f7bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Aug 2025 08:25:34 +0800
+Subject: md: fix sync_action incorrect display during resync
+
+From: Zheng Qixing <zhengqixing@huawei.com>
+
+[ Upstream commit b7ee30f0efd12f42735ae233071015389407966c ]
+
+During raid resync, if a disk becomes faulty, the operation is
+briefly interrupted. The MD_RECOVERY_RECOVER flag triggered by
+the disk failure causes sync_action to incorrectly show "recover"
+instead of "resync". The same issue affects reshape operations.
+
+Reproduction steps:
+ mdadm -Cv /dev/md1 -l1 -n4 -e1.2 /dev/sd{a..d} // -> resync happened
+ mdadm -f /dev/md1 /dev/sda // -> resync interrupted
+ cat sync_action
+ -> recover
+
+Add progress checks in md_sync_action() for resync/recover/reshape
+to ensure the interface correctly reports the actual operation type.
+
+Fixes: 4b10a3bc67c1 ("md: ensure resync is prioritized over recovery")
+Signed-off-by: Zheng Qixing <zhengqixing@huawei.com>
+Link: https://lore.kernel.org/linux-raid/20250816002534.1754356-3-zhengqixing@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 37 +++++++++++++++++++++++++++++++++++--
+ 1 file changed, 35 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 0348b5f3adc5..8746b22060a7 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4831,9 +4831,33 @@ static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
+ rdev->recovery_offset < sectors;
+ }
+
++static enum sync_action md_get_active_sync_action(struct mddev *mddev)
++{
++ struct md_rdev *rdev;
++ bool is_recover = false;
++
++ if (mddev->resync_offset < MaxSector)
++ return ACTION_RESYNC;
++
++ if (mddev->reshape_position != MaxSector)
++ return ACTION_RESHAPE;
++
++ rcu_read_lock();
++ rdev_for_each_rcu(rdev, mddev) {
++ if (rdev_needs_recovery(rdev, MaxSector)) {
++ is_recover = true;
++ break;
++ }
++ }
++ rcu_read_unlock();
++
++ return is_recover ? ACTION_RECOVER : ACTION_IDLE;
++}
++
+ enum sync_action md_sync_action(struct mddev *mddev)
+ {
+ unsigned long recovery = mddev->recovery;
++ enum sync_action active_action;
+
+ /*
+ * frozen has the highest priority, means running sync_thread will be
+@@ -4857,8 +4881,17 @@ enum sync_action md_sync_action(struct mddev *mddev)
+ !test_bit(MD_RECOVERY_NEEDED, &recovery))
+ return ACTION_IDLE;
+
+- if (test_bit(MD_RECOVERY_RESHAPE, &recovery) ||
+- mddev->reshape_position != MaxSector)
++ /*
++ * Check if any sync operation (resync/recover/reshape) is
++ * currently active. This ensures that only one sync operation
++ * can run at a time. Returns the type of active operation, or
++ * ACTION_IDLE if none are active.
++ */
++ active_action = md_get_active_sync_action(mddev);
++ if (active_action != ACTION_IDLE)
++ return active_action;
++
++ if (test_bit(MD_RECOVERY_RESHAPE, &recovery))
+ return ACTION_RESHAPE;
+
+ if (test_bit(MD_RECOVERY_RECOVER, &recovery))
+--
+2.50.1
+
--- /dev/null
+From d26a98b6f4bc6104e8ec765a928f28d7ab15caf4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Jul 2025 11:33:40 +0800
+Subject: md: rename recovery_cp to resync_offset
+
+From: Li Nan <linan122@huawei.com>
+
+[ Upstream commit 907a99c314a5a695e35acff78ac61f4ec950a6d3 ]
+
+'recovery_cp' was used to represent the progress of sync, but its name
+contains recovery, which can cause confusion. Replaces 'recovery_cp'
+with 'resync_offset' for clarity.
+
+Signed-off-by: Li Nan <linan122@huawei.com>
+Link: https://lore.kernel.org/linux-raid/20250722033340.1933388-1-linan666@huaweicloud.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Stable-dep-of: b7ee30f0efd1 ("md: fix sync_action incorrect display during resync")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-raid.c | 42 ++++++++++++++--------------
+ drivers/md/md-bitmap.c | 8 +++---
+ drivers/md/md-cluster.c | 16 +++++------
+ drivers/md/md.c | 50 +++++++++++++++++-----------------
+ drivers/md/md.h | 2 +-
+ drivers/md/raid0.c | 6 ++--
+ drivers/md/raid1-10.c | 2 +-
+ drivers/md/raid1.c | 10 +++----
+ drivers/md/raid10.c | 16 +++++------
+ drivers/md/raid5-ppl.c | 6 ++--
+ drivers/md/raid5.c | 30 ++++++++++----------
+ include/uapi/linux/raid/md_p.h | 2 +-
+ 12 files changed, 95 insertions(+), 95 deletions(-)
+
+diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
+index e8c0a8c6fb51..9835f2fe26e9 100644
+--- a/drivers/md/dm-raid.c
++++ b/drivers/md/dm-raid.c
+@@ -439,7 +439,7 @@ static bool rs_is_reshapable(struct raid_set *rs)
+ /* Return true, if raid set in @rs is recovering */
+ static bool rs_is_recovering(struct raid_set *rs)
+ {
+- return rs->md.recovery_cp < rs->md.dev_sectors;
++ return rs->md.resync_offset < rs->md.dev_sectors;
+ }
+
+ /* Return true, if raid set in @rs is reshaping */
+@@ -769,7 +769,7 @@ static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *r
+ rs->md.layout = raid_type->algorithm;
+ rs->md.new_layout = rs->md.layout;
+ rs->md.delta_disks = 0;
+- rs->md.recovery_cp = MaxSector;
++ rs->md.resync_offset = MaxSector;
+
+ for (i = 0; i < raid_devs; i++)
+ md_rdev_init(&rs->dev[i].rdev);
+@@ -913,7 +913,7 @@ static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
+ rs->md.external = 0;
+ rs->md.persistent = 1;
+ rs->md.major_version = 2;
+- } else if (rebuild && !rs->md.recovery_cp) {
++ } else if (rebuild && !rs->md.resync_offset) {
+ /*
+ * Without metadata, we will not be able to tell if the array
+ * is in-sync or not - we must assume it is not. Therefore,
+@@ -1696,20 +1696,20 @@ static void rs_setup_recovery(struct raid_set *rs, sector_t dev_sectors)
+ {
+ /* raid0 does not recover */
+ if (rs_is_raid0(rs))
+- rs->md.recovery_cp = MaxSector;
++ rs->md.resync_offset = MaxSector;
+ /*
+ * A raid6 set has to be recovered either
+ * completely or for the grown part to
+ * ensure proper parity and Q-Syndrome
+ */
+ else if (rs_is_raid6(rs))
+- rs->md.recovery_cp = dev_sectors;
++ rs->md.resync_offset = dev_sectors;
+ /*
+ * Other raid set types may skip recovery
+ * depending on the 'nosync' flag.
+ */
+ else
+- rs->md.recovery_cp = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
++ rs->md.resync_offset = test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)
+ ? MaxSector : dev_sectors;
+ }
+
+@@ -2144,7 +2144,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
+ sb->events = cpu_to_le64(mddev->events);
+
+ sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
+- sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
++ sb->array_resync_offset = cpu_to_le64(mddev->resync_offset);
+
+ sb->level = cpu_to_le32(mddev->level);
+ sb->layout = cpu_to_le32(mddev->layout);
+@@ -2335,18 +2335,18 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ }
+
+ if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
+- mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
++ mddev->resync_offset = le64_to_cpu(sb->array_resync_offset);
+
+ /*
+ * During load, we set FirstUse if a new superblock was written.
+ * There are two reasons we might not have a superblock:
+ * 1) The raid set is brand new - in which case, all of the
+ * devices must have their In_sync bit set. Also,
+- * recovery_cp must be 0, unless forced.
++ * resync_offset must be 0, unless forced.
+ * 2) This is a new device being added to an old raid set
+ * and the new device needs to be rebuilt - in which
+ * case the In_sync bit will /not/ be set and
+- * recovery_cp must be MaxSector.
++ * resync_offset must be MaxSector.
+ * 3) This is/are a new device(s) being added to an old
+ * raid set during takeover to a higher raid level
+ * to provide capacity for redundancy or during reshape
+@@ -2391,8 +2391,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
+ new_devs > 1 ? "s" : "");
+ return -EINVAL;
+ } else if (!test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags) && rs_is_recovering(rs)) {
+- DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)",
+- (unsigned long long) mddev->recovery_cp);
++ DMERR("'rebuild' specified while raid set is not in-sync (resync_offset=%llu)",
++ (unsigned long long) mddev->resync_offset);
+ return -EINVAL;
+ } else if (rs_is_reshaping(rs)) {
+ DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)",
+@@ -2697,11 +2697,11 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
+ }
+ out:
+ /*
+- * Raise recovery_cp in case data_offset != 0 to
++ * Raise resync_offset in case data_offset != 0 to
+ * avoid false recovery positives in the constructor.
+ */
+- if (rs->md.recovery_cp < rs->md.dev_sectors)
+- rs->md.recovery_cp += rs->dev[0].rdev.data_offset;
++ if (rs->md.resync_offset < rs->md.dev_sectors)
++ rs->md.resync_offset += rs->dev[0].rdev.data_offset;
+
+ /* Adjust data offsets on all rdevs but on any raid4/5/6 journal device */
+ rdev_for_each(rdev, &rs->md) {
+@@ -2756,7 +2756,7 @@ static int rs_setup_takeover(struct raid_set *rs)
+ }
+
+ clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+
+ while (d--) {
+ rdev = &rs->dev[d].rdev;
+@@ -2764,7 +2764,7 @@ static int rs_setup_takeover(struct raid_set *rs)
+ if (test_bit(d, (void *) rs->rebuild_disks)) {
+ clear_bit(In_sync, &rdev->flags);
+ clear_bit(Faulty, &rdev->flags);
+- mddev->recovery_cp = rdev->recovery_offset = 0;
++ mddev->resync_offset = rdev->recovery_offset = 0;
+ /* Bitmap has to be created when we do an "up" takeover */
+ set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
+ }
+@@ -3222,7 +3222,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ if (r)
+ goto bad;
+
+- rs_setup_recovery(rs, rs->md.recovery_cp < rs->md.dev_sectors ? rs->md.recovery_cp : rs->md.dev_sectors);
++ rs_setup_recovery(rs, rs->md.resync_offset < rs->md.dev_sectors ? rs->md.resync_offset : rs->md.dev_sectors);
+ } else {
+ /* This is no size change or it is shrinking, update size and record in superblocks */
+ r = rs_set_dev_and_array_sectors(rs, rs->ti->len, false);
+@@ -3446,7 +3446,7 @@ static sector_t rs_get_progress(struct raid_set *rs, unsigned long recovery,
+
+ } else {
+ if (state == st_idle && !test_bit(MD_RECOVERY_INTR, &recovery))
+- r = mddev->recovery_cp;
++ r = mddev->resync_offset;
+ else
+ r = mddev->curr_resync_completed;
+
+@@ -4074,9 +4074,9 @@ static int raid_preresume(struct dm_target *ti)
+ }
+
+ /* Check for any resize/reshape on @rs and adjust/initiate */
+- if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
++ if (mddev->resync_offset && mddev->resync_offset < MaxSector) {
+ set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
+- mddev->resync_min = mddev->recovery_cp;
++ mddev->resync_min = mddev->resync_offset;
+ if (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags))
+ mddev->resync_max_sectors = mddev->dev_sectors;
+ }
+diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
+index 7f524a26cebc..334b71404930 100644
+--- a/drivers/md/md-bitmap.c
++++ b/drivers/md/md-bitmap.c
+@@ -1987,12 +1987,12 @@ static void bitmap_dirty_bits(struct mddev *mddev, unsigned long s,
+
+ md_bitmap_set_memory_bits(bitmap, sec, 1);
+ md_bitmap_file_set_bit(bitmap, sec);
+- if (sec < bitmap->mddev->recovery_cp)
++ if (sec < bitmap->mddev->resync_offset)
+ /* We are asserting that the array is dirty,
+- * so move the recovery_cp address back so
++ * so move the resync_offset address back so
+ * that it is obvious that it is dirty
+ */
+- bitmap->mddev->recovery_cp = sec;
++ bitmap->mddev->resync_offset = sec;
+ }
+ }
+
+@@ -2258,7 +2258,7 @@ static int bitmap_load(struct mddev *mddev)
+ || bitmap->events_cleared == mddev->events)
+ /* no need to keep dirty bits to optimise a
+ * re-add of a missing device */
+- start = mddev->recovery_cp;
++ start = mddev->resync_offset;
+
+ mutex_lock(&mddev->bitmap_info.mutex);
+ err = md_bitmap_init_from_disk(bitmap, start);
+diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
+index 94221d964d4f..5497eaee96e7 100644
+--- a/drivers/md/md-cluster.c
++++ b/drivers/md/md-cluster.c
+@@ -337,11 +337,11 @@ static void recover_bitmaps(struct md_thread *thread)
+ md_wakeup_thread(mddev->sync_thread);
+
+ if (hi > 0) {
+- if (lo < mddev->recovery_cp)
+- mddev->recovery_cp = lo;
++ if (lo < mddev->resync_offset)
++ mddev->resync_offset = lo;
+ /* wake up thread to continue resync in case resync
+ * is not finished */
+- if (mddev->recovery_cp != MaxSector) {
++ if (mddev->resync_offset != MaxSector) {
+ /*
+ * clear the REMOTE flag since we will launch
+ * resync thread in current node.
+@@ -863,9 +863,9 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
+ lockres_free(bm_lockres);
+ continue;
+ }
+- if ((hi > 0) && (lo < mddev->recovery_cp)) {
++ if ((hi > 0) && (lo < mddev->resync_offset)) {
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+- mddev->recovery_cp = lo;
++ mddev->resync_offset = lo;
+ md_check_recovery(mddev);
+ }
+
+@@ -1027,7 +1027,7 @@ static int leave(struct mddev *mddev)
+ * Also, we should send BITMAP_NEEDS_SYNC message in
+ * case reshaping is interrupted.
+ */
+- if ((cinfo->slot_number > 0 && mddev->recovery_cp != MaxSector) ||
++ if ((cinfo->slot_number > 0 && mddev->resync_offset != MaxSector) ||
+ (mddev->reshape_position != MaxSector &&
+ test_bit(MD_CLOSING, &mddev->flags)))
+ resync_bitmap(mddev);
+@@ -1605,8 +1605,8 @@ static int gather_bitmaps(struct md_rdev *rdev)
+ pr_warn("md-cluster: Could not gather bitmaps from slot %d", sn);
+ goto out;
+ }
+- if ((hi > 0) && (lo < mddev->recovery_cp))
+- mddev->recovery_cp = lo;
++ if ((hi > 0) && (lo < mddev->resync_offset))
++ mddev->resync_offset = lo;
+ }
+ out:
+ return err;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 10670c62b09e..80470bcf4383 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -1402,13 +1402,13 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, stru
+ mddev->layout = -1;
+
+ if (sb->state & (1<<MD_SB_CLEAN))
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ else {
+ if (sb->events_hi == sb->cp_events_hi &&
+ sb->events_lo == sb->cp_events_lo) {
+- mddev->recovery_cp = sb->recovery_cp;
++ mddev->resync_offset = sb->resync_offset;
+ } else
+- mddev->recovery_cp = 0;
++ mddev->resync_offset = 0;
+ }
+
+ memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
+@@ -1534,13 +1534,13 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
+ mddev->minor_version = sb->minor_version;
+ if (mddev->in_sync)
+ {
+- sb->recovery_cp = mddev->recovery_cp;
++ sb->resync_offset = mddev->resync_offset;
+ sb->cp_events_hi = (mddev->events>>32);
+ sb->cp_events_lo = (u32)mddev->events;
+- if (mddev->recovery_cp == MaxSector)
++ if (mddev->resync_offset == MaxSector)
+ sb->state = (1<< MD_SB_CLEAN);
+ } else
+- sb->recovery_cp = 0;
++ sb->resync_offset = 0;
+
+ sb->layout = mddev->layout;
+ sb->chunk_size = mddev->chunk_sectors << 9;
+@@ -1888,7 +1888,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struc
+ mddev->bitmap_info.default_space = (4096-1024) >> 9;
+ mddev->reshape_backwards = 0;
+
+- mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
++ mddev->resync_offset = le64_to_cpu(sb->resync_offset);
+ memcpy(mddev->uuid, sb->set_uuid, 16);
+
+ mddev->max_disks = (4096-256)/2;
+@@ -2074,7 +2074,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
+ sb->utime = cpu_to_le64((__u64)mddev->utime);
+ sb->events = cpu_to_le64(mddev->events);
+ if (mddev->in_sync)
+- sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
++ sb->resync_offset = cpu_to_le64(mddev->resync_offset);
+ else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags))
+ sb->resync_offset = cpu_to_le64(MaxSector);
+ else
+@@ -2754,7 +2754,7 @@ void md_update_sb(struct mddev *mddev, int force_change)
+ /* If this is just a dirty<->clean transition, and the array is clean
+ * and 'events' is odd, we can roll back to the previous clean state */
+ if (nospares
+- && (mddev->in_sync && mddev->recovery_cp == MaxSector)
++ && (mddev->in_sync && mddev->resync_offset == MaxSector)
+ && mddev->can_decrease_events
+ && mddev->events != 1) {
+ mddev->events--;
+@@ -4290,9 +4290,9 @@ __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);
+ static ssize_t
+ resync_start_show(struct mddev *mddev, char *page)
+ {
+- if (mddev->recovery_cp == MaxSector)
++ if (mddev->resync_offset == MaxSector)
+ return sprintf(page, "none\n");
+- return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
++ return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset);
+ }
+
+ static ssize_t
+@@ -4318,7 +4318,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len)
+ err = -EBUSY;
+
+ if (!err) {
+- mddev->recovery_cp = n;
++ mddev->resync_offset = n;
+ if (mddev->pers)
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ }
+@@ -6405,7 +6405,7 @@ static void md_clean(struct mddev *mddev)
+ mddev->external_size = 0;
+ mddev->dev_sectors = 0;
+ mddev->raid_disks = 0;
+- mddev->recovery_cp = 0;
++ mddev->resync_offset = 0;
+ mddev->resync_min = 0;
+ mddev->resync_max = MaxSector;
+ mddev->reshape_position = MaxSector;
+@@ -7359,9 +7359,9 @@ int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info)
+ * openned
+ */
+ if (info->state & (1<<MD_SB_CLEAN))
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ else
+- mddev->recovery_cp = 0;
++ mddev->resync_offset = 0;
+ mddev->persistent = ! info->not_persistent;
+ mddev->external = 0;
+
+@@ -8300,7 +8300,7 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ seq_printf(seq, "\tresync=REMOTE");
+ return 1;
+ }
+- if (mddev->recovery_cp < MaxSector) {
++ if (mddev->resync_offset < MaxSector) {
+ seq_printf(seq, "\tresync=PENDING");
+ return 1;
+ }
+@@ -8943,7 +8943,7 @@ static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
+ return mddev->resync_min;
+ case ACTION_RESYNC:
+ if (!mddev->bitmap)
+- return mddev->recovery_cp;
++ return mddev->resync_offset;
+ return 0;
+ case ACTION_RESHAPE:
+ /*
+@@ -9181,8 +9181,8 @@ void md_do_sync(struct md_thread *thread)
+ atomic_read(&mddev->recovery_active) == 0);
+ mddev->curr_resync_completed = j;
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+- j > mddev->recovery_cp)
+- mddev->recovery_cp = j;
++ j > mddev->resync_offset)
++ mddev->resync_offset = j;
+ update_time = jiffies;
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ sysfs_notify_dirent_safe(mddev->sysfs_completed);
+@@ -9302,19 +9302,19 @@ void md_do_sync(struct md_thread *thread)
+ mddev->curr_resync > MD_RESYNC_ACTIVE) {
+ if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
+- if (mddev->curr_resync >= mddev->recovery_cp) {
++ if (mddev->curr_resync >= mddev->resync_offset) {
+ pr_debug("md: checkpointing %s of %s.\n",
+ desc, mdname(mddev));
+ if (test_bit(MD_RECOVERY_ERROR,
+ &mddev->recovery))
+- mddev->recovery_cp =
++ mddev->resync_offset =
+ mddev->curr_resync_completed;
+ else
+- mddev->recovery_cp =
++ mddev->resync_offset =
+ mddev->curr_resync;
+ }
+ } else
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ } else {
+ if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
+ mddev->curr_resync = MaxSector;
+@@ -9536,7 +9536,7 @@ static bool md_choose_sync_action(struct mddev *mddev, int *spares)
+ }
+
+ /* Check if resync is in progress. */
+- if (mddev->recovery_cp < MaxSector) {
++ if (mddev->resync_offset < MaxSector) {
+ remove_spares(mddev, NULL);
+ set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
+ clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
+@@ -9717,7 +9717,7 @@ void md_check_recovery(struct mddev *mddev)
+ test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ (mddev->external == 0 && mddev->safemode == 1) ||
+ (mddev->safemode == 2
+- && !mddev->in_sync && mddev->recovery_cp == MaxSector)
++ && !mddev->in_sync && mddev->resync_offset == MaxSector)
+ ))
+ return;
+
+diff --git a/drivers/md/md.h b/drivers/md/md.h
+index d45a9e6ead80..43ae2d03faa1 100644
+--- a/drivers/md/md.h
++++ b/drivers/md/md.h
+@@ -523,7 +523,7 @@ struct mddev {
+ unsigned long normal_io_events; /* IO event timestamp */
+ atomic_t recovery_active; /* blocks scheduled, but not written */
+ wait_queue_head_t recovery_wait;
+- sector_t recovery_cp;
++ sector_t resync_offset;
+ sector_t resync_min; /* user requested sync
+ * starts here */
+ sector_t resync_max; /* resync should pause
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index d8f639f4ae12..613f4fab83b2 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -673,7 +673,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
+ mddev->raid_disks--;
+ mddev->delta_disks = -1;
+ /* make sure it will be not marked as dirty */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
+
+ create_strip_zones(mddev, &priv_conf);
+@@ -716,7 +716,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
+ mddev->raid_disks += mddev->delta_disks;
+ mddev->degraded = 0;
+ /* make sure it will be not marked as dirty */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
+
+ create_strip_zones(mddev, &priv_conf);
+@@ -759,7 +759,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
+ mddev->delta_disks = 1 - mddev->raid_disks;
+ mddev->raid_disks = 1;
+ /* make sure it will be not marked as dirty */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
+
+ create_strip_zones(mddev, &priv_conf);
+diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
+index b8b3a9069701..52881e6032da 100644
+--- a/drivers/md/raid1-10.c
++++ b/drivers/md/raid1-10.c
+@@ -283,7 +283,7 @@ static inline int raid1_check_read_range(struct md_rdev *rdev,
+ static inline bool raid1_should_read_first(struct mddev *mddev,
+ sector_t this_sector, int len)
+ {
+- if ((mddev->recovery_cp < this_sector + len))
++ if ((mddev->resync_offset < this_sector + len))
+ return true;
+
+ if (mddev_is_clustered(mddev) &&
+diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
+index 64b8176907a9..6cee738a645f 100644
+--- a/drivers/md/raid1.c
++++ b/drivers/md/raid1.c
+@@ -2822,7 +2822,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
+ }
+
+ if (mddev->bitmap == NULL &&
+- mddev->recovery_cp == MaxSector &&
++ mddev->resync_offset == MaxSector &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+ conf->fullsync == 0) {
+ *skipped = 1;
+@@ -3282,9 +3282,9 @@ static int raid1_run(struct mddev *mddev)
+ }
+
+ if (conf->raid_disks - mddev->degraded == 1)
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+
+- if (mddev->recovery_cp != MaxSector)
++ if (mddev->resync_offset != MaxSector)
+ pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
+@@ -3345,8 +3345,8 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
+
+ md_set_array_sectors(mddev, newsize);
+ if (sectors > mddev->dev_sectors &&
+- mddev->recovery_cp > mddev->dev_sectors) {
+- mddev->recovery_cp = mddev->dev_sectors;
++ mddev->resync_offset > mddev->dev_sectors) {
++ mddev->resync_offset = mddev->dev_sectors;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ mddev->dev_sectors = sectors;
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index 95dc354a86a0..b60c30bfb6c7 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -2117,7 +2117,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
+ int last = conf->geo.raid_disks - 1;
+ struct raid10_info *p;
+
+- if (mddev->recovery_cp < MaxSector)
++ if (mddev->resync_offset < MaxSector)
+ /* only hot-add to in-sync arrays, as recovery is
+ * very different from resync
+ */
+@@ -3185,7 +3185,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
+ * of a clean array, like RAID1 does.
+ */
+ if (mddev->bitmap == NULL &&
+- mddev->recovery_cp == MaxSector &&
++ mddev->resync_offset == MaxSector &&
+ mddev->reshape_position == MaxSector &&
+ !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
+ !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
+@@ -4145,7 +4145,7 @@ static int raid10_run(struct mddev *mddev)
+ disk->recovery_disabled = mddev->recovery_disabled - 1;
+ }
+
+- if (mddev->recovery_cp != MaxSector)
++ if (mddev->resync_offset != MaxSector)
+ pr_notice("md/raid10:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+ pr_info("md/raid10:%s: active with %d out of %d devices\n",
+@@ -4245,8 +4245,8 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
+
+ md_set_array_sectors(mddev, size);
+ if (sectors > mddev->dev_sectors &&
+- mddev->recovery_cp > oldsize) {
+- mddev->recovery_cp = oldsize;
++ mddev->resync_offset > oldsize) {
++ mddev->resync_offset = oldsize;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ calc_sectors(conf, sectors);
+@@ -4275,7 +4275,7 @@ static void *raid10_takeover_raid0(struct mddev *mddev, sector_t size, int devs)
+ mddev->delta_disks = mddev->raid_disks;
+ mddev->raid_disks *= 2;
+ /* make sure it will be not marked as dirty */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ mddev->dev_sectors = size;
+
+ conf = setup_conf(mddev);
+@@ -5087,8 +5087,8 @@ static void raid10_finish_reshape(struct mddev *mddev)
+ return;
+
+ if (mddev->delta_disks > 0) {
+- if (mddev->recovery_cp > mddev->resync_max_sectors) {
+- mddev->recovery_cp = mddev->resync_max_sectors;
++ if (mddev->resync_offset > mddev->resync_max_sectors) {
++ mddev->resync_offset = mddev->resync_max_sectors;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ mddev->resync_max_sectors = mddev->array_sectors;
+diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
+index c0fb335311aa..56b234683ee6 100644
+--- a/drivers/md/raid5-ppl.c
++++ b/drivers/md/raid5-ppl.c
+@@ -1163,7 +1163,7 @@ static int ppl_load_distributed(struct ppl_log *log)
+ le64_to_cpu(pplhdr->generation));
+
+ /* attempt to recover from log if we are starting a dirty array */
+- if (pplhdr && !mddev->pers && mddev->recovery_cp != MaxSector)
++ if (pplhdr && !mddev->pers && mddev->resync_offset != MaxSector)
+ ret = ppl_recover(log, pplhdr, pplhdr_offset);
+
+ /* write empty header if we are starting the array */
+@@ -1422,14 +1422,14 @@ int ppl_init_log(struct r5conf *conf)
+
+ if (ret) {
+ goto err;
+- } else if (!mddev->pers && mddev->recovery_cp == 0 &&
++ } else if (!mddev->pers && mddev->resync_offset == 0 &&
+ ppl_conf->recovered_entries > 0 &&
+ ppl_conf->mismatch_count == 0) {
+ /*
+ * If we are starting a dirty array and the recovery succeeds
+ * without any issues, set the array as clean.
+ */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+ set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
+ } else if (mddev->pers && ppl_conf->mismatch_count > 0) {
+ /* no mismatch allowed when enabling PPL for a running array */
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
+index ca5b0e8ba707..38a193c0fdae 100644
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -3740,7 +3740,7 @@ static int want_replace(struct stripe_head *sh, int disk_idx)
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && (rdev->recovery_offset <= sh->sector
+- || rdev->mddev->recovery_cp <= sh->sector))
++ || rdev->mddev->resync_offset <= sh->sector))
+ rv = 1;
+ return rv;
+ }
+@@ -3832,7 +3832,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
+ * is missing/faulty, then we need to read everything we can.
+ */
+ if (!force_rcw &&
+- sh->sector < sh->raid_conf->mddev->recovery_cp)
++ sh->sector < sh->raid_conf->mddev->resync_offset)
+ /* reconstruct-write isn't being forced */
+ return 0;
+ for (i = 0; i < s->failed && i < 2; i++) {
+@@ -4097,7 +4097,7 @@ static int handle_stripe_dirtying(struct r5conf *conf,
+ int disks)
+ {
+ int rmw = 0, rcw = 0, i;
+- sector_t recovery_cp = conf->mddev->recovery_cp;
++ sector_t resync_offset = conf->mddev->resync_offset;
+
+ /* Check whether resync is now happening or should start.
+ * If yes, then the array is dirty (after unclean shutdown or
+@@ -4107,14 +4107,14 @@ static int handle_stripe_dirtying(struct r5conf *conf,
+ * generate correct data from the parity.
+ */
+ if (conf->rmw_level == PARITY_DISABLE_RMW ||
+- (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
++ (resync_offset < MaxSector && sh->sector >= resync_offset &&
+ s->failed == 0)) {
+ /* Calculate the real rcw later - for now make it
+ * look like rcw is cheaper
+ */
+ rcw = 1; rmw = 2;
+- pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n",
+- conf->rmw_level, (unsigned long long)recovery_cp,
++ pr_debug("force RCW rmw_level=%u, resync_offset=%llu sh->sector=%llu\n",
++ conf->rmw_level, (unsigned long long)resync_offset,
+ (unsigned long long)sh->sector);
+ } else for (i = disks; i--; ) {
+ /* would I have to read this buffer for read_modify_write */
+@@ -4770,14 +4770,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
+ if (test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* If there is a failed device being replaced,
+ * we must be recovering.
+- * else if we are after recovery_cp, we must be syncing
++ * else if we are after resync_offset, we must be syncing
+ * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
+ * else we can only be replacing
+ * sync and recovery both need to read all devices, and so
+ * use the same flag.
+ */
+ if (do_recovery ||
+- sh->sector >= conf->mddev->recovery_cp ||
++ sh->sector >= conf->mddev->resync_offset ||
+ test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
+ s->syncing = 1;
+ else
+@@ -7780,7 +7780,7 @@ static int raid5_run(struct mddev *mddev)
+ int first = 1;
+ int ret = -EIO;
+
+- if (mddev->recovery_cp != MaxSector)
++ if (mddev->resync_offset != MaxSector)
+ pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
+ mdname(mddev));
+
+@@ -7921,7 +7921,7 @@ static int raid5_run(struct mddev *mddev)
+ mdname(mddev));
+ mddev->ro = 1;
+ set_disk_ro(mddev->gendisk, 1);
+- } else if (mddev->recovery_cp == MaxSector)
++ } else if (mddev->resync_offset == MaxSector)
+ set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ }
+
+@@ -7988,7 +7988,7 @@ static int raid5_run(struct mddev *mddev)
+ mddev->resync_max_sectors = mddev->dev_sectors;
+
+ if (mddev->degraded > dirty_parity_disks &&
+- mddev->recovery_cp != MaxSector) {
++ mddev->resync_offset != MaxSector) {
+ if (test_bit(MD_HAS_PPL, &mddev->flags))
+ pr_crit("md/raid:%s: starting dirty degraded array with PPL.\n",
+ mdname(mddev));
+@@ -8328,8 +8328,8 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
+
+ md_set_array_sectors(mddev, newsize);
+ if (sectors > mddev->dev_sectors &&
+- mddev->recovery_cp > mddev->dev_sectors) {
+- mddev->recovery_cp = mddev->dev_sectors;
++ mddev->resync_offset > mddev->dev_sectors) {
++ mddev->resync_offset = mddev->dev_sectors;
+ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ }
+ mddev->dev_sectors = sectors;
+@@ -8423,7 +8423,7 @@ static int raid5_start_reshape(struct mddev *mddev)
+ return -EINVAL;
+
+ /* raid5 can't handle concurrent reshape and recovery */
+- if (mddev->recovery_cp < MaxSector)
++ if (mddev->resync_offset < MaxSector)
+ return -EBUSY;
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->disks[i].replacement)
+@@ -8648,7 +8648,7 @@ static void *raid45_takeover_raid0(struct mddev *mddev, int level)
+ mddev->raid_disks += 1;
+ mddev->delta_disks = 1;
+ /* make sure it will be not marked as dirty */
+- mddev->recovery_cp = MaxSector;
++ mddev->resync_offset = MaxSector;
+
+ return setup_conf(mddev);
+ }
+diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
+index ff47b6f0ba0f..b13946287277 100644
+--- a/include/uapi/linux/raid/md_p.h
++++ b/include/uapi/linux/raid/md_p.h
+@@ -173,7 +173,7 @@ typedef struct mdp_superblock_s {
+ #else
+ #error unspecified endianness
+ #endif
+- __u32 recovery_cp; /* 11 recovery checkpoint sector count */
++ __u32 resync_offset; /* 11 resync checkpoint sector count */
+ /* There are only valid for minor_version > 90 */
+ __u64 reshape_position; /* 12,13 next address in array-space for reshape */
+ __u32 new_level; /* 14 new level we are reshaping to */
+--
+2.50.1
+
--- /dev/null
+From 591aba37eb1ec1e49c90716953cfa051e5a53c98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 11:35:13 +0530
+Subject: microchip: lan865x: fix missing netif_start_queue() call on device
+ open
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit 1683fd1b2fa79864d3c7a951d9cea0a9ba1a1923 ]
+
+This fixes an issue where the transmit queue is started implicitly only
+the very first time the device is registered. When the device is taken
+down and brought back up again (using `ip` or `ifconfig`), the transmit
+queue is not restarted, causing packet transmission to hang.
+
+Adding an explicit call to netif_start_queue() in lan865x_net_open()
+ensures the transmit queue is properly started every time the device
+is reopened.
+
+Fixes: 5cd2340cb6a3 ("microchip: lan865x: add driver support for Microchip's LAN865X MAC-PHY")
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Link: https://patch.msgid.link/20250818060514.52795-2-parthiban.veerasooran@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/microchip/lan865x/lan865x.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index dd436bdff0f8..d03f5a8de58d 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -311,6 +311,8 @@ static int lan865x_net_open(struct net_device *netdev)
+
+ phy_start(netdev->phydev);
+
++ netif_start_queue(netdev);
++
+ return 0;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 0eb9440faa80d02c4dc006f99d09740c414ad7c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 11:35:14 +0530
+Subject: microchip: lan865x: fix missing Timer Increment config for Rev.B0/B1
+
+From: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+
+[ Upstream commit 2cd58fec912acec273cb155911ab8f06ddbb131a ]
+
+Fix missing configuration for LAN865x silicon revisions B0 and B1 as per
+Microchip Application Note AN1760 (Rev F, June 2024).
+
+The Timer Increment register was not being set, which is required for
+accurate timestamping. As per the application note, configure the MAC to
+set timestamping at the end of the Start of Frame Delimiter (SFD), and
+set the Timer Increment register to 40 ns (corresponding to a 25 MHz
+internal clock).
+
+Link: https://www.microchip.com/en-us/application-notes/an1760
+
+Fixes: 5cd2340cb6a3 ("microchip: lan865x: add driver support for Microchip's LAN865X MAC-PHY")
+Signed-off-by: Parthiban Veerasooran <parthiban.veerasooran@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Link: https://patch.msgid.link/20250818060514.52795-3-parthiban.veerasooran@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/microchip/lan865x/lan865x.c | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/drivers/net/ethernet/microchip/lan865x/lan865x.c b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+index d03f5a8de58d..84c41f193561 100644
+--- a/drivers/net/ethernet/microchip/lan865x/lan865x.c
++++ b/drivers/net/ethernet/microchip/lan865x/lan865x.c
+@@ -32,6 +32,10 @@
+ /* MAC Specific Addr 1 Top Reg */
+ #define LAN865X_REG_MAC_H_SADDR1 0x00010023
+
++/* MAC TSU Timer Increment Register */
++#define LAN865X_REG_MAC_TSU_TIMER_INCR 0x00010077
++#define MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS 0x0028
++
+ struct lan865x_priv {
+ struct work_struct multicast_work;
+ struct net_device *netdev;
+@@ -346,6 +350,21 @@ static int lan865x_probe(struct spi_device *spi)
+ goto free_netdev;
+ }
+
++ /* LAN865x Rev.B0/B1 configuration parameters from AN1760
++ * As per the Configuration Application Note AN1760 published in the
++ * link, https://www.microchip.com/en-us/application-notes/an1760
++ * Revision F (DS60001760G - June 2024), configure the MAC to set time
++ * stamping at the end of the Start of Frame Delimiter (SFD) and set the
++ * Timer Increment reg to 40 ns to be used as a 25 MHz internal clock.
++ */
++ ret = oa_tc6_write_register(priv->tc6, LAN865X_REG_MAC_TSU_TIMER_INCR,
++ MAC_TSU_TIMER_INCR_COUNT_NANOSECONDS);
++ if (ret) {
++ dev_err(&spi->dev, "Failed to config TSU Timer Incr reg: %d\n",
++ ret);
++ goto oa_tc6_exit;
++ }
++
+ /* As per the point s3 in the below errata, SPI receive Ethernet frame
+ * transfer may halt when starting the next frame in the same data block
+ * (chunk) as the end of a previous frame. The RFA field should be
+--
+2.50.1
+
--- /dev/null
+From 06fd82287b793ca268955fb984efca05c7bcf378 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 618957d65663..9a2d64a0a858 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2375,6 +2375,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 80ee5c4825dc..9962dc157901 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -94,6 +94,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From 1cb7f25805ac9480f7e649af2c2cca9c6e831f99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 11:58:25 +0200
+Subject: net: airoha: ppe: Do not invalid PPE entries in case of SW hash
+ collision
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 9f6b606b6b37e61427412708411e8e04b1a858e8 ]
+
+SW hash computed by airoha_ppe_foe_get_entry_hash routine (used for
+foe_flow hlist) can theoretically produce collisions between two
+different HW PPE entries.
+In airoha_ppe_foe_insert_entry() if the collision occurs we will mark
+the second PPE entry in the list as stale (setting the hw hash to 0xffff).
+Stale entries are no longer updated in the airoha_ppe_foe_flow_entry_update
+routine and so they are removed by Netfilter.
+Fix the problem by not marking the second entry as stale in
+airoha_ppe_foe_insert_entry routine if we have already inserted the
+brand new entry in the PPE table and let Netfilter remove real stale
+entries according to their timestamp.
+Please note this is just a theoretical issue spotted reviewing the code
+and not faced running the system.
+
+Fixes: cd53f622611f9 ("net: airoha: Add L2 hw acceleration support")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20250818-airoha-en7581-hash-collision-fix-v1-1-d190c4b53d1c@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/airoha/airoha_ppe.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/airoha/airoha_ppe.c b/drivers/net/ethernet/airoha/airoha_ppe.c
+index 7832fe8fc202..af6e4d4c0ece 100644
+--- a/drivers/net/ethernet/airoha/airoha_ppe.c
++++ b/drivers/net/ethernet/airoha/airoha_ppe.c
+@@ -726,10 +726,8 @@ static void airoha_ppe_foe_insert_entry(struct airoha_ppe *ppe,
+ continue;
+ }
+
+- if (commit_done || !airoha_ppe_foe_compare_entry(e, hwe)) {
+- e->hash = 0xffff;
++ if (!airoha_ppe_foe_compare_entry(e, hwe))
+ continue;
+- }
+
+ airoha_ppe_foe_commit_entry(ppe, &e->data, hash);
+ commit_done = true;
+--
+2.50.1
+
--- /dev/null
+From 56de973acb5a22da4d3acd1078485ac878b18b97 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 10:10:54 +0800
+Subject: net: bridge: fix soft lockup in br_multicast_query_expired()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d1547bf460baec718b3398365f8de33d25c5f36f ]
+
+When set multicast_query_interval to a large value, the local variable
+'time' in br_multicast_send_query() may overflow. If the time is smaller
+than jiffies, the timer will expire immediately, and then call mod_timer()
+again, which creates a loop and may trigger the following soft lockup
+issue.
+
+ watchdog: BUG: soft lockup - CPU#1 stuck for 221s! [rb_consumer:66]
+ CPU: 1 UID: 0 PID: 66 Comm: rb_consumer Not tainted 6.16.0+ #259 PREEMPT(none)
+ Call Trace:
+ <IRQ>
+ __netdev_alloc_skb+0x2e/0x3a0
+ br_ip6_multicast_alloc_query+0x212/0x1b70
+ __br_multicast_send_query+0x376/0xac0
+ br_multicast_send_query+0x299/0x510
+ br_multicast_query_expired.constprop.0+0x16d/0x1b0
+ call_timer_fn+0x3b/0x2a0
+ __run_timers+0x619/0x950
+ run_timer_softirq+0x11c/0x220
+ handle_softirqs+0x18e/0x560
+ __irq_exit_rcu+0x158/0x1a0
+ sysvec_apic_timer_interrupt+0x76/0x90
+ </IRQ>
+
+This issue can be reproduced with:
+ ip link add br0 type bridge
+ echo 1 > /sys/class/net/br0/bridge/multicast_querier
+ echo 0xffffffffffffffff >
+ /sys/class/net/br0/bridge/multicast_query_interval
+ ip link set dev br0 up
+
+The multicast_startup_query_interval can also cause this issue. Similar to
+the commit 99b40610956a ("net: bridge: mcast: add and enforce query
+interval minimum"), add check for the query interval maximum to fix this
+issue.
+
+Link: https://lore.kernel.org/netdev/20250806094941.1285944-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250812091818.542238-1-wangliang74@huawei.com/
+Fixes: d902eee43f19 ("bridge: Add multicast count/interval sysfs entries")
+Suggested-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250813021054.1643649-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_multicast.c | 16 ++++++++++++++++
+ net/bridge/br_private.h | 2 ++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 1377f31b719c..8ce145938b02 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4818,6 +4818,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_query_interval = intvl_jiffies;
+ }
+
+@@ -4834,6 +4842,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index b159aae594c0..8de0904b9627 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -31,6 +31,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+
+ #define BR_HWDOM_MAX BITS_PER_LONG
+
+--
+2.50.1
+
--- /dev/null
+From d03dc4ce654e2ea6cf5817acf8d6619de661a8b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 18:04:57 -0700
+Subject: net: dsa: microchip: Fix KSZ9477 HSR port setup issue
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tristram Ha <tristram.ha@microchip.com>
+
+[ Upstream commit e318cd6714592fb762fcab59c5684a442243a12f ]
+
+ksz9477_hsr_join() is called once to setup the HSR port membership, but
+the port can be enabled later, or disabled and enabled back and the port
+membership is not set correctly inside ksz_update_port_member(). The
+added code always use the correct HSR port membership for HSR port that
+is enabled.
+
+Fixes: 2d61298fdd7b ("net: dsa: microchip: Enable HSR offloading for KSZ9477")
+Reported-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Signed-off-by: Tristram Ha <tristram.ha@microchip.com>
+Reviewed-by: Łukasz Majewski <lukma@nabladev.com>
+Tested-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Reviewed-by: Frieder Schrempf <frieder.schrempf@kontron.de>
+Link: https://patch.msgid.link/20250819010457.563286-1-Tristram.Ha@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index 7c142c17b3f6..adef7aa327ce 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2347,6 +2347,12 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
+ dev->dev_ops->cfg_port_member(dev, i, val | cpu_port);
+ }
+
++ /* HSR ports are setup once so need to use the assigned membership
++ * when the port is enabled.
++ */
++ if (!port_member && p->stp_state == BR_STATE_FORWARDING &&
++ (dev->hsr_ports & BIT(port)))
++ port_member = dev->hsr_ports;
+ dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 5cdfd28d2a9d30806694519075f731a5630fbcdc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:57 +0800
+Subject: net: ethernet: mtk_ppe: add RCU lock around dev_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 62c30c544359aa18b8fb2734166467a07d435c2d ]
+
+Ensure ndo_fill_forward_path() is called with RCU lock held.
+
+Fixes: 2830e314778d ("net: ethernet: mtk-ppe: fix traffic offload with bridged wlan")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-1-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index c855fb799ce1..e9bd32741983 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
++ rcu_read_lock();
+ err = dev_fill_forward_path(dev, addr, &stack);
++ rcu_read_unlock();
+ if (err)
+ return err;
+
+--
+2.50.1
+
--- /dev/null
+From ff1ea9d66bd5203e8008ae08c82807d36051b50c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 12:51:19 +0200
+Subject: net: gso: Forbid IPv6 TSO with extensions on devices with only
+ IPV6_CSUM
+
+From: Jakub Ramaseuski <jramaseu@redhat.com>
+
+[ Upstream commit 864e3396976ef41de6cc7bc366276bf4e084fff2 ]
+
+When performing Generic Segmentation Offload (GSO) on an IPv6 packet that
+contains extension headers, the kernel incorrectly requests checksum offload
+if the egress device only advertises NETIF_F_IPV6_CSUM feature, which has
+a strict contract: it supports checksum offload only for plain TCP or UDP
+over IPv6 and explicitly does not support packets with extension headers.
+The current GSO logic violates this contract by failing to disable the feature
+for packets with extension headers, such as those used in GREoIPv6 tunnels.
+
+This violation results in the device being asked to perform an operation
+it cannot support, leading to a `skb_warn_bad_offload` warning and a collapse
+of network throughput. While device TSO/USO is correctly bypassed in favor
+of software GSO for these packets, the GSO stack must be explicitly told not
+to request checksum offload.
+
+Mask NETIF_F_IPV6_CSUM, NETIF_F_TSO6 and NETIF_F_GSO_UDP_L4
+in gso_features_check if the IPv6 header contains extension headers to compute
+checksum in software.
+
+The exception is a BIG TCP extension, which, as stated in commit
+68e068cabd2c6c53 ("net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets"):
+"The feature is only enabled on devices that support BIG TCP TSO.
+The header is only present for PF_PACKET taps like tcpdump,
+and not transmitted by physical devices."
+
+kernel log output (truncated):
+WARNING: CPU: 1 PID: 5273 at net/core/dev.c:3535 skb_warn_bad_offload+0x81/0x140
+...
+Call Trace:
+ <TASK>
+ skb_checksum_help+0x12a/0x1f0
+ validate_xmit_skb+0x1a3/0x2d0
+ validate_xmit_skb_list+0x4f/0x80
+ sch_direct_xmit+0x1a2/0x380
+ __dev_xmit_skb+0x242/0x670
+ __dev_queue_xmit+0x3fc/0x7f0
+ ip6_finish_output2+0x25e/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_tnl_xmit+0x608/0xc00 [ip6_tunnel]
+ ip6gre_tunnel_xmit+0x1c0/0x390 [ip6_gre]
+ dev_hard_start_xmit+0x63/0x1c0
+ __dev_queue_xmit+0x6d0/0x7f0
+ ip6_finish_output2+0x214/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ inet6_csk_xmit+0xeb/0x150
+ __tcp_transmit_skb+0x555/0xa80
+ tcp_write_xmit+0x32a/0xe90
+ tcp_sendmsg_locked+0x437/0x1110
+ tcp_sendmsg+0x2f/0x50
+...
+skb linear: 00000000: e4 3d 1a 7d ec 30 e4 3d 1a 7e 5d 90 86 dd 60 0e
+skb linear: 00000010: 00 0a 1b 34 3c 40 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000020: 00 00 00 00 00 12 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000030: 00 00 00 00 00 11 2f 00 04 01 04 01 01 00 00 00
+skb linear: 00000040: 86 dd 60 0e 00 0a 1b 00 06 40 20 23 00 00 00 00
+skb linear: 00000050: 00 00 00 00 00 00 00 00 00 12 20 23 00 00 00 00
+skb linear: 00000060: 00 00 00 00 00 00 00 00 00 11 bf 96 14 51 13 f9
+skb linear: 00000070: ae 27 a0 a8 2b e3 80 18 00 40 5b 6f 00 00 01 01
+skb linear: 00000080: 08 0a 42 d4 50 d5 4b 70 f8 1a
+
+Fixes: 04c20a9356f283da ("net: skip offload for NETIF_F_IPV6_CSUM if ipv6 header contains extension")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Suggested-by: Michal Schmidt <mschmidt@redhat.com>
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Jakub Ramaseuski <jramaseu@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250814105119.1525687-1-jramaseu@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index be97c440ecd5..b014a5ce9e0f 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3782,6 +3782,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ features &= ~NETIF_F_TSO_MANGLEID;
+ }
+
++ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
++ * so neither does TSO that depends on it.
++ */
++ if (features & NETIF_F_IPV6_CSUM &&
++ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
++ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
++
+ return features;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 170b4766e2e837582a5128b7472187403d25a3a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Jul 2025 09:48:14 +0300
+Subject: net/mlx5: Add IFC bits and enums for buf_ownership
+
+From: Oren Sidi <osidi@nvidia.com>
+
+[ Upstream commit 6f09ee0b583cad4f2b6a82842c26235bee3d5c2e ]
+
+Extend structure layouts and defines buf_ownership.
+buf_ownership indicates whether the buffer is managed by SW or FW.
+
+Signed-off-by: Oren Sidi <osidi@nvidia.com>
+Reviewed-by: Alex Lazar <alazar@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1752734895-257735-3-git-send-email-tariqt@nvidia.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Stable-dep-of: 451d2849ea66 ("net/mlx5e: Query FW for buffer ownership")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/mlx5/mlx5_ifc.h | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 2c09df4ee574..83288df7bb45 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -10460,8 +10460,16 @@ struct mlx5_ifc_pifr_reg_bits {
+ u8 port_filter_update_en[8][0x20];
+ };
+
++enum {
++ MLX5_BUF_OWNERSHIP_UNKNOWN = 0x0,
++ MLX5_BUF_OWNERSHIP_FW_OWNED = 0x1,
++ MLX5_BUF_OWNERSHIP_SW_OWNED = 0x2,
++};
++
+ struct mlx5_ifc_pfcc_reg_bits {
+- u8 reserved_at_0[0x8];
++ u8 reserved_at_0[0x4];
++ u8 buf_ownership[0x2];
++ u8 reserved_at_6[0x2];
+ u8 local_port[0x8];
+ u8 reserved_at_10[0xb];
+ u8 ppan_mask_n[0x1];
+@@ -10597,7 +10605,9 @@ struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 fec_200G_per_lane_in_pplm[0x1];
+ u8 reserved_at_1e[0x2a];
+ u8 fec_100G_per_lane_in_pplm[0x1];
+- u8 reserved_at_49[0x1f];
++ u8 reserved_at_49[0xa];
++ u8 buffer_ownership[0x1];
++ u8 resereved_at_54[0x14];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
+ u8 rx_icrc_encapsulated_counter[0x1];
+--
+2.50.1
+
--- /dev/null
+From de5b2f0854556447379288a9ce5044cfc5594440 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:02 +0300
+Subject: net/mlx5: Base ECVF devlink port attrs from 0
+
+From: Daniel Jurgens <danielj@nvidia.com>
+
+[ Upstream commit bc17455bc843b2f4b206e0bb8139013eb3d3c08b ]
+
+Adjust the vport number by the base ECVF vport number so the port
+attributes start at 0. Previously the port attributes would start 1
+after the maximum number of host VFs.
+
+Fixes: dc13180824b7 ("net/mlx5: Enable devlink port for embedded cpu VF vports")
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250820133209.389065-2-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+index b7102e14d23d..c33accadae0f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, external);
+ } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
++ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
++
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
+- vport_num - 1, false);
++ vport_num - base_vport, false);
+ }
+ }
+
+--
+2.50.1
+
--- /dev/null
+From cbc54d7be2805a90756aa696edc241fbd6d3f518 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Aug 2025 23:23:23 +0300
+Subject: net/mlx5: CT: Use the correct counter offset
+
+From: Vlad Dogaru <vdogaru@nvidia.com>
+
+[ Upstream commit d2d6f950cb43be6845a41cac5956cb2a10e657e5 ]
+
+Specifying the counter action is not enough, as it is used by multiple
+counters that were allocated in a bulk. By omitting the offset, rules
+will be associated with a different counter from the same bulk.
+Subsequently, the CT subsystem checks the correct counter, assumes that
+no traffic has triggered the rule, and ages out the rule. The end result
+is intermittent offloading of long lived connections, as rules are aged
+out then promptly re-added.
+
+Fix this by specifying the correct offset along with the counter rule.
+
+Fixes: 34eea5b12a10 ("net/mlx5e: CT: Add initial support for Hardware Steering")
+Signed-off-by: Vlad Dogaru <vdogaru@nvidia.com>
+Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250817202323.308604-8-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
+index a4263137fef5..01d522b02947 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/ct_fs_hmfs.c
+@@ -173,6 +173,8 @@ static void mlx5_ct_fs_hmfs_fill_rule_actions(struct mlx5_ct_fs_hmfs *fs_hmfs,
+
+ memset(rule_actions, 0, NUM_CT_HMFS_RULES * sizeof(*rule_actions));
+ rule_actions[0].action = mlx5_fc_get_hws_action(fs_hmfs->ctx, attr->counter);
++ rule_actions[0].counter.offset =
++ attr->counter->id - attr->counter->bulk->base_id;
+ /* Modify header is special, it may require extra arguments outside the action itself. */
+ if (mh_action->mh_data) {
+ rule_actions[1].modify_header.offset = mh_action->mh_data->offset;
+--
+2.50.1
+
--- /dev/null
+From 84d0502beed8f112c1c8d755a8712e640a7384ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Aug 2025 23:23:17 +0300
+Subject: net/mlx5: HWS, fix bad parameter in CQ creation
+
+From: Yevgeny Kliteynik <kliteyn@nvidia.com>
+
+[ Upstream commit 2462c1b9217246a889ec318b3894d84e4dd709c6 ]
+
+'cqe_sz' valid value should be 0 for 64-byte CQE.
+
+Fixes: 2ca62599aa0b ("net/mlx5: HWS, added send engine and context handling")
+Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Reviewed-by: Vlad Dogaru <vdogaru@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250817202323.308604-2-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+index c4b22be19a9b..b0595c9b09e4 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/send.c
+@@ -964,7 +964,6 @@ static int hws_send_ring_open_cq(struct mlx5_core_dev *mdev,
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+- MLX5_SET(cqc, cqc_data, cqe_sz, queue->num_entries);
+ MLX5_SET(cqc, cqc_data, log_cq_size, ilog2(queue->num_entries));
+
+ err = hws_send_ring_alloc_cq(mdev, numa_node, queue, cqc_data, cq);
+--
+2.50.1
+
--- /dev/null
+From 2b895e71b24de8b2916e843ba116b7777603ba9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Aug 2025 23:23:19 +0300
+Subject: net/mlx5: HWS, fix complex rules rehash error flow
+
+From: Yevgeny Kliteynik <kliteyn@nvidia.com>
+
+[ Upstream commit 4a842b1bf18a32ee0c25dd6dd98728b786a76fe4 ]
+
+Moving rules from matcher to matcher should not fail.
+However, if it does fail due to various reasons, the error flow
+should allow the kernel to continue functioning (albeit with broken
+steering rules) instead of going into series of soft lock-ups or
+some other problematic behaviour.
+
+Similar to the simple rules, complex rules rehash logic suffers
+from the same problems. This patch fixes the error flow for moving
+complex rules:
+ - If new rule creation fails before it was even enqueued, do not
+ poll for completion
+ - If TIMEOUT happened while moving the rule, no point trying
+ to poll for completions for other rules. Something is broken,
+ completion won't come, just abort the rehash sequence.
+ - If some other completion with error received, don't give up.
+ Continue handling rest of the rules to minimize the damage.
+ - Make sure that the first error code that was received will
+ be actually returned to the caller instead of replacing it
+ with the generic error code.
+
+All the aforementioned issues stem from the same bad error flow,
+so no point fixing them one by one and leaving partially broken
+code - fixing them in one patch.
+
+Fixes: 17e0accac577 ("net/mlx5: HWS, support complex matchers")
+Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Reviewed-by: Vlad Dogaru <vdogaru@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250817202323.308604-4-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mlx5/core/steering/hws/bwc_complex.c | 41 +++++++++++++------
+ 1 file changed, 28 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+index ca7501c57468..14e79579c719 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+@@ -1328,11 +1328,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+ {
+ struct mlx5hws_context *ctx = bwc_matcher->matcher->tbl->ctx;
+ struct mlx5hws_matcher *matcher = bwc_matcher->matcher;
+- bool move_error = false, poll_error = false;
+ u16 bwc_queues = mlx5hws_bwc_queues(ctx);
+ struct mlx5hws_bwc_rule *tmp_bwc_rule;
+ struct mlx5hws_rule_attr rule_attr;
+ struct mlx5hws_table *isolated_tbl;
++ int move_error = 0, poll_error = 0;
+ struct mlx5hws_rule *tmp_rule;
+ struct list_head *rules_list;
+ u32 expected_completions = 1;
+@@ -1391,11 +1391,15 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+ ret = mlx5hws_matcher_resize_rule_move(matcher,
+ tmp_rule,
+ &rule_attr);
+- if (unlikely(ret && !move_error)) {
+- mlx5hws_err(ctx,
+- "Moving complex BWC rule failed (%d), attempting to move rest of the rules\n",
+- ret);
+- move_error = true;
++ if (unlikely(ret)) {
++ if (!move_error) {
++ mlx5hws_err(ctx,
++ "Moving complex BWC rule: move failed (%d), attempting to move rest of the rules\n",
++ ret);
++ move_error = ret;
++ }
++ /* Rule wasn't queued, no need to poll */
++ continue;
+ }
+
+ expected_completions = 1;
+@@ -1403,11 +1407,19 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+ rule_attr.queue_id,
+ &expected_completions,
+ true);
+- if (unlikely(ret && !poll_error)) {
+- mlx5hws_err(ctx,
+- "Moving complex BWC rule: poll failed (%d), attempting to move rest of the rules\n",
+- ret);
+- poll_error = true;
++ if (unlikely(ret)) {
++ if (ret == -ETIMEDOUT) {
++ mlx5hws_err(ctx,
++ "Moving complex BWC rule: timeout polling for completions (%d), aborting rehash\n",
++ ret);
++ return ret;
++ }
++ if (!poll_error) {
++ mlx5hws_err(ctx,
++ "Moving complex BWC rule: polling for completions failed (%d), attempting to move rest of the rules\n",
++ ret);
++ poll_error = ret;
++ }
+ }
+
+ /* Done moving the rule to the new matcher,
+@@ -1422,8 +1434,11 @@ mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher)
+ }
+ }
+
+- if (move_error || poll_error)
+- ret = -EINVAL;
++ /* Return the first error that happened */
++ if (unlikely(move_error))
++ return move_error;
++ if (unlikely(poll_error))
++ return poll_error;
+
+ return ret;
+ }
+--
+2.50.1
+
--- /dev/null
+From 206f065cb2d07a2311aafcf6e3f4574c949b58ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 17 Aug 2025 23:23:22 +0300
+Subject: net/mlx5: HWS, Fix table creation UID
+
+From: Alex Vesker <valex@nvidia.com>
+
+[ Upstream commit 8a51507320ebddaab32610199774f69cd7d53e78 ]
+
+During table creation, caller passes a UID using ft_attr. The UID
+value was ignored, which leads to problems when the caller sets the
+UID to a non-zero value, such as SHARED_RESOURCE_UID (0xffff) - the
+internal FT objects will be created with UID=0.
+
+Fixes: 0869701cba3d ("net/mlx5: HWS, added FW commands handling")
+Signed-off-by: Alex Vesker <valex@nvidia.com>
+Reviewed-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250817202323.308604-7-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/steering/hws/cmd.c | 1 +
+ .../ethernet/mellanox/mlx5/core/steering/hws/cmd.h | 1 +
+ .../mellanox/mlx5/core/steering/hws/fs_hws.c | 1 +
+ .../mellanox/mlx5/core/steering/hws/matcher.c | 5 ++++-
+ .../mellanox/mlx5/core/steering/hws/mlx5hws.h | 1 +
+ .../mellanox/mlx5/core/steering/hws/table.c | 13 ++++++++++---
+ .../mellanox/mlx5/core/steering/hws/table.h | 3 ++-
+ 7 files changed, 20 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+index 9c83753e4592..0bdcab2e5cf3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.c
+@@ -55,6 +55,7 @@ int mlx5hws_cmd_flow_table_create(struct mlx5_core_dev *mdev,
+
+ MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+ MLX5_SET(create_flow_table_in, in, table_type, ft_attr->type);
++ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
+
+ ft_ctx = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+ MLX5_SET(flow_table_context, ft_ctx, level, ft_attr->level);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+index fa6bff210266..122ccc671628 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/cmd.h
+@@ -36,6 +36,7 @@ struct mlx5hws_cmd_set_fte_attr {
+ struct mlx5hws_cmd_ft_create_attr {
+ u8 type;
+ u8 level;
++ u16 uid;
+ bool rtc_valid;
+ bool decap_en;
+ bool reformat_en;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+index bf4643d0ce17..47e3947e7b51 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+@@ -267,6 +267,7 @@ static int mlx5_cmd_hws_create_flow_table(struct mlx5_flow_root_namespace *ns,
+
+ tbl_attr.type = MLX5HWS_TABLE_TYPE_FDB;
+ tbl_attr.level = ft_attr->level;
++ tbl_attr.uid = ft_attr->uid;
+ tbl = mlx5hws_table_create(ctx, &tbl_attr);
+ if (!tbl) {
+ mlx5_core_err(ns->dev, "Failed creating hws flow_table\n");
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+index ce28ee1c0e41..6000f2c641e0 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/matcher.c
+@@ -85,6 +85,7 @@ static int hws_matcher_create_end_ft_isolated(struct mlx5hws_matcher *matcher)
+
+ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
+ tbl,
++ 0,
+ &matcher->end_ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Isolated matcher: failed to create end flow table\n");
+@@ -112,7 +113,9 @@ static int hws_matcher_create_end_ft(struct mlx5hws_matcher *matcher)
+ if (mlx5hws_matcher_is_isolated(matcher))
+ ret = hws_matcher_create_end_ft_isolated(matcher);
+ else
+- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl,
++ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
++ tbl,
++ 0,
+ &matcher->end_ft_id);
+
+ if (ret) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+index d8ac6c196211..a2fe2f9e832d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+@@ -75,6 +75,7 @@ struct mlx5hws_context_attr {
+ struct mlx5hws_table_attr {
+ enum mlx5hws_table_type type;
+ u32 level;
++ u16 uid;
+ };
+
+ enum mlx5hws_matcher_flow_src {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+index 568f691733f3..6113383ae47b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.c
+@@ -9,6 +9,7 @@ u32 mlx5hws_table_get_id(struct mlx5hws_table *tbl)
+ }
+
+ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
++ u16 uid,
+ struct mlx5hws_cmd_ft_create_attr *ft_attr)
+ {
+ ft_attr->type = tbl->fw_ft_type;
+@@ -16,7 +17,9 @@ static void hws_table_init_next_ft_attr(struct mlx5hws_table *tbl,
+ ft_attr->level = tbl->ctx->caps->fdb_ft.max_level - 1;
+ else
+ ft_attr->level = tbl->ctx->caps->nic_ft.max_level - 1;
++
+ ft_attr->rtc_valid = true;
++ ft_attr->uid = uid;
+ }
+
+ static void hws_table_set_cap_attr(struct mlx5hws_table *tbl,
+@@ -119,12 +122,12 @@ static int hws_table_connect_to_default_miss_tbl(struct mlx5hws_table *tbl, u32
+
+ int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+- u32 *ft_id)
++ u16 uid, u32 *ft_id)
+ {
+ struct mlx5hws_cmd_ft_create_attr ft_attr = {0};
+ int ret;
+
+- hws_table_init_next_ft_attr(tbl, &ft_attr);
++ hws_table_init_next_ft_attr(tbl, uid, &ft_attr);
+ hws_table_set_cap_attr(tbl, &ft_attr);
+
+ ret = mlx5hws_cmd_flow_table_create(mdev, &ft_attr, ft_id);
+@@ -189,7 +192,10 @@ static int hws_table_init(struct mlx5hws_table *tbl)
+ }
+
+ mutex_lock(&ctx->ctrl_lock);
+- ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev, tbl, &tbl->ft_id);
++ ret = mlx5hws_table_create_default_ft(tbl->ctx->mdev,
++ tbl,
++ tbl->uid,
++ &tbl->ft_id);
+ if (ret) {
+ mlx5hws_err(tbl->ctx, "Failed to create flow table object\n");
+ mutex_unlock(&ctx->ctrl_lock);
+@@ -239,6 +245,7 @@ struct mlx5hws_table *mlx5hws_table_create(struct mlx5hws_context *ctx,
+ tbl->ctx = ctx;
+ tbl->type = attr->type;
+ tbl->level = attr->level;
++ tbl->uid = attr->uid;
+
+ ret = hws_table_init(tbl);
+ if (ret) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+index 0400cce0c317..1246f9bd8422 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/table.h
+@@ -18,6 +18,7 @@ struct mlx5hws_table {
+ enum mlx5hws_table_type type;
+ u32 fw_ft_type;
+ u32 level;
++ u16 uid;
+ struct list_head matchers_list;
+ struct list_head tbl_list_node;
+ struct mlx5hws_default_miss default_miss;
+@@ -47,7 +48,7 @@ u32 mlx5hws_table_get_res_fw_ft_type(enum mlx5hws_table_type tbl_type,
+
+ int mlx5hws_table_create_default_ft(struct mlx5_core_dev *mdev,
+ struct mlx5hws_table *tbl,
+- u32 *ft_id);
++ u16 uid, u32 *ft_id);
+
+ void mlx5hws_table_destroy_default_ft(struct mlx5hws_table *tbl,
+ u32 ft_id);
+--
+2.50.1
+
--- /dev/null
+From 09a5dd261b66b798d789d0ad7c4a517cbaf5c8f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:09 +0300
+Subject: net/mlx5e: Preserve shared buffer capacity during headroom updates
+
+From: Armen Ratner <armeng@nvidia.com>
+
+[ Upstream commit 8b0587a885fdb34fd6090a3f8625cb7ac1444826 ]
+
+When port buffer headroom changes, port_update_shared_buffer()
+recalculates the shared buffer size and splits it in a 3:1 ratio
+(lossy:lossless) - Currently, the calculation is:
+lossless = shared / 4;
+lossy = (shared / 4) * 3;
+
+Meaning, the calculation dropped the remainder of shared % 4 due to
+integer division, unintentionally reducing the total shared buffer
+by up to three cells on each update. Over time, this could shrink
+the buffer below usable size.
+
+Fix it by changing the calculation to:
+lossless = shared / 4;
+lossy = shared - lossless;
+
+This retains all buffer cells while still approximating the
+intended 3:1 split, preventing capacity loss over time.
+
+While at it, perform headroom calculations in units of cells rather than
+in bytes for more accurate calculations avoiding extra divisions.
+
+Fixes: a440030d8946 ("net/mlx5e: Update shared buffer along with device buffer changes")
+Signed-off-by: Armen Ratner <armeng@nvidia.com>
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Alexei Lazar <alazar@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/20250820133209.389065-9-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/en/port_buffer.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 5ae787656a7c..3efa8bf1d14e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ /* Total shared buffer size is split in a ratio of 3:1 between
+ * lossy and lossless pools respectively.
+ */
+- lossy_epool_size = (shared_buffer_size / 4) * 3;
+ lossless_ipool_size = shared_buffer_size / 4;
++ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
+
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ lossy_epool_size);
+@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+- u32 new_headroom_size = 0;
+- u32 current_headroom_size;
++ u32 current_headroom_cells = 0;
++ u32 new_headroom_cells = 0;
+ void *in;
+ int err;
+ int i;
+
+- current_headroom_size = port_buffer->headroom_size;
+-
+ in = kzalloc(sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
+ void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
++ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
++
+ u64 size = port_buffer->buffer[i].size;
+ u64 xoff = port_buffer->buffer[i].xoff;
+ u64 xon = port_buffer->buffer[i].xon;
+
+- new_headroom_size += size;
+ do_div(size, port_buff_cell_sz);
++ new_headroom_cells += size;
+ do_div(xoff, port_buff_cell_sz);
+ do_div(xon, port_buff_cell_sz);
+ MLX5_SET(bufferx_reg, buffer, size, size);
+@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
+ }
+
+- new_headroom_size /= port_buff_cell_sz;
+- current_headroom_size /= port_buff_cell_sz;
+- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+- new_headroom_size);
++ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
++ new_headroom_cells);
+ if (err)
+ goto out;
+
+--
+2.50.1
+
--- /dev/null
+From 82007abb0743cad6df89090992798408e1d74b4f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:08 +0300
+Subject: net/mlx5e: Query FW for buffer ownership
+
+From: Alexei Lazar <alazar@nvidia.com>
+
+[ Upstream commit 451d2849ea66659040b59ae3cb7e50cc97404733 ]
+
+The SW currently saves local buffer ownership when setting
+the buffer.
+This means that the SW assumes it has ownership of the buffer
+after the command is set.
+
+If setting the buffer fails and we remain in FW ownership,
+the local buffer ownership state incorrectly remains as SW-owned.
+This leads to incorrect behavior in subsequent PFC commands,
+causing failures.
+
+Instead of saving local buffer ownership in SW,
+query the FW for buffer ownership when setting the buffer.
+This ensures that the buffer ownership state is accurately
+reflected, avoiding the issues caused by incorrect ownership
+states.
+
+Fixes: ecdf2dadee8e ("net/mlx5e: Receive buffer support for DCBX")
+Signed-off-by: Alexei Lazar <alazar@nvidia.com>
+Reviewed-by: Shahar Shitrit <shshitrit@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250820133209.389065-8-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/mellanox/mlx5/core/en/dcbnl.h | 1 -
+ .../ethernet/mellanox/mlx5/core/en_dcbnl.c | 12 ++++++++---
+ .../ethernet/mellanox/mlx5/core/mlx5_core.h | 2 ++
+ .../net/ethernet/mellanox/mlx5/core/port.c | 20 +++++++++++++++++++
+ 4 files changed, 31 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+index b59aee75de94..2c98a5299df3 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+@@ -26,7 +26,6 @@ struct mlx5e_dcbx {
+ u8 cap;
+
+ /* Buffer configuration */
+- bool manual_buffer;
+ u32 cable_len;
+ u32 xoff;
+ u16 port_buff_cell_sz;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+index 5fe016e477b3..d166c0d5189e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+@@ -362,6 +362,7 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
+ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+ {
++ u8 buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 old_cable_len = priv->dcbx.cable_len;
+@@ -389,7 +390,14 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
+
+ if (MLX5_BUFFER_SUPPORTED(mdev)) {
+ pfc_new.pfc_en = (changed & MLX5E_PORT_BUFFER_PFC) ? pfc->pfc_en : curr_pfc_en;
+- if (priv->dcbx.manual_buffer)
++ ret = mlx5_query_port_buffer_ownership(mdev,
++ &buffer_ownership);
++ if (ret)
++ netdev_err(dev,
++ "%s, Failed to get buffer ownership: %d\n",
++ __func__, ret);
++
++ if (buffer_ownership == MLX5_BUF_OWNERSHIP_SW_OWNED)
+ ret = mlx5e_port_manual_buffer_config(priv, changed,
+ dev->mtu, &pfc_new,
+ NULL, NULL);
+@@ -982,7 +990,6 @@ static int mlx5e_dcbnl_setbuffer(struct net_device *dev,
+ if (!changed)
+ return 0;
+
+- priv->dcbx.manual_buffer = true;
+ err = mlx5e_port_manual_buffer_config(priv, changed, dev->mtu, NULL,
+ buffer_size, prio2buffer);
+ return err;
+@@ -1252,7 +1259,6 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
+ priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+
+ priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
+- priv->dcbx.manual_buffer = false;
+ priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
+
+ mlx5e_ets_init(priv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+index 2e02bdea8361..c2f6d205ddb1 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+@@ -358,6 +358,8 @@ int mlx5_query_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *out);
+ int mlx5_set_port_dcbx_param(struct mlx5_core_dev *mdev, u32 *in);
+ int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state);
+ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state);
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++ u8 *buffer_ownership);
+ int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio);
+ int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 549f1066d2a5..2d7adf7444ba 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -968,6 +968,26 @@ int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+ return err;
+ }
+
++int mlx5_query_port_buffer_ownership(struct mlx5_core_dev *mdev,
++ u8 *buffer_ownership)
++{
++ u32 out[MLX5_ST_SZ_DW(pfcc_reg)] = {};
++ int err;
++
++ if (!MLX5_CAP_PCAM_FEATURE(mdev, buffer_ownership)) {
++ *buffer_ownership = MLX5_BUF_OWNERSHIP_UNKNOWN;
++ return 0;
++ }
++
++ err = mlx5_query_pfcc_reg(mdev, out, sizeof(out));
++ if (err)
++ return err;
++
++ *buffer_ownership = MLX5_GET(pfcc_reg, out, buf_ownership);
++
++ return 0;
++}
++
+ int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+ {
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+--
+2.50.1
+
--- /dev/null
+From 3e962af68173e9b863127d5c14cc5dd4f0c468d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 23:57:57 +0000
+Subject: net/sched: Fix backlog accounting in qdisc_dequeue_internal
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 52bf272636bda69587952b35ae97690b8dc89941 ]
+
+This issue applies for the following qdiscs: hhf, fq, fq_codel, and
+fq_pie, and occurs in their change handlers when adjusting to the new
+limit. The problem is the following in the values passed to the
+subsequent qdisc_tree_reduce_backlog call given a tbf parent:
+
+ When the tbf parent runs out of tokens, skbs of these qdiscs will
+ be placed in gso_skb. Their peek handlers are qdisc_peek_dequeued,
+ which accounts for both qlen and backlog. However, in the case of
+ qdisc_dequeue_internal, ONLY qlen is accounted for when pulling
+ from gso_skb. This means that these qdiscs are missing a
+ qdisc_qstats_backlog_dec when dropping packets to satisfy the
+ new limit in their change handlers.
+
+ One can observe this issue with the following (with tc patched to
+ support a limit of 0):
+
+ export TARGET=fq
+ tc qdisc del dev lo root
+ tc qdisc add dev lo root handle 1: tbf rate 8bit burst 100b latency 1ms
+ tc qdisc replace dev lo handle 3: parent 1:1 $TARGET limit 1000
+ echo ''; echo 'add child'; tc -s -d qdisc show dev lo
+ ping -I lo -f -c2 -s32 -W0.001 127.0.0.1 2>&1 >/dev/null
+ echo ''; echo 'after ping'; tc -s -d qdisc show dev lo
+ tc qdisc change dev lo handle 3: parent 1:1 $TARGET limit 0
+ echo ''; echo 'after limit drop'; tc -s -d qdisc show dev lo
+ tc qdisc replace dev lo handle 2: parent 1:1 sfq
+ echo ''; echo 'post graft'; tc -s -d qdisc show dev lo
+
+ The second to last show command shows 0 packets but a positive
+ number (74) of backlog bytes. The problem becomes clearer in the
+ last show command, where qdisc_purge_queue triggers
+ qdisc_tree_reduce_backlog with the positive backlog and causes an
+ underflow in the tbf parent's backlog (4096 Mb instead of 0).
+
+To fix this issue, the codepath for all clients of qdisc_dequeue_internal
+has been simplified: codel, pie, hhf, fq, fq_pie, and fq_codel.
+qdisc_dequeue_internal handles the backlog adjustments for all cases that
+do not directly use the dequeue handler.
+
+The old fq_codel_change limit adjustment loop accumulated the arguments to
+the subsequent qdisc_tree_reduce_backlog call through the cstats field.
+However, this is confusing and error prone as fq_codel_dequeue could also
+potentially mutate this field (which qdisc_dequeue_internal calls in the
+non gso_skb case), so we have unified the code here with other qdiscs.
+
+Fixes: 2d3cbfd6d54a ("net_sched: Flush gso_skb list too during ->change()")
+Fixes: 4b549a2ef4be ("fq_codel: Fair Queue Codel AQM")
+Fixes: 10239edf86f1 ("net-qdisc-hhf: Heavy-Hitter Filter (HHF) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250812235725.45243-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 11 ++++++++---
+ net/sched/sch_codel.c | 12 +++++++-----
+ net/sched/sch_fq.c | 12 +++++++-----
+ net/sched/sch_fq_codel.c | 12 +++++++-----
+ net/sched/sch_fq_pie.c | 12 +++++++-----
+ net/sched/sch_hhf.c | 12 +++++++-----
+ net/sched/sch_pie.c | 12 +++++++-----
+ 7 files changed, 50 insertions(+), 33 deletions(-)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 638948be4c50..738cd5b13c62 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1038,12 +1038,17 @@ static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch, bool dir
+ skb = __skb_dequeue(&sch->gso_skb);
+ if (skb) {
+ sch->q.qlen--;
++ qdisc_qstats_backlog_dec(sch, skb);
+ return skb;
+ }
+- if (direct)
+- return __qdisc_dequeue_head(&sch->q);
+- else
++ if (direct) {
++ skb = __qdisc_dequeue_head(&sch->q);
++ if (skb)
++ qdisc_qstats_backlog_dec(sch, skb);
++ return skb;
++ } else {
+ return sch->dequeue(sch);
++ }
+ }
+
+ static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index c93761040c6e..fa0314679e43 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -101,9 +101,9 @@ static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_CODEL_MAX + 1];
+- unsigned int qlen, dropped = 0;
+ int err;
+
+ err = nla_parse_nested_deprecated(tb, TCA_CODEL_MAX, opt,
+@@ -142,15 +142,17 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
+ WRITE_ONCE(q->params.ecn,
+ !!nla_get_u32(tb[TCA_CODEL_ECN]));
+
+- qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+
+- dropped += qdisc_pkt_len(skb);
+- qdisc_qstats_backlog_dec(sch, skb);
++ if (!skb)
++ break;
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_qdisc_drop(skb, sch);
+ }
+- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return 0;
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 902ff5470607..fee922da2f99 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -1013,11 +1013,11 @@ static int fq_load_priomap(struct fq_sched_data *q,
+ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_MAX + 1];
+- int err, drop_count = 0;
+- unsigned drop_len = 0;
+ u32 fq_log;
++ int err;
+
+ err = nla_parse_nested_deprecated(tb, TCA_FQ_MAX, opt, fq_policy,
+ NULL);
+@@ -1135,16 +1135,18 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
+ err = fq_resize(sch, fq_log);
+ sch_tree_lock(sch);
+ }
++
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+
+ if (!skb)
+ break;
+- drop_len += qdisc_pkt_len(skb);
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_kfree_skbs(skb, skb);
+- drop_count++;
+ }
+- qdisc_tree_reduce_backlog(sch, drop_count, drop_len);
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return err;
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index 2a0f3a513bfa..a14142392939 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -366,6 +366,7 @@ static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
+ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct fq_codel_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
+ u32 quantum = 0;
+@@ -443,13 +444,14 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
+ q->memory_usage > q->memory_limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+
+- q->cstats.drop_len += qdisc_pkt_len(skb);
++ if (!skb)
++ break;
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_kfree_skbs(skb, skb);
+- q->cstats.drop_count++;
+ }
+- qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
+- q->cstats.drop_count = 0;
+- q->cstats.drop_len = 0;
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return 0;
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index b0e34daf1f75..7b96bc3ff891 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -287,10 +287,9 @@ static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
+ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct fq_pie_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
+- unsigned int len_dropped = 0;
+- unsigned int num_dropped = 0;
+ int err;
+
+ err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
+@@ -368,11 +367,14 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+
+- len_dropped += qdisc_pkt_len(skb);
+- num_dropped += 1;
++ if (!skb)
++ break;
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_kfree_skbs(skb, skb);
+ }
+- qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return 0;
+diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
+index 5aa434b46707..2d4855e28a28 100644
+--- a/net/sched/sch_hhf.c
++++ b/net/sched/sch_hhf.c
+@@ -508,9 +508,9 @@ static const struct nla_policy hhf_policy[TCA_HHF_MAX + 1] = {
+ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct hhf_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_HHF_MAX + 1];
+- unsigned int qlen, prev_backlog;
+ int err;
+ u64 non_hh_quantum;
+ u32 new_quantum = q->quantum;
+@@ -561,15 +561,17 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
+ usecs_to_jiffies(us));
+ }
+
+- qlen = sch->q.qlen;
+- prev_backlog = sch->qstats.backlog;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
+
++ if (!skb)
++ break;
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_kfree_skbs(skb, skb);
+ }
+- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen,
+- prev_backlog - sch->qstats.backlog);
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return 0;
+diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
+index ad46ee3ed5a9..0a377313b6a9 100644
+--- a/net/sched/sch_pie.c
++++ b/net/sched/sch_pie.c
+@@ -141,9 +141,9 @@ static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
+ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
++ unsigned int dropped_pkts = 0, dropped_bytes = 0;
+ struct pie_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_PIE_MAX + 1];
+- unsigned int qlen, dropped = 0;
+ int err;
+
+ err = nla_parse_nested_deprecated(tb, TCA_PIE_MAX, opt, pie_policy,
+@@ -193,15 +193,17 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
+ nla_get_u32(tb[TCA_PIE_DQ_RATE_ESTIMATOR]));
+
+ /* Drop excess packets if new limit is lower */
+- qlen = sch->q.qlen;
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
+
+- dropped += qdisc_pkt_len(skb);
+- qdisc_qstats_backlog_dec(sch, skb);
++ if (!skb)
++ break;
++
++ dropped_pkts++;
++ dropped_bytes += qdisc_pkt_len(skb);
+ rtnl_qdisc_drop(skb, sch);
+ }
+- qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
++ qdisc_tree_reduce_backlog(sch, dropped_pkts, dropped_bytes);
+
+ sch_tree_unlock(sch);
+ return 0;
+--
+2.50.1
+
--- /dev/null
+From 159390647210bbc23cda533b482e62f6e6ccc881 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as that is meant to optimize
+performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 48dd8c88903f..aa9f31e4415a 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1747,7 +1747,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1757,6 +1757,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1924,13 +1925,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From c3f852901371cc789aeb51a45835e303c0712fa0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index c968ea763774..b5e40c51655a 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From 63985097ed1a4434e4bd54c68ffa866b86b9960e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 13:46:18 +0800
+Subject: net/smc: fix UAF on smcsk after smc_listen_out()
+
+From: D. Wythe <alibuda@linux.alibaba.com>
+
+[ Upstream commit d9cef55ed49117bd63695446fb84b4b91815c0b4 ]
+
+BPF CI testing report a UAF issue:
+
+ [ 16.446633] BUG: kernel NULL pointer dereference, address: 0000000000000030
+ [ 16.447134] #PF: supervisor read access in kernel mode
+ [ 16.447516] #PF: error_code(0x0000) - not-present page
+ [ 16.447878] PGD 0 P4D 0
+ [ 16.448063] Oops: Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [ 16.448409] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Tainted: G OE 6.13.0-rc3-g89e8a75fda73-dirty #42
+ [ 16.449124] Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
+ [ 16.449502] Hardware name: QEMU Ubuntu 24.04 PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+ [ 16.450201] Workqueue: smc_hs_wq smc_listen_work
+ [ 16.450531] RIP: 0010:smc_listen_work+0xc02/0x1590
+ [ 16.452158] RSP: 0018:ffffb5ab40053d98 EFLAGS: 00010246
+ [ 16.452526] RAX: 0000000000000001 RBX: 0000000000000002 RCX: 0000000000000300
+ [ 16.452994] RDX: 0000000000000280 RSI: 00003513840053f0 RDI: 0000000000000000
+ [ 16.453492] RBP: ffffa097808e3800 R08: ffffa09782dba1e0 R09: 0000000000000005
+ [ 16.453987] R10: 0000000000000000 R11: 0000000000000000 R12: ffffa09782746400
+ [ 16.454497] R13: 0000000000000000 R14: 0000000000000000 R15: ffffa09782d40920
+ [ 16.454996] FS: 0000000000000000(0000) GS:ffffa097bbc00000(0000) knlGS:0000000000000000
+ [ 16.455557] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 16.455961] CR2: 0000000000000030 CR3: 0000000102788004 CR4: 0000000000770ef0
+ [ 16.456459] PKRU: 55555554
+ [ 16.456654] Call Trace:
+ [ 16.456832] <TASK>
+ [ 16.456989] ? __die+0x23/0x70
+ [ 16.457215] ? page_fault_oops+0x180/0x4c0
+ [ 16.457508] ? __lock_acquire+0x3e6/0x2490
+ [ 16.457801] ? exc_page_fault+0x68/0x200
+ [ 16.458080] ? asm_exc_page_fault+0x26/0x30
+ [ 16.458389] ? smc_listen_work+0xc02/0x1590
+ [ 16.458689] ? smc_listen_work+0xc02/0x1590
+ [ 16.458987] ? lock_is_held_type+0x8f/0x100
+ [ 16.459284] process_one_work+0x1ea/0x6d0
+ [ 16.459570] worker_thread+0x1c3/0x380
+ [ 16.459839] ? __pfx_worker_thread+0x10/0x10
+ [ 16.460144] kthread+0xe0/0x110
+ [ 16.460372] ? __pfx_kthread+0x10/0x10
+ [ 16.460640] ret_from_fork+0x31/0x50
+ [ 16.460896] ? __pfx_kthread+0x10/0x10
+ [ 16.461166] ret_from_fork_asm+0x1a/0x30
+ [ 16.461453] </TASK>
+ [ 16.461616] Modules linked in: bpf_testmod(OE) [last unloaded: bpf_testmod(OE)]
+ [ 16.462134] CR2: 0000000000000030
+ [ 16.462380] ---[ end trace 0000000000000000 ]---
+ [ 16.462710] RIP: 0010:smc_listen_work+0xc02/0x1590
+
+The direct cause of this issue is that after smc_listen_out_connected(),
+newclcsock->sk may be NULL since it will release the smcsk. Therefore,
+if the application closes the socket immediately after accept,
+newclcsock->sk can be NULL. A possible execution order could be as
+follows:
+
+smc_listen_work | userspace
+-----------------------------------------------------------------
+lock_sock(sk) |
+smc_listen_out_connected() |
+| \- smc_listen_out |
+| | \- release_sock |
+ | |- sk->sk_data_ready() |
+ | fd = accept();
+ | close(fd);
+ | \- socket->sk = NULL;
+/* newclcsock->sk is NULL now */
+SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk))
+
+Since smc_listen_out_connected() will not fail, simply swapping the order
+of the code can easily fix this issue.
+
+Fixes: 3b2dec2603d5 ("net/smc: restructure client and server code in af_smc")
+Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
+Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
+Link: https://patch.msgid.link/20250818054618.41615-1-alibuda@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 1882bab8e00e..dc72ff353813 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2568,8 +2568,9 @@ static void smc_listen_work(struct work_struct *work)
+ goto out_decl;
+ }
+
+- smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
++ /* smc_listen_out() will release smcsk */
++ smc_listen_out_connected(new_smc);
+ goto out_free;
+
+ out_unlock:
+--
+2.50.1
+
--- /dev/null
+From 510ae7b1f1848b0e36f14ba7c4a00ce7b68d2c00 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 10:48:03 +0000
+Subject: net: stmmac: thead: Enable TX clock before MAC initialization
+
+From: Yao Zi <ziyao@disroot.org>
+
+[ Upstream commit 6d6714bf0c4e8eb2274081b4b023dfa01581c123 ]
+
+The clk_tx_i clock must be supplied to the MAC for successful
+initialization. On TH1520 SoC, the clock is provided by an internal
+divider configured through GMAC_PLLCLK_DIV register when using RGMII
+interface. However, currently we don't setup the divider before
+initialization of the MAC, resulting in DMA reset failures if the
+bootloader/firmware doesn't enable the divider,
+
+[ 7.839601] thead-dwmac ffe7060000.ethernet eth0: Register MEM_TYPE_PAGE_POOL RxQ-0
+[ 7.938338] thead-dwmac ffe7060000.ethernet eth0: PHY [stmmac-0:02] driver [RTL8211F Gigabit Ethernet] (irq=POLL)
+[ 8.160746] thead-dwmac ffe7060000.ethernet eth0: Failed to reset the dma
+[ 8.170118] thead-dwmac ffe7060000.ethernet eth0: stmmac_hw_setup: DMA engine initialization failed
+[ 8.179384] thead-dwmac ffe7060000.ethernet eth0: __stmmac_open: Hw setup failed
+
+Let's simply write GMAC_PLLCLK_DIV_EN to GMAC_PLLCLK_DIV to enable the
+divider before MAC initialization. Note that for reconfiguring the
+divisor, the divider must be disabled first and re-enabled later to make
+sure the new divisor takes effect.
+
+The exact clock rate doesn't affect MAC's initialization according to my
+test. It's set to the speed required by RGMII when the linkspeed is
+1Gbps and could be reclocked later after link is up if necessary.
+
+Fixes: 33a1a01e3afa ("net: stmmac: Add glue layer for T-HEAD TH1520 SoC")
+Signed-off-by: Yao Zi <ziyao@disroot.org>
+Reviewed-by: Drew Fustini <fustini@kernel.org>
+Link: https://patch.msgid.link/20250815104803.55294-1-ziyao@disroot.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+index f2946bea0bc2..6c6c49e4b66f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-thead.c
+@@ -152,7 +152,7 @@ static int thead_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
+ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
+ {
+ struct thead_dwmac *dwmac = plat->bsp_priv;
+- u32 reg;
++ u32 reg, div;
+
+ switch (plat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+@@ -164,6 +164,13 @@ static int thead_dwmac_enable_clk(struct plat_stmmacenet_data *plat)
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* use pll */
++ div = clk_get_rate(plat->stmmac_clk) / rgmii_clock(SPEED_1000);
++ reg = FIELD_PREP(GMAC_PLLCLK_DIV_EN, 1) |
++ FIELD_PREP(GMAC_PLLCLK_DIV_NUM, div);
++
++ writel(0, dwmac->apb_base + GMAC_PLLCLK_DIV);
++ writel(reg, dwmac->apb_base + GMAC_PLLCLK_DIV);
++
+ writel(GMAC_GTXCLK_SEL_PLL, dwmac->apb_base + GMAC_GTXCLK_SEL);
+ reg = GMAC_TX_CLK_EN | GMAC_TX_CLK_N_EN | GMAC_TX_CLK_OUT_EN |
+ GMAC_RX_CLK_EN | GMAC_RX_CLK_N_EN;
+--
+2.50.1
+
--- /dev/null
+From e30500b44d321b3a373bd8f8f3b12f53510d63ca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 16:21:06 +0530
+Subject: net: ti: icssg-prueth: Fix HSR and switch offload Enablement during
+ firmware reload.
+
+From: MD Danish Anwar <danishanwar@ti.com>
+
+[ Upstream commit 01792bc3e5bdafa171dd83c7073f00e7de93a653 ]
+
+To enable HSR / Switch offload, certain configurations are needed.
+Currently they are done inside icssg_change_mode(). This function only
+gets called if we move from one mode to another without bringing the
+links up / down.
+
+Once in HSR / Switch mode, if we bring the links down and bring it back
+up again. The callback sequence is,
+
+- emac_ndo_stop()
+ Firmwares are stopped
+- emac_ndo_open()
+ Firmwares are loaded
+
+In this path icssg_change_mode() doesn't get called and as a result the
+configurations needed for HSR / Switch is not done.
+
+To fix this, put all these configurations in a separate function
+icssg_enable_fw_offload() and call this from both icssg_change_mode()
+and emac_ndo_open()
+
+Fixes: 56375086d093 ("net: ti: icssg-prueth: Enable HSR Tx duplication, Tx Tag and Rx Tag offload")
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Link: https://patch.msgid.link/20250814105106.1491871-1-danishanwar@ti.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 72 +++++++++++---------
+ 1 file changed, 41 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 008d77727400..f436d7cf565a 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -240,6 +240,44 @@ static void prueth_emac_stop(struct prueth *prueth)
+ }
+ }
+
++static void icssg_enable_fw_offload(struct prueth *prueth)
++{
++ struct prueth_emac *emac;
++ int mac;
++
++ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
++ emac = prueth->emac[mac];
++ if (prueth->is_hsr_offload_mode) {
++ if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
++ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
++ else
++ icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
++ }
++
++ if (prueth->is_switch_mode || prueth->is_hsr_offload_mode) {
++ if (netif_running(emac->ndev)) {
++ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
++ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
++ ICSSG_FDB_ENTRY_BLOCK,
++ true);
++ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
++ BIT(emac->port_id) | DEFAULT_PORT_MASK,
++ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
++ true);
++ if (prueth->is_hsr_offload_mode)
++ icssg_vtbl_modify(emac, DEFAULT_VID,
++ DEFAULT_PORT_MASK,
++ DEFAULT_UNTAG_MASK, true);
++ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
++ if (prueth->is_switch_mode)
++ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
++ }
++ }
++ }
++}
++
+ static int prueth_emac_common_start(struct prueth *prueth)
+ {
+ struct prueth_emac *emac;
+@@ -790,6 +828,7 @@ static int emac_ndo_open(struct net_device *ndev)
+ ret = prueth_emac_common_start(prueth);
+ if (ret)
+ goto free_rx_irq;
++ icssg_enable_fw_offload(prueth);
+ }
+
+ flow_cfg = emac->dram.va + ICSSG_CONFIG_OFFSET + PSI_L_REGULAR_FLOW_ID_BASE_OFFSET;
+@@ -1397,8 +1436,7 @@ static int prueth_emac_restart(struct prueth *prueth)
+
+ static void icssg_change_mode(struct prueth *prueth)
+ {
+- struct prueth_emac *emac;
+- int mac, ret;
++ int ret;
+
+ ret = prueth_emac_restart(prueth);
+ if (ret) {
+@@ -1406,35 +1444,7 @@ static void icssg_change_mode(struct prueth *prueth)
+ return;
+ }
+
+- for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+- emac = prueth->emac[mac];
+- if (prueth->is_hsr_offload_mode) {
+- if (emac->ndev->features & NETIF_F_HW_HSR_TAG_RM)
+- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_ENABLE);
+- else
+- icssg_set_port_state(emac, ICSSG_EMAC_HSR_RX_OFFLOAD_DISABLE);
+- }
+-
+- if (netif_running(emac->ndev)) {
+- icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+- ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+- ICSSG_FDB_ENTRY_BLOCK,
+- true);
+- icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+- BIT(emac->port_id) | DEFAULT_PORT_MASK,
+- BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+- true);
+- if (prueth->is_hsr_offload_mode)
+- icssg_vtbl_modify(emac, DEFAULT_VID,
+- DEFAULT_PORT_MASK,
+- DEFAULT_UNTAG_MASK, true);
+- icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+- if (prueth->is_switch_mode)
+- icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+- }
+- }
++ icssg_enable_fw_offload(prueth);
+ }
+
+ static int prueth_netdevice_port_link(struct net_device *ndev,
+--
+2.50.1
+
--- /dev/null
+From 03750fc732ce6e7e7d1046f4926df7b5d07a7608 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:45:07 +0900
+Subject: net: usb: asix_devices: Fix PHY address mask in MDIO bus
+ initialization
+
+From: Yuichiro Tsuji <yuichtsu@amazon.com>
+
+[ Upstream commit 24ef2f53c07f273bad99173e27ee88d44d135b1c ]
+
+Syzbot reported shift-out-of-bounds exception on MDIO bus initialization.
+
+The PHY address should be masked to 5 bits (0-31). Without this
+mask, invalid PHY addresses could be used, potentially causing issues
+with MDIO bus operations.
+
+Fix this by masking the PHY address with 0x1f (31 decimal) to ensure
+it stays within the valid range.
+
+Fixes: 4faff70959d5 ("net: usb: asix_devices: add phy_mask for ax88772 mdio bus")
+Reported-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=20537064367a0f98d597
+Tested-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Signed-off-by: Yuichiro Tsuji <yuichtsu@amazon.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250818084541.1958-1-yuichtsu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/asix_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index d9f5942ccc44..792ddda1ad49 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,7 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+- priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+--
+2.50.1
+
--- /dev/null
+From 1d88295857531fe1a7dd5e18a9133b5f8bf873a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 19:25:59 +0530
+Subject: net: xilinx: axienet: Fix RX skb ring management in DMAengine mode
+
+From: Suraj Gupta <suraj.gupta2@amd.com>
+
+[ Upstream commit fd980bf6e9cdae885105685259421164f843ca55 ]
+
+Submit multiple descriptors in axienet_rx_cb() to fill Rx skb ring. This
+ensures the ring "catches up" on previously missed allocations.
+
+Increment Rx skb ring head pointer after BD is successfully allocated.
+Previously, head pointer was incremented before verifying if descriptor is
+successfully allocated and has valid entries, which could lead to ring
+state inconsistency if descriptor setup failed.
+
+These changes improve reliability by maintaining adequate descriptor
+availability and ensuring proper ring buffer state management.
+
+Fixes: 6a91b846af85 ("net: axienet: Introduce dmaengine support")
+Signed-off-by: Suraj Gupta <suraj.gupta2@amd.com>
+Link: https://patch.msgid.link/20250813135559.1555652-1-suraj.gupta2@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 6011d7eae0c7..0d8a05fe541a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1160,6 +1160,7 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ struct axienet_local *lp = data;
+ struct sk_buff *skb;
+ u32 *app_metadata;
++ int i;
+
+ skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
+ skb = skbuf_dma->skb;
+@@ -1178,7 +1179,10 @@ static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
+ u64_stats_add(&lp->rx_packets, 1);
+ u64_stats_add(&lp->rx_bytes, rx_len);
+ u64_stats_update_end(&lp->rx_stat_sync);
+- axienet_rx_submit_desc(lp->ndev);
++
++ for (i = 0; i < CIRC_SPACE(lp->rx_ring_head, lp->rx_ring_tail,
++ RX_BUF_NUM_DEFAULT); i++)
++ axienet_rx_submit_desc(lp->ndev);
+ dma_async_issue_pending(lp->rx_chan);
+ }
+
+@@ -1457,7 +1461,6 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ if (!skbuf_dma)
+ return;
+
+- lp->rx_ring_head++;
+ skb = netdev_alloc_skb(ndev, lp->max_frm_size);
+ if (!skb)
+ return;
+@@ -1482,6 +1485,7 @@ static void axienet_rx_submit_desc(struct net_device *ndev)
+ skbuf_dma->desc = dma_rx_desc;
+ dma_rx_desc->callback_param = lp;
+ dma_rx_desc->callback_result = axienet_dma_rx_cb;
++ lp->rx_ring_head++;
+ dmaengine_submit(dma_rx_desc);
+
+ return;
+--
+2.50.1
+
--- /dev/null
+From 804704aa274f92359eb1c53ca166f13fe0bde242 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at PRE_ROUTING stage.
+
+Instead of checking hook just check if the skb already has a route
+attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 87fd945a0d27..0d3cb2ba6fc8 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) ||
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index 9ae2b2725bf9..c3d64c4b69d7 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From 416228402072d8676335c7216282a092d3504509 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:15 +0800
+Subject: objtool/LoongArch: Get table size correctly if LTO is enabled
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+[ Upstream commit a47bc954cf0eb51f2828e1607d169d487df7f11f ]
+
+When compiling with LLVM and CONFIG_LTO_CLANG is set, there exist many
+objtool warnings "sibling call from callable instruction with modified
+stack frame".
+
+For this special case, the related object file shows that there is no
+generated relocation section '.rela.discard.tablejump_annotate' for the
+table jump instruction jirl, thus objtool can not know that what is the
+actual destination address.
+
+It needs to do something on the LLVM side to make sure that there is the
+relocation section '.rela.discard.tablejump_annotate' if LTO is enabled,
+but in order to maintain compatibility for the current LLVM compiler,
+this can be done in the kernel Makefile for now. Ensure it is aware of
+linker with LTO, '--loongarch-annotate-tablejump' needs to be passed via
+'-mllvm' to ld.lld.
+
+Before doing the above changes, it should handle the special case of the
+relocation section '.rela.discard.tablejump_annotate' to get the correct
+table size first, otherwise there are many objtool warnings and errors
+if LTO is enabled.
+
+There are many different rodata for each function if LTO is enabled, it
+is necessary to enhance get_rodata_table_size_by_table_annotate().
+
+Fixes: b95f852d3af2 ("objtool/LoongArch: Add support for switch table")
+Closes: https://lore.kernel.org/loongarch/20250731175655.GA1455142@ax162/
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/arch/loongarch/special.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/tools/objtool/arch/loongarch/special.c b/tools/objtool/arch/loongarch/special.c
+index e39f86d97002..a80b75f7b061 100644
+--- a/tools/objtool/arch/loongarch/special.c
++++ b/tools/objtool/arch/loongarch/special.c
+@@ -27,6 +27,7 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
+ struct table_info *next_table;
+ unsigned long tmp_insn_offset;
+ unsigned long tmp_rodata_offset;
++ bool is_valid_list = false;
+
+ rsec = find_section_by_name(file->elf, ".rela.discard.tablejump_annotate");
+ if (!rsec)
+@@ -35,6 +36,12 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
+ INIT_LIST_HEAD(&table_list);
+
+ for_each_reloc(rsec, reloc) {
++ if (reloc->sym->sec->rodata)
++ continue;
++
++ if (strcmp(insn->sec->name, reloc->sym->sec->name))
++ continue;
++
+ orig_table = malloc(sizeof(struct table_info));
+ if (!orig_table) {
+ WARN("malloc failed");
+@@ -49,6 +56,22 @@ static void get_rodata_table_size_by_table_annotate(struct objtool_file *file,
+
+ if (reloc_idx(reloc) + 1 == sec_num_entries(rsec))
+ break;
++
++ if (strcmp(insn->sec->name, (reloc + 1)->sym->sec->name)) {
++ list_for_each_entry(orig_table, &table_list, jump_info) {
++ if (orig_table->insn_offset == insn->offset) {
++ is_valid_list = true;
++ break;
++ }
++ }
++
++ if (!is_valid_list) {
++ list_del_init(&table_list);
++ continue;
++ }
++
++ break;
++ }
+ }
+
+ list_for_each_entry(orig_table, &table_list, jump_info) {
+--
+2.50.1
+
--- /dev/null
+From 5916366c819a2a404502ab0224c866f61d1f1c7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 12:09:18 +0530
+Subject: Octeontx2-af: Skip overlap check for SPI field
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 8c5d95988c34f0aeba1f34cd5e4ba69494c90c5f ]
+
+Octeontx2/CN10K silicon supports generating a 256-bit key per packet.
+The specific fields to be extracted from a packet for key generation
+are configurable via a Key Extraction (MKEX) Profile.
+
+The AF driver scans the configured extraction profile to ensure that
+fields from upper layers do not overwrite fields from lower layers in
+the key.
+
+Example Packet Field Layout:
+LA: DMAC + SMAC
+LB: VLAN
+LC: IPv4/IPv6
+LD: TCP/UDP
+
+Valid MKEX Profile Configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[30-31]
+
+Invalid MKEX profile configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[2-3] // Overlaps with DMAC field
+
+In another scenario, if the MKEX profile is configured to extract
+the SPI field from both AH and ESP headers at the same key offset,
+the driver rejecting this configuration. In a regular traffic,
+ipsec packet will be having either AH(LD) or ESP (LE). This patch
+relaxes the check for the same.
+
+Fixes: 12aa0a3b93f3 ("octeontx2-af: Harden rule validation.")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Link: https://patch.msgid.link/20250820063919.1463518-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 1b765045aa63..b56395ac5a74 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -606,8 +606,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+
+- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
++ /* Allow extracting SPI field from AH and ESP headers at same offset */
++ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ *features |= BIT_ULL(NPC_IPSEC_SPI);
+
+--
+2.50.1
+
--- /dev/null
+From 1b64f6de080b1eabb829b36620ecca8100a927b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 10:10:29 +0200
+Subject: phy: mscc: Fix timestamping for vsc8584
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit bc1a59cff9f797bfbf8f3104507584d89e9ecf2e ]
+
+There was a problem when we received frames and the frames were
+timestamped. The driver is configured to store the nanosecond part of
+the timestamp in the ptp reserved bits and it would take the second part
+by reading the LTC. The problem is that when reading the LTC we are in
+atomic context and to read the second part will go over mdio bus which
+might sleep, so we get an error.
+The fix consists in actually putting all the frames in a queue, starting
+the aux work, and in that work reading the LTC and then calculating the
+full received time.
+
+Fixes: 7d272e63e0979d ("net: phy: mscc: timestamping and PHC support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250818081029.1300780-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc.h | 12 ++++++++
+ drivers/net/phy/mscc/mscc_main.c | 12 ++++++++
+ drivers/net/phy/mscc/mscc_ptp.c | 49 ++++++++++++++++++++++++--------
+ 3 files changed, 61 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 6a3d8a754eb8..58c6d47fbe04 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat {
+ u16 mask;
+ };
+
++struct vsc8531_skb_cb {
++ u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++ ((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+@@ -410,6 +417,11 @@ struct vsc8531_private {
+ */
+ struct mutex ts_lock;
+ struct mutex phc_lock;
++
++ /* list of skbs that were received and need timestamp information but it
++ * didn't received it yet
++ */
++ struct sk_buff_head rx_skbs_list;
+ };
+
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 7ff975efd8e7..c3209cf00e96 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2336,6 +2336,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++ struct vsc8531_private *priv = phydev->priv;
++
++ skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2590,6 +2597,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2615,6 +2623,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2640,6 +2649,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2663,6 +2673,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2686,6 +2697,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 275706de5847..de6c7312e8f2 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1194,9 +1194,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+- struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct vsc85xx_ptphdr *ptphdr;
+- struct timespec64 ts;
+ unsigned long ns;
+
+ if (!vsc8531->ptp->configured)
+@@ -1206,27 +1204,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ type == PTP_CLASS_NONE)
+ return false;
+
+- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ if (!ptphdr)
+ return false;
+
+- shhwtstamps = skb_hwtstamps(skb);
+- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ ns = ntohl(ptphdr->rsrvd2);
+
+- /* nsec is in reserved field */
+- if (ts.tv_nsec < ns)
+- ts.tv_sec--;
++ VSC8531_SKB_CB(skb)->ns = ns;
++ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+
+- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx(skb);
++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+
+ return true;
+ }
+
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++ struct skb_shared_hwtstamps *shhwtstamps = NULL;
++ struct phy_device *phydev = ptp->phydev;
++ struct vsc8531_private *priv = phydev->priv;
++ struct sk_buff_head received;
++ struct sk_buff *rx_skb;
++ struct timespec64 ts;
++ unsigned long flags;
++
++ __skb_queue_head_init(&received);
++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++ vsc85xx_gettime(info, &ts);
++ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++ shhwtstamps = skb_hwtstamps(rx_skb);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++ ts.tv_sec--;
++
++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++ VSC8531_SKB_CB(rx_skb)->ns);
++ netif_rx(rx_skb);
++ }
++
++ return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .owner = THIS_MODULE,
+ .name = "VSC85xx timer",
+@@ -1240,6 +1263,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .adjfine = &vsc85xx_adjfine,
+ .gettime64 = &vsc85xx_gettime,
+ .settime64 = &vsc85xx_settime,
++ .do_aux_work = &vsc85xx_do_aux_work,
+ };
+
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1567,6 +1591,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+
+ mutex_init(&vsc8531->phc_lock);
+ mutex_init(&vsc8531->ts_lock);
++ skb_queue_head_init(&vsc8531->rx_skbs_list);
+
+ /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ * the same GPIO can be requested by all the PHYs of the same package.
+--
+2.50.1
+
--- /dev/null
+From 78ef5b7a0b4e5aa6bdd5e44c2e6a92a63f4de0a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 10:06:37 +0000
+Subject: platform/x86/amd/hsmp: Ensure sock->metric_tbl_addr is non-NULL
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Suma Hegde <suma.hegde@amd.com>
+
+[ Upstream commit 2c78fb287e1f430b929f2e49786518350d15605c ]
+
+If metric table address is not allocated, accessing metrics_bin will
+result in a NULL pointer dereference, so add a check.
+
+Fixes: 5150542b8ec5 ("platform/x86/amd/hsmp: add support for metrics tbl")
+Signed-off-by: Suma Hegde <suma.hegde@amd.com>
+Link: https://lore.kernel.org/r/20250807100637.952729-1-suma.hegde@amd.com
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/amd/hsmp/hsmp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/platform/x86/amd/hsmp/hsmp.c b/drivers/platform/x86/amd/hsmp/hsmp.c
+index 885e2f8136fd..19f82c1d3090 100644
+--- a/drivers/platform/x86/amd/hsmp/hsmp.c
++++ b/drivers/platform/x86/amd/hsmp/hsmp.c
+@@ -356,6 +356,11 @@ ssize_t hsmp_metric_tbl_read(struct hsmp_socket *sock, char *buf, size_t size)
+ if (!sock || !buf)
+ return -EINVAL;
+
++ if (!sock->metric_tbl_addr) {
++ dev_err(sock->dev, "Metrics table address not available\n");
++ return -ENOMEM;
++ }
++
+ /* Do not support lseek(), also don't allow more than the size of metric table */
+ if (size != sizeof(struct hsmp_metric_table)) {
+ dev_err(sock->dev, "Wrong buffer size\n");
+--
+2.50.1
+
--- /dev/null
+From 5e44b68148d18ccc98409c962be205dfec8fa512 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:58 +0800
+Subject: ppp: fix race conditions in ppp_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 0417adf367a0af11adf7ace849af4638cfb573f7 ]
+
+ppp_fill_forward_path() has two race conditions:
+
+1. The ppp->channels list can change between list_empty() and
+ list_first_entry(), as ppp_lock() is not held. If the only channel
+ is deleted in ppp_disconnect_channel(), list_first_entry() may
+ access an empty head or a freed entry, and trigger a panic.
+
+2. pch->chan can be NULL. When ppp_unregister_channel() is called,
+ pch->chan is set to NULL before pch is removed from ppp->channels.
+
+Fix these by using a lockless RCU approach:
+- Use list_first_or_null_rcu() to safely test and access the first list
+ entry.
+- Convert list modifications on ppp->channels to their RCU variants and
+ add synchronize_net() after removal.
+- Check for a NULL pch->chan before dereferencing it.
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-2-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/ppp_generic.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index def84e87e05b..5e7672d2022c 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1612,11 +1613,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+- if (list_empty(&ppp->channels))
++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++ if (!pch)
++ return -ENODEV;
++
++ chan = READ_ONCE(pch->chan);
++ if (!chan)
+ return -ENODEV;
+
+- pch = list_first_entry(&ppp->channels, struct channel, clist);
+- chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+@@ -2999,7 +3003,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+- pch->chan = NULL;
++ WRITE_ONCE(pch->chan, NULL);
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+@@ -3509,7 +3513,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+- list_add_tail(&pch->clist, &ppp->channels);
++ list_add_tail_rcu(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ refcount_inc(&ppp->file.refcnt);
+@@ -3539,10 +3543,11 @@ ppp_disconnect_channel(struct channel *pch)
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+- list_del(&pch->clist);
++ list_del_rcu(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
++ synchronize_net();
+ if (refcount_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+--
+2.50.1
+
--- /dev/null
+From 34cc20f44ba15c1c3cd22686b3ff03189a498c8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:59 +0530
+Subject: RDMA/bnxt_re: Fix a possible memory leak in the driver
+
+From: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+
+[ Upstream commit ba60a1e8cbbd396c69ff9c8bc3242f5ab133e38a ]
+
+The GID context reuse logic requires the context memory to be
+not freed if and when DEL_GID firmware command fails. But, if
+there's no subsequent ADD_GID to reuse it, the context memory
+must be freed when the driver is unloaded. Otherwise it leads
+to a memory leak.
+
+Below is the kmemleak trace reported:
+
+unreferenced object 0xffff88817a4f34d0 (size 8):
+ comm "insmod", pid 1072504, jiffies 4402561550
+ hex dump (first 8 bytes):
+ 01 00 00 00 00 00 00 00 ........
+ backtrace (crc ccaa009e):
+ __kmalloc_cache_noprof+0x33e/0x400
+ 0xffffffffc2db9d48
+ add_modify_gid+0x5e0/0xb60 [ib_core]
+ __ib_cache_gid_add+0x213/0x350 [ib_core]
+ update_gid+0xf2/0x180 [ib_core]
+ enum_netdev_ipv4_ips+0x3f3/0x690 [ib_core]
+ enum_all_gids_of_dev_cb+0x125/0x1b0 [ib_core]
+ ib_enum_roce_netdev+0x14b/0x250 [ib_core]
+ ib_cache_setup_one+0x2e5/0x540 [ib_core]
+ ib_register_device+0x82c/0xf10 [ib_core]
+ 0xffffffffc2df5ad9
+ 0xffffffffc2da8b07
+ 0xffffffffc2db174d
+ auxiliary_bus_probe+0xa5/0x120
+ really_probe+0x1e4/0x850
+ __driver_probe_device+0x18f/0x3d0
+
+Fixes: 4a62c5e9e2e1 ("RDMA/bnxt_re: Do not free the ctx_tbl entry if delete GID fails")
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-4-kalesh-anakkur.purayil@broadcom.com
+Reviewed-by: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/main.c | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 293b0a96c8e3..df7cf8d68e27 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -2017,6 +2017,28 @@ static void bnxt_re_free_nqr_mem(struct bnxt_re_dev *rdev)
+ rdev->nqr = NULL;
+ }
+
++/* When DEL_GID fails, driver is not freeing GID ctx memory.
++ * To avoid the memory leak, free the memory during unload
++ */
++static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
++{
++ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
++ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
++ int i;
++
++ if (!sgid_tbl->active)
++ return;
++
++ ctx_tbl = sgid_tbl->ctx;
++ for (i = 0; i < sgid_tbl->max; i++) {
++ if (sgid_tbl->hw_id[i] == 0xFFFF)
++ continue;
++
++ ctx = ctx_tbl[i];
++ kfree(ctx);
++ }
++}
++
+ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ {
+ u8 type;
+@@ -2030,6 +2052,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+ if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
+ cancel_delayed_work_sync(&rdev->worker);
+
++ bnxt_re_free_gid_ctx(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
+ &rdev->flags))
+ bnxt_re_cleanup_res(rdev);
+--
+2.50.1
+
--- /dev/null
+From 643211882ecc23432175d87f88458676dd7ae751 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:57 +0530
+Subject: RDMA/bnxt_re: Fix to do SRQ armena by default
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 6296f9a5293ada28558f2867ac54c487e1e2b9f2 ]
+
+Whenever SRQ is created, make sure SRQ arm enable is always
+set. Driver is always ready to receive SRQ ASYNC event.
+
+Additional note -
+There is no need to do srq arm enable conditionally.
+See bnxt_qplib_armen_db in bnxt_qplib_create_cq().
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-2-kalesh-anakkur.purayil@broadcom.com
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index be34c605d516..eb82440cdded 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -705,8 +705,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+- if (srq->threshold)
+- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
++ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+--
+2.50.1
+
--- /dev/null
+From aa014bfe9aac532dd4b60ad3da553ada1df1cbbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 6cd05207ffed..cc5c82d96839 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From 21dd0e7c32eeed18c3e86120bde64f22ab577733 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:58 +0530
+Subject: RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 666bce0bd7e771127cb0cda125cc9d32d9f9f15d ]
+
+There should not be any checks of current workload to set
+srq_limit value to SRQ hw context.
+
+Remove all such workload checks and make a direct call to
+set srq_limit via doorbell SRQ_ARM.
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-3-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 ++-----
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 27 ------------------------
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 --
+ 3 files changed, 2 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index 3a627acb82ce..9b33072f9a06 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1921,7 +1921,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+- int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+@@ -1933,11 +1932,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+- if (rc) {
+- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
+- return rc;
+- }
++ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
++
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to Build and send response back to udata */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index eb82440cdded..c2784561156f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -706,7 +706,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+- srq->arm_req = false;
+
+ return 0;
+ fail:
+@@ -716,24 +715,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ return rc;
+ }
+
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq)
+-{
+- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+- u32 count;
+-
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- if (count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- } else {
+- /* Deferred arming */
+- srq->arm_req = true;
+- }
+-
+- return 0;
+-}
+-
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+ {
+@@ -775,7 +756,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe;
+ struct sq_sge *hw_sge;
+- u32 count = 0;
+ int i, next;
+
+ spin_lock(&srq_hwq->lock);
+@@ -807,15 +787,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+
+- spin_lock(&srq_hwq->lock);
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+- if (srq->arm_req == true && count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 0d9487c889ff..846501f12227 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -543,8 +543,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ srqn_handler_t srq_handler);
+ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq);
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+--
+2.50.1
+
--- /dev/null
+From d07b34e0b772d263bb07326cef689a2419606c9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 Aug 2025 23:21:58 +0530
+Subject: RDMA/core: Free pfn_list with appropriate kvfree call
+
+From: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+
+[ Upstream commit 111aea0464c20f3eb25a48d5ff6c036e6b416123 ]
+
+Ensure that pfn_list allocated by kvcalloc() is freed using corresponding
+kvfree() function. Match memory allocation and free routines kvcalloc -> kvfree.
+
+Fixes: 259e9bd07c57 ("RDMA/core: Avoid hmm_dma_map_alloc() for virtual DMA devices")
+Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+Link: https://patch.msgid.link/aJjcPjL1BVh8QrMN@bhairav-test.ee.iitb.ac.in
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/umem_odp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
+index b1c44ec1a3f3..572a91a62a7b 100644
+--- a/drivers/infiniband/core/umem_odp.c
++++ b/drivers/infiniband/core/umem_odp.c
+@@ -115,7 +115,7 @@ static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
+
+ out_free_map:
+ if (ib_uses_virt_dma(dev))
+- kfree(map->pfn_list);
++ kvfree(map->pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, map);
+ return ret;
+@@ -287,7 +287,7 @@ static void ib_umem_odp_free(struct ib_umem_odp *umem_odp)
+ mutex_unlock(&umem_odp->umem_mutex);
+ mmu_interval_notifier_remove(&umem_odp->notifier);
+ if (ib_uses_virt_dma(dev))
+- kfree(umem_odp->map.pfn_list);
++ kvfree(umem_odp->map.pfn_list);
+ else
+ hmm_dma_map_free(dev->dma_device, &umem_odp->map);
+ }
+--
+2.50.1
+
--- /dev/null
+From 1f10872fb8f49dc025f2bd99bc1a28b3c8d0720c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:53:55 +0800
+Subject: RDMA/erdma: Fix ignored return value of init_kernel_qp
+
+From: Boshi Yu <boshiyu@linux.alibaba.com>
+
+[ Upstream commit d5c74713f0117d07f91eb48b10bc2ad44e23c9b9 ]
+
+The init_kernel_qp interface may fail. Check its return value and free
+related resources properly when it does.
+
+Fixes: 155055771704 ("RDMA/erdma: Add verbs implementation")
+Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
+Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
+Link: https://patch.msgid.link/20250725055410.67520-3-boshiyu@linux.alibaba.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/erdma/erdma_verbs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index ec0ad4086066..c1b2b8c3cdcc 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -1031,7 +1031,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ if (ret)
+ goto err_out_cmd;
+ } else {
+- init_kernel_qp(dev, qp, attrs);
++ ret = init_kernel_qp(dev, qp, attrs);
++ if (ret)
++ goto err_out_xa;
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+--
+2.50.1
+
--- /dev/null
+From db987fd37af736279bbc1ba8be1fb531215da4fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:53:56 +0800
+Subject: RDMA/erdma: Fix unset QPN of GSI QP
+
+From: Boshi Yu <boshiyu@linux.alibaba.com>
+
+[ Upstream commit d4ac86b47563c7895dae28658abd1879d266b2b4 ]
+
+The QPN of the GSI QP was not set, which may cause issues.
+Set the QPN to 1 when creating the GSI QP.
+
+Fixes: 999a0a2e9b87 ("RDMA/erdma: Support UD QPs and UD WRs")
+Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
+Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
+Link: https://patch.msgid.link/20250725055410.67520-4-boshiyu@linux.alibaba.com
+Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/erdma/erdma_verbs.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index c1b2b8c3cdcc..8d7596abb822 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -994,6 +994,8 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ old_entry = xa_store(&dev->qp_xa, 1, qp, GFP_KERNEL);
+ if (xa_is_err(old_entry))
+ ret = xa_err(old_entry);
++ else
++ qp->ibqp.qp_num = 1;
+ } else {
+ ret = xa_alloc_cyclic(&dev->qp_xa, &qp->ibqp.qp_num, qp,
+ XA_LIMIT(1, dev->attrs.max_qp - 1),
+--
+2.50.1
+
--- /dev/null
+From 71db23687e81a4caafd284416de5412562847dcc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Aug 2025 20:26:02 +0800
+Subject: RDMA/hns: Fix dip entries leak on devices newer than hip09
+
+From: Junxian Huang <huangjunxian6@hisilicon.com>
+
+[ Upstream commit fa2e2d31ee3b7212079323b4b09201ef68af3a97 ]
+
+DIP algorithm is also supported on devices newer than hip09, so free
+dip entries too.
+
+Fixes: f91696f2f053 ("RDMA/hns: Support congestion control type selection according to the FW")
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20250812122602.3524602-1-huangjunxian6@hisilicon.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 256757f0ff65..b544ca024484 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -3043,7 +3043,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
+ if (!hr_dev->is_vf)
+ hns_roce_free_link_table(hr_dev);
+
+- if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP09)
++ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ free_dip_entry(hr_dev);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 7f3c7afb3a525378fc6123f0a08641ebc75af90f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Jul 2025 15:53:45 +0800
+Subject: RDMA/hns: Fix querying wrong SCC context for DIP algorithm
+
+From: wenglianfa <wenglianfa@huawei.com>
+
+[ Upstream commit 085a1b42e52750769a3fa29d4da6c05ab56f18f8 ]
+
+When using DIP algorithm, all QPs establishing connections with
+the same destination IP share the same SCC, which is indexed by
+dip_idx, but dip_idx isn't necessarily equal to qpn. Therefore,
+dip_idx should be used to query SCC context instead of qpn.
+
+Fixes: 124a9fbe43aa ("RDMA/hns: Append SCC context to the raw dump of QPC")
+Signed-off-by: wenglianfa <wenglianfa@huawei.com>
+Signed-off-by: Junxian Huang <huangjunxian6@hisilicon.com>
+Link: https://patch.msgid.link/20250726075345.846957-1-huangjunxian6@hisilicon.com
+Reviewed-by: Zhu Yanjun <yanjun.zhu@linux.dev>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 4 ++--
+ drivers/infiniband/hw/hns/hns_roce_restrack.c | 9 ++++++++-
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index b30dce00f240..256757f0ff65 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -5514,7 +5514,7 @@ static int hns_roce_v2_query_srqc(struct hns_roce_dev *hr_dev, u32 srqn,
+ return ret;
+ }
+
+-static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
++static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 sccn,
+ void *buffer)
+ {
+ struct hns_roce_v2_scc_context *context;
+@@ -5526,7 +5526,7 @@ static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+- qpn);
++ sccn);
+ if (ret)
+ goto out;
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index f637b73b946e..230187dda6a0 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -100,6 +100,7 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ struct hns_roce_v2_qp_context qpc;
+ struct hns_roce_v2_scc_context sccc;
+ } context = {};
++ u32 sccn = hr_qp->qpn;
+ int ret;
+
+ if (!hr_dev->hw->query_qpc)
+@@ -116,7 +117,13 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
+ !hr_dev->hw->query_sccc)
+ goto out;
+
+- ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
++ if (hr_qp->cong_type == CONG_TYPE_DIP) {
++ if (!hr_qp->dip)
++ goto out;
++ sccn = hr_qp->dip->dip_idx;
++ }
++
++ ret = hr_dev->hw->query_sccc(hr_dev, sccn, &context.sccc);
+ if (ret)
+ ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ "failed to query SCCC, ret = %d.\n",
+--
+2.50.1
+
--- /dev/null
+From 3ecd1e76d12248759bd098c8a886b9b711ac7932 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 12:52:09 +0800
+Subject: regulator: pca9450: Use devm_register_sys_off_handler
+
+From: Peng Fan <peng.fan@nxp.com>
+
+[ Upstream commit 447be50598c05499f7ccc2b1f6ddb3da30f8099a ]
+
+With module test, there is error dump:
+------------[ cut here ]------------
+ notifier callback pca9450_i2c_restart_handler already registered
+ WARNING: kernel/notifier.c:23 at notifier_chain_register+0x5c/0x88,
+ CPU#0: kworker/u16:3/50
+ Call trace:
+ notifier_chain_register+0x5c/0x88 (P)
+ atomic_notifier_chain_register+0x30/0x58
+ register_restart_handler+0x1c/0x28
+ pca9450_i2c_probe+0x418/0x538
+ i2c_device_probe+0x220/0x3d0
+ really_probe+0x114/0x410
+ __driver_probe_device+0xa0/0x150
+ driver_probe_device+0x40/0x114
+ __device_attach_driver+0xd4/0x12c
+
+So use devm_register_sys_off_handler to let kernel handle the resource
+free to avoid kernel dump.
+
+Fixes: 6157e62b07d9 ("regulator: pca9450: Add restart handler")
+Signed-off-by: Peng Fan <peng.fan@nxp.com>
+Link: https://patch.msgid.link/20250815-pca9450-v1-1-7748e362dc97@nxp.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/pca9450-regulator.c | 13 +++++--------
+ 1 file changed, 5 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
+index 14d19a6d6655..49ff762eb33e 100644
+--- a/drivers/regulator/pca9450-regulator.c
++++ b/drivers/regulator/pca9450-regulator.c
+@@ -34,7 +34,6 @@ struct pca9450 {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *sd_vsel_gpio;
+- struct notifier_block restart_nb;
+ enum pca9450_chip_type type;
+ unsigned int rcnt;
+ int irq;
+@@ -967,10 +966,9 @@ static irqreturn_t pca9450_irq_handler(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static int pca9450_i2c_restart_handler(struct notifier_block *nb,
+- unsigned long action, void *data)
++static int pca9450_i2c_restart_handler(struct sys_off_data *data)
+ {
+- struct pca9450 *pca9450 = container_of(nb, struct pca9450, restart_nb);
++ struct pca9450 *pca9450 = data->cb_data;
+ struct i2c_client *i2c = container_of(pca9450->dev, struct i2c_client, dev);
+
+ dev_dbg(&i2c->dev, "Restarting device..\n");
+@@ -1128,10 +1126,9 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
+ pca9450->sd_vsel_fixed_low =
+ of_property_read_bool(ldo5->dev.of_node, "nxp,sd-vsel-fixed-low");
+
+- pca9450->restart_nb.notifier_call = pca9450_i2c_restart_handler;
+- pca9450->restart_nb.priority = PCA9450_RESTART_HANDLER_PRIORITY;
+-
+- if (register_restart_handler(&pca9450->restart_nb))
++ if (devm_register_sys_off_handler(&i2c->dev, SYS_OFF_MODE_RESTART,
++ PCA9450_RESTART_HANDLER_PRIORITY,
++ pca9450_i2c_restart_handler, pca9450))
+ dev_warn(&i2c->dev, "Failed to register restart handler\n");
+
+ dev_info(&i2c->dev, "%s probed.\n",
+--
+2.50.1
+
--- /dev/null
+From 18aeed871437b1db14d0366e3c96b766db667baa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 12:40:41 +0300
+Subject: regulator: tps65219: regulator: tps65219: Fix error codes in probe()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 11cd7a5c21db020b8001aedcae27bd3fa9e1e901 ]
+
+There is a copy and paste error and we accidentally use "PTR_ERR(rdev)"
+instead of "error". The "rdev" pointer is valid at this point.
+
+Also there is no need to print the error code in the error message
+because dev_err_probe() already prints that. So clean up the error
+message a bit.
+
+Fixes: 38c9f98db20a ("regulator: tps65219: Add support for TPS65215 Regulator IRQs")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKRGmVdbvT1HBvm8@stanley.mountain
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/tps65219-regulator.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/regulator/tps65219-regulator.c b/drivers/regulator/tps65219-regulator.c
+index 5e67fdc88f49..d77ca486879f 100644
+--- a/drivers/regulator/tps65219-regulator.c
++++ b/drivers/regulator/tps65219-regulator.c
+@@ -454,9 +454,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
+ irq_type->irq_name,
+ irq_data);
+ if (error)
+- return dev_err_probe(tps->dev, PTR_ERR(rdev),
+- "Failed to request %s IRQ %d: %d\n",
+- irq_type->irq_name, irq, error);
++ return dev_err_probe(tps->dev, error,
++ "Failed to request %s IRQ %d\n",
++ irq_type->irq_name, irq);
+ }
+
+ for (i = 0; i < pmic->dev_irq_size; ++i) {
+@@ -477,9 +477,9 @@ static int tps65219_regulator_probe(struct platform_device *pdev)
+ irq_type->irq_name,
+ irq_data);
+ if (error)
+- return dev_err_probe(tps->dev, PTR_ERR(rdev),
+- "Failed to request %s IRQ %d: %d\n",
+- irq_type->irq_name, irq, error);
++ return dev_err_probe(tps->dev, error,
++ "Failed to request %s IRQ %d\n",
++ irq_type->irq_name, irq);
+ }
+
+ return 0;
+--
+2.50.1
+
--- /dev/null
+From 86c5c55694303ec7624f520381c910a4bef98a3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 15:16:31 +0800
+Subject: rtase: Fix Rx descriptor CRC error bit definition
+
+From: Justin Lai <justinlai0215@realtek.com>
+
+[ Upstream commit 065c31f2c6915b38f45b1c817b31f41f62eaa774 ]
+
+The CRC error bit is located at bit 17 in the Rx descriptor, but the
+driver was incorrectly using bit 16. Fix it.
+
+Fixes: a36e9f5cfe9e ("rtase: Add support for a pci table in this module")
+Signed-off-by: Justin Lai <justinlai0215@realtek.com>
+Link: https://patch.msgid.link/20250813071631.7566-1-justinlai0215@realtek.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/realtek/rtase/rtase.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/realtek/rtase/rtase.h b/drivers/net/ethernet/realtek/rtase/rtase.h
+index 498cfe4d0cac..5f2e1ab6a100 100644
+--- a/drivers/net/ethernet/realtek/rtase/rtase.h
++++ b/drivers/net/ethernet/realtek/rtase/rtase.h
+@@ -241,7 +241,7 @@ union rtase_rx_desc {
+ #define RTASE_RX_RES BIT(20)
+ #define RTASE_RX_RUNT BIT(19)
+ #define RTASE_RX_RWT BIT(18)
+-#define RTASE_RX_CRC BIT(16)
++#define RTASE_RX_CRC BIT(17)
+ #define RTASE_RX_V6F BIT(31)
+ #define RTASE_RX_V4F BIT(30)
+ #define RTASE_RX_UDPT BIT(29)
+--
+2.50.1
+
--- /dev/null
+From 3f857bc4082a92e50449b7ce62de9e1b94d27c7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Aug 2025 22:42:15 +0200
+Subject: rust: alloc: fix `rusttest` by providing `Cmalloc::aligned_layout`
+ too
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+[ Upstream commit 0f580d5d3d9d9cd0953695cd32e43aac3a946338 ]
+
+Commit fde578c86281 ("rust: alloc: replace aligned_size() with
+Kmalloc::aligned_layout()") provides a public `aligned_layout` function
+in `Kmalloc`, but not in `Cmalloc`, and thus uses of it will trigger an
+error in `rusttest`.
+
+Such a user appeared in the following commit 22ab0641b939 ("rust: drm:
+ensure kmalloc() compatible Layout"):
+
+ error[E0599]: no function or associated item named `aligned_layout` found for struct `alloc::allocator_test::Cmalloc` in the current scope
+ --> rust/kernel/drm/device.rs:100:31
+ |
+ 100 | let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
+ | ^^^^^^^^^^^^^^ function or associated item not found in `Cmalloc`
+ |
+ ::: rust/kernel/alloc/allocator_test.rs:19:1
+ |
+ 19 | pub struct Cmalloc;
+ | ------------------ function or associated item `aligned_layout` not found for this struct
+
+Thus add an equivalent one for `Cmalloc`.
+
+Fixes: fde578c86281 ("rust: alloc: replace aligned_size() with Kmalloc::aligned_layout()")
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Link: https://lore.kernel.org/r/20250816204215.2719559-1-ojeda@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/alloc/allocator_test.rs | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/rust/kernel/alloc/allocator_test.rs b/rust/kernel/alloc/allocator_test.rs
+index d19c06ef0498..981e002ae3fc 100644
+--- a/rust/kernel/alloc/allocator_test.rs
++++ b/rust/kernel/alloc/allocator_test.rs
+@@ -22,6 +22,17 @@ pub type Kmalloc = Cmalloc;
+ pub type Vmalloc = Kmalloc;
+ pub type KVmalloc = Kmalloc;
+
++impl Cmalloc {
++ /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++ /// `layout`.
++ pub fn aligned_layout(layout: Layout) -> Layout {
++ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++ // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++ // a properly aligned object (see comments in `kmalloc()` for more information).
++ layout.pad_to_align()
++ }
++}
++
+ extern "C" {
+ #[link_name = "aligned_alloc"]
+ fn libc_aligned_alloc(align: usize, size: usize) -> *mut crate::ffi::c_void;
+--
+2.50.1
+
--- /dev/null
+From ef1e04d9a4786c8494ee98c75ac5fb46b3885c44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 17:48:06 +0200
+Subject: rust: alloc: replace aligned_size() with Kmalloc::aligned_layout()
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit fde578c86281f27b182680c7642836a0dbbd0be7 ]
+
+aligned_size() dates back to when Rust did support kmalloc() only, but
+is now used in ReallocFunc::call() and hence for all allocators.
+
+However, the additional padding applied by aligned_size() is only
+required by the kmalloc() allocator backend.
+
+Hence, replace aligned_size() with Kmalloc::aligned_layout() and use it
+for the affected allocators, i.e. kmalloc() and kvmalloc(), only.
+
+While at it, make Kmalloc::aligned_layout() public, such that Rust
+abstractions, which have to call subsystem specific kmalloc() based
+allocation primitives directly, can make use of it.
+
+Fixes: 8a799831fc63 ("rust: alloc: implement `ReallocFunc`")
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20250731154919.4132-2-dakr@kernel.org
+[ Remove `const` from Kmalloc::aligned_layout(). - Danilo ]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/alloc/allocator.rs | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/rust/kernel/alloc/allocator.rs b/rust/kernel/alloc/allocator.rs
+index aa2dfa9dca4c..2692cf90c948 100644
+--- a/rust/kernel/alloc/allocator.rs
++++ b/rust/kernel/alloc/allocator.rs
+@@ -43,17 +43,6 @@ pub struct Vmalloc;
+ /// For more details see [self].
+ pub struct KVmalloc;
+
+-/// Returns a proper size to alloc a new object aligned to `new_layout`'s alignment.
+-fn aligned_size(new_layout: Layout) -> usize {
+- // Customized layouts from `Layout::from_size_align()` can have size < align, so pad first.
+- let layout = new_layout.pad_to_align();
+-
+- // Note that `layout.size()` (after padding) is guaranteed to be a multiple of `layout.align()`
+- // which together with the slab guarantees means the `krealloc` will return a properly aligned
+- // object (see comments in `kmalloc()` for more information).
+- layout.size()
+-}
+-
+ /// # Invariants
+ ///
+ /// One of the following: `krealloc`, `vrealloc`, `kvrealloc`.
+@@ -88,7 +77,7 @@ impl ReallocFunc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+- let size = aligned_size(layout);
++ let size = layout.size();
+ let ptr = match ptr {
+ Some(ptr) => {
+ if old_layout.size() == 0 {
+@@ -123,6 +112,17 @@ impl ReallocFunc {
+ }
+ }
+
++impl Kmalloc {
++ /// Returns a [`Layout`] that makes [`Kmalloc`] fulfill the requested size and alignment of
++ /// `layout`.
++ pub fn aligned_layout(layout: Layout) -> Layout {
++ // Note that `layout.size()` (after padding) is guaranteed to be a multiple of
++ // `layout.align()` which together with the slab guarantees means that `Kmalloc` will return
++ // a properly aligned object (see comments in `kmalloc()` for more information).
++ layout.pad_to_align()
++ }
++}
++
+ // SAFETY: `realloc` delegates to `ReallocFunc::call`, which guarantees that
+ // - memory remains valid until it is explicitly freed,
+ // - passing a pointer to a valid memory allocation is OK,
+@@ -135,6 +135,8 @@ unsafe impl Allocator for Kmalloc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
++ let layout = Kmalloc::aligned_layout(layout);
++
+ // SAFETY: `ReallocFunc::call` has the same safety requirements as `Allocator::realloc`.
+ unsafe { ReallocFunc::KREALLOC.call(ptr, layout, old_layout, flags) }
+ }
+@@ -176,6 +178,10 @@ unsafe impl Allocator for KVmalloc {
+ old_layout: Layout,
+ flags: Flags,
+ ) -> Result<NonNull<[u8]>, AllocError> {
++ // `KVmalloc` may use the `Kmalloc` backend, hence we have to enforce a `Kmalloc`
++ // compatible layout.
++ let layout = Kmalloc::aligned_layout(layout);
++
+ // TODO: Support alignments larger than PAGE_SIZE.
+ if layout.align() > bindings::PAGE_SIZE {
+ pr_warn!("KVmalloc does not support alignments larger than PAGE_SIZE yet.\n");
+--
+2.50.1
+
--- /dev/null
+From 77a45994dffbb34040b389d09917ee4ff5d81ac8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 17:48:09 +0200
+Subject: rust: drm: don't pass the address of drm::Device to drm_dev_put()
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit 360077278ba62e81310080f075a1a3028e778ef9 ]
+
+In drm_dev_put() call in AlwaysRefCounted::dec_ref() we rely on struct
+drm_device to be the first field in drm::Device, whereas everywhere
+else we correctly obtain the address of the actual struct drm_device.
+
+Analogous to the from_drm_device() helper, provide the
+into_drm_device() helper in order to address this.
+
+Fixes: 1e4b8896c0f3 ("rust: drm: add device abstraction")
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20250731154919.4132-5-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/drm/device.rs | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
+index cb6bbd024a1e..3832779f439f 100644
+--- a/rust/kernel/drm/device.rs
++++ b/rust/kernel/drm/device.rs
+@@ -120,9 +120,13 @@ impl<T: drm::Driver> Device<T> {
+ // - `raw_data` is a valid pointer to uninitialized memory.
+ // - `raw_data` will not move until it is dropped.
+ unsafe { data.__pinned_init(raw_data) }.inspect_err(|_| {
+- // SAFETY: `__drm_dev_alloc()` was successful, hence `raw_drm` must be valid and the
++ // SAFETY: `raw_drm` is a valid pointer to `Self`, given that `__drm_dev_alloc` was
++ // successful.
++ let drm_dev = unsafe { Self::into_drm_device(raw_drm) };
++
++ // SAFETY: `__drm_dev_alloc()` was successful, hence `drm_dev` must be valid and the
+ // refcount must be non-zero.
+- unsafe { bindings::drm_dev_put(ptr::addr_of_mut!((*raw_drm.as_ptr()).dev).cast()) };
++ unsafe { bindings::drm_dev_put(drm_dev) };
+ })?;
+
+ // SAFETY: The reference count is one, and now we take ownership of that reference as a
+@@ -145,6 +149,14 @@ impl<T: drm::Driver> Device<T> {
+ unsafe { crate::container_of!(ptr, Self, dev) }.cast_mut()
+ }
+
++ /// # Safety
++ ///
++ /// `ptr` must be a valid pointer to `Self`.
++ unsafe fn into_drm_device(ptr: NonNull<Self>) -> *mut bindings::drm_device {
++ // SAFETY: By the safety requirements of this function, `ptr` is a valid pointer to `Self`.
++ unsafe { &raw mut (*ptr.as_ptr()).dev }.cast()
++ }
++
+ /// Not intended to be called externally, except via declare_drm_ioctls!()
+ ///
+ /// # Safety
+@@ -194,8 +206,11 @@ unsafe impl<T: drm::Driver> AlwaysRefCounted for Device<T> {
+ }
+
+ unsafe fn dec_ref(obj: NonNull<Self>) {
++ // SAFETY: `obj` is a valid pointer to `Self`.
++ let drm_dev = unsafe { Self::into_drm_device(obj) };
++
+ // SAFETY: The safety requirements guarantee that the refcount is non-zero.
+- unsafe { bindings::drm_dev_put(obj.cast().as_ptr()) };
++ unsafe { bindings::drm_dev_put(drm_dev) };
+ }
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 4811162822925283371016633b9a740f8fa98578 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 17:48:07 +0200
+Subject: rust: drm: ensure kmalloc() compatible Layout
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit 22ab0641b939967f630d108e33a3582841ad6846 ]
+
+drm::Device is allocated through __drm_dev_alloc() (which uses
+kmalloc()) and the driver private data, <T as drm::Driver>::Data, is
+initialized in-place.
+
+Due to the order of fields in drm::Device
+
+ pub struct Device<T: drm::Driver> {
+ dev: Opaque<bindings::drm_device>,
+ data: T::Data,
+ }
+
+even with an arbitrary large alignment requirement of T::Data it can't
+happen that the size of Device is smaller than its alignment requirement.
+
+However, let's not rely on this subtle circumstance and create a proper
+kmalloc() compatible Layout.
+
+Fixes: 1e4b8896c0f3 ("rust: drm: add device abstraction")
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://lore.kernel.org/r/20250731154919.4132-3-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/drm/device.rs | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
+index 14c1aa402951..6c1fc33cdc68 100644
+--- a/rust/kernel/drm/device.rs
++++ b/rust/kernel/drm/device.rs
+@@ -5,6 +5,7 @@
+ //! C header: [`include/linux/drm/drm_device.h`](srctree/include/linux/drm/drm_device.h)
+
+ use crate::{
++ alloc::allocator::Kmalloc,
+ bindings, device, drm,
+ drm::driver::AllocImpl,
+ error::from_err_ptr,
+@@ -12,7 +13,7 @@ use crate::{
+ prelude::*,
+ types::{ARef, AlwaysRefCounted, Opaque},
+ };
+-use core::{mem, ops::Deref, ptr, ptr::NonNull};
++use core::{alloc::Layout, mem, ops::Deref, ptr, ptr::NonNull};
+
+ #[cfg(CONFIG_DRM_LEGACY)]
+ macro_rules! drm_legacy_fields {
+@@ -96,6 +97,10 @@ impl<T: drm::Driver> Device<T> {
+
+ /// Create a new `drm::Device` for a `drm::Driver`.
+ pub fn new(dev: &device::Device, data: impl PinInit<T::Data, Error>) -> Result<ARef<Self>> {
++ // `__drm_dev_alloc` uses `kmalloc()` to allocate memory, hence ensure a `kmalloc()`
++ // compatible `Layout`.
++ let layout = Kmalloc::aligned_layout(Layout::new::<Self>());
++
+ // SAFETY:
+ // - `VTABLE`, as a `const` is pinned to the read-only section of the compilation,
+ // - `dev` is valid by its type invarants,
+@@ -103,7 +108,7 @@ impl<T: drm::Driver> Device<T> {
+ bindings::__drm_dev_alloc(
+ dev.as_raw(),
+ &Self::VTABLE,
+- mem::size_of::<Self>(),
++ layout.size(),
+ mem::offset_of!(Self, dev),
+ )
+ }
+--
+2.50.1
+
--- /dev/null
+From 9151dbe29ef4d8f1dbfba8b5c7d87fbfc0b2a009 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 17:48:08 +0200
+Subject: rust: drm: remove pin annotations from drm::Device
+
+From: Danilo Krummrich <dakr@kernel.org>
+
+[ Upstream commit 0c04a81c1d0214d5b2025f805ccec1ac37c96b08 ]
+
+The #[pin_data] and #[pin] annotations are not necessary for
+drm::Device, since we don't use any pin-init macros, but only
+__pinned_init() on the impl PinInit<T::Data, Error> argument of
+drm::Device::new().
+
+Fixes: 1e4b8896c0f3 ("rust: drm: add device abstraction")
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Link: https://lore.kernel.org/r/20250731154919.4132-4-dakr@kernel.org
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/drm/device.rs | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/rust/kernel/drm/device.rs b/rust/kernel/drm/device.rs
+index 6c1fc33cdc68..cb6bbd024a1e 100644
+--- a/rust/kernel/drm/device.rs
++++ b/rust/kernel/drm/device.rs
+@@ -54,10 +54,8 @@ macro_rules! drm_legacy_fields {
+ ///
+ /// `self.dev` is a valid instance of a `struct device`.
+ #[repr(C)]
+-#[pin_data]
+ pub struct Device<T: drm::Driver> {
+ dev: Opaque<bindings::drm_device>,
+- #[pin]
+ data: T::Data,
+ }
+
+--
+2.50.1
+
--- /dev/null
+From fcbc9e4ba24c67557c0ce45c2a12e7e20bd04ef0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index 5d9effb0867c..e74eb8f9b23a 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -66,23 +66,27 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 1bd07ec0379bc9fe52c6a217a25157e9b7cdaa55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index e74eb8f9b23a..41a0d2066fa0 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -84,7 +85,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 65b457d88f00552e6340587a56dd9ea8abe2e584 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 17:04:27 +0200
+Subject: s390/mm: Do not map lowcore with identity mapping
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 93f616ff870a1fb7e84d472cad0af651b18f9f87 ]
+
+Since the identity mapping is pinned to address zero the lowcore is always
+also mapped to address zero, this happens regardless of the relocate_lowcore
+command line option. If the option is specified the lowcore is mapped
+twice, instead of only once.
+
+This means that NULL pointer accesses will succeed instead of causing an
+exception (low address protection still applies, but covers only parts).
+To fix this never map the first two pages of physical memory with the
+identity mapping.
+
+Fixes: 32db401965f1 ("s390/mm: Pin identity mapping base to zero")
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/boot/vmem.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
+index 1d073acd05a7..cea3de4dce8c 100644
+--- a/arch/s390/boot/vmem.c
++++ b/arch/s390/boot/vmem.c
+@@ -530,6 +530,9 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
+ lowcore_address + sizeof(struct lowcore),
+ POPULATE_LOWCORE);
+ for_each_physmem_usable_range(i, &start, &end) {
++ /* Do not map lowcore with identity mapping */
++ if (!start)
++ start = sizeof(struct lowcore);
+ pgtable_populate((unsigned long)__identity_va(start),
+ (unsigned long)__identity_va(end),
+ POPULATE_IDENTITY);
+--
+2.50.1
+
--- /dev/null
+From a150adbbb344390b7b11b69f58d1064b7ca557fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index a39f1da4ce47..a761c0aa5127 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 13d30a72e2d54c05a71937b4da5cad2dd2d77892 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 08:58:23 -0700
+Subject: scsi: ufs: core: Fix IRQ lock inversion for the SCSI host lock
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit eabcac808ca3ee9878223d4b49b750979029016b ]
+
+Commit 3c7ac40d7322 ("scsi: ufs: core: Delegate the interrupt service
+routine to a threaded IRQ handler") introduced an IRQ lock inversion
+issue. Fix this lock inversion by changing the spin_lock_irq() calls into
+spin_lock_irqsave() calls in code that can be called either from
+interrupt context or from thread context. This patch fixes the following
+lockdep complaint:
+
+WARNING: possible irq lock inversion dependency detected
+6.12.30-android16-5-maybe-dirty-4k #1 Tainted: G W OE
+--------------------------------------------------------
+kworker/u28:0/12 just changed the state of lock:
+ffffff881e29dd60 (&hba->clk_gating.lock){-...}-{2:2}, at: ufshcd_release_scsi_cmd+0x60/0x110
+but this lock took another, HARDIRQ-unsafe lock in the past:
+ (shost->host_lock){+.+.}-{2:2}
+
+and interrupts could create inverse lock ordering between them.
+
+other info that might help us debug this:
+ Possible interrupt unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(shost->host_lock);
+ local_irq_disable();
+ lock(&hba->clk_gating.lock);
+ lock(shost->host_lock);
+ <Interrupt>
+ lock(&hba->clk_gating.lock);
+
+ *** DEADLOCK ***
+
+4 locks held by kworker/u28:0/12:
+ #0: ffffff8800ac6158 ((wq_completion)async){+.+.}-{0:0}, at: process_one_work+0x1bc/0x65c
+ #1: ffffffc085c93d70 ((work_completion)(&entry->work)){+.+.}-{0:0}, at: process_one_work+0x1e4/0x65c
+ #2: ffffff881e29c0e0 (&shost->scan_mutex){+.+.}-{3:3}, at: __scsi_add_device+0x74/0x120
+ #3: ffffff881960ea00 (&hwq->cq_lock){-...}-{2:2}, at: ufshcd_mcq_poll_cqe_lock+0x28/0x104
+
+the shortest dependencies between 2nd lock and 1st lock:
+ -> (shost->host_lock){+.+.}-{2:2} {
+ HARDIRQ-ON-W at:
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock+0x48/0x64
+ ufshcd_sl_intr+0x4c/0xa08
+ ufshcd_threaded_intr+0x70/0x12c
+ irq_thread_fn+0x48/0xa8
+ irq_thread+0x130/0x1ec
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+ SOFTIRQ-ON-W at:
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock+0x48/0x64
+ ufshcd_sl_intr+0x4c/0xa08
+ ufshcd_threaded_intr+0x70/0x12c
+ irq_thread_fn+0x48/0xa8
+ irq_thread+0x130/0x1ec
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+ INITIAL USE at:
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock+0x48/0x64
+ ufshcd_sl_intr+0x4c/0xa08
+ ufshcd_threaded_intr+0x70/0x12c
+ irq_thread_fn+0x48/0xa8
+ irq_thread+0x130/0x1ec
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+ }
+ ... key at: [<ffffffc085ba1a98>] scsi_host_alloc.__key+0x0/0x10
+ ... acquired at:
+ _raw_spin_lock_irqsave+0x5c/0x80
+ __ufshcd_release+0x78/0x118
+ ufshcd_send_uic_cmd+0xe4/0x118
+ ufshcd_dme_set_attr+0x88/0x1c8
+ ufs_google_phy_initialization+0x68/0x418 [ufs]
+ ufs_google_link_startup_notify+0x78/0x27c [ufs]
+ ufshcd_link_startup+0x84/0x720
+ ufshcd_init+0xf3c/0x1330
+ ufshcd_pltfrm_init+0x728/0x7d8
+ ufs_google_probe+0x30/0x84 [ufs]
+ platform_probe+0xa0/0xe0
+ really_probe+0x114/0x454
+ __driver_probe_device+0xa4/0x160
+ driver_probe_device+0x44/0x23c
+ __driver_attach_async_helper+0x60/0xd4
+ async_run_entry_fn+0x4c/0x17c
+ process_one_work+0x26c/0x65c
+ worker_thread+0x33c/0x498
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+
+-> (&hba->clk_gating.lock){-...}-{2:2} {
+ IN-HARDIRQ-W at:
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock_irqsave+0x5c/0x80
+ ufshcd_release_scsi_cmd+0x60/0x110
+ ufshcd_compl_one_cqe+0x2c0/0x3f4
+ ufshcd_mcq_poll_cqe_lock+0xb0/0x104
+ ufs_google_mcq_intr+0x80/0xa0 [ufs]
+ __handle_irq_event_percpu+0x104/0x32c
+ handle_irq_event+0x40/0x9c
+ handle_fasteoi_irq+0x170/0x2e8
+ generic_handle_domain_irq+0x58/0x80
+ gic_handle_irq+0x48/0x104
+ call_on_irq_stack+0x3c/0x50
+ do_interrupt_handler+0x7c/0xd8
+ el1_interrupt+0x34/0x58
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x68/0x6c
+ _raw_spin_unlock_irqrestore+0x3c/0x6c
+ debug_object_assert_init+0x16c/0x21c
+ __mod_timer+0x4c/0x48c
+ schedule_timeout+0xd4/0x16c
+ io_schedule_timeout+0x48/0x70
+ do_wait_for_common+0x100/0x194
+ wait_for_completion_io_timeout+0x48/0x6c
+ blk_execute_rq+0x124/0x17c
+ scsi_execute_cmd+0x18c/0x3f8
+ scsi_probe_and_add_lun+0x204/0xd74
+ __scsi_add_device+0xbc/0x120
+ ufshcd_async_scan+0x80/0x3c0
+ async_run_entry_fn+0x4c/0x17c
+ process_one_work+0x26c/0x65c
+ worker_thread+0x33c/0x498
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+ INITIAL USE at:
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock_irqsave+0x5c/0x80
+ ufshcd_hold+0x34/0x14c
+ ufshcd_send_uic_cmd+0x28/0x118
+ ufshcd_dme_set_attr+0x88/0x1c8
+ ufs_google_phy_initialization+0x68/0x418 [ufs]
+ ufs_google_link_startup_notify+0x78/0x27c [ufs]
+ ufshcd_link_startup+0x84/0x720
+ ufshcd_init+0xf3c/0x1330
+ ufshcd_pltfrm_init+0x728/0x7d8
+ ufs_google_probe+0x30/0x84 [ufs]
+ platform_probe+0xa0/0xe0
+ really_probe+0x114/0x454
+ __driver_probe_device+0xa4/0x160
+ driver_probe_device+0x44/0x23c
+ __driver_attach_async_helper+0x60/0xd4
+ async_run_entry_fn+0x4c/0x17c
+ process_one_work+0x26c/0x65c
+ worker_thread+0x33c/0x498
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+ }
+ ... key at: [<ffffffc085ba6fe8>] ufshcd_init.__key+0x0/0x10
+ ... acquired at:
+ mark_lock+0x1c4/0x224
+ __lock_acquire+0x438/0x2e1c
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock_irqsave+0x5c/0x80
+ ufshcd_release_scsi_cmd+0x60/0x110
+ ufshcd_compl_one_cqe+0x2c0/0x3f4
+ ufshcd_mcq_poll_cqe_lock+0xb0/0x104
+ ufs_google_mcq_intr+0x80/0xa0 [ufs]
+ __handle_irq_event_percpu+0x104/0x32c
+ handle_irq_event+0x40/0x9c
+ handle_fasteoi_irq+0x170/0x2e8
+ generic_handle_domain_irq+0x58/0x80
+ gic_handle_irq+0x48/0x104
+ call_on_irq_stack+0x3c/0x50
+ do_interrupt_handler+0x7c/0xd8
+ el1_interrupt+0x34/0x58
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x68/0x6c
+ _raw_spin_unlock_irqrestore+0x3c/0x6c
+ debug_object_assert_init+0x16c/0x21c
+ __mod_timer+0x4c/0x48c
+ schedule_timeout+0xd4/0x16c
+ io_schedule_timeout+0x48/0x70
+ do_wait_for_common+0x100/0x194
+ wait_for_completion_io_timeout+0x48/0x6c
+ blk_execute_rq+0x124/0x17c
+ scsi_execute_cmd+0x18c/0x3f8
+ scsi_probe_and_add_lun+0x204/0xd74
+ __scsi_add_device+0xbc/0x120
+ ufshcd_async_scan+0x80/0x3c0
+ async_run_entry_fn+0x4c/0x17c
+ process_one_work+0x26c/0x65c
+ worker_thread+0x33c/0x498
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+
+stack backtrace:
+CPU: 6 UID: 0 PID: 12 Comm: kworker/u28:0 Tainted: G W OE 6.12.30-android16-5-maybe-dirty-4k #1 ccd4020fe444bdf629efc3b86df6be920b8df7d0
+Tainted: [W]=WARN, [O]=OOT_MODULE, [E]=UNSIGNED_MODULE
+Hardware name: Spacecraft board based on MALIBU (DT)
+Workqueue: async async_run_entry_fn
+Call trace:
+ dump_backtrace+0xfc/0x17c
+ show_stack+0x18/0x28
+ dump_stack_lvl+0x40/0xa0
+ dump_stack+0x18/0x24
+ print_irq_inversion_bug+0x2fc/0x304
+ mark_lock_irq+0x388/0x4fc
+ mark_lock+0x1c4/0x224
+ __lock_acquire+0x438/0x2e1c
+ lock_acquire+0x134/0x2b4
+ _raw_spin_lock_irqsave+0x5c/0x80
+ ufshcd_release_scsi_cmd+0x60/0x110
+ ufshcd_compl_one_cqe+0x2c0/0x3f4
+ ufshcd_mcq_poll_cqe_lock+0xb0/0x104
+ ufs_google_mcq_intr+0x80/0xa0 [ufs dd6f385554e109da094ab91d5f7be18625a2222a]
+ __handle_irq_event_percpu+0x104/0x32c
+ handle_irq_event+0x40/0x9c
+ handle_fasteoi_irq+0x170/0x2e8
+ generic_handle_domain_irq+0x58/0x80
+ gic_handle_irq+0x48/0x104
+ call_on_irq_stack+0x3c/0x50
+ do_interrupt_handler+0x7c/0xd8
+ el1_interrupt+0x34/0x58
+ el1h_64_irq_handler+0x18/0x24
+ el1h_64_irq+0x68/0x6c
+ _raw_spin_unlock_irqrestore+0x3c/0x6c
+ debug_object_assert_init+0x16c/0x21c
+ __mod_timer+0x4c/0x48c
+ schedule_timeout+0xd4/0x16c
+ io_schedule_timeout+0x48/0x70
+ do_wait_for_common+0x100/0x194
+ wait_for_completion_io_timeout+0x48/0x6c
+ blk_execute_rq+0x124/0x17c
+ scsi_execute_cmd+0x18c/0x3f8
+ scsi_probe_and_add_lun+0x204/0xd74
+ __scsi_add_device+0xbc/0x120
+ ufshcd_async_scan+0x80/0x3c0
+ async_run_entry_fn+0x4c/0x17c
+ process_one_work+0x26c/0x65c
+ worker_thread+0x33c/0x498
+ kthread+0x110/0x134
+ ret_from_fork+0x10/0x20
+
+Cc: Neil Armstrong <neil.armstrong@linaro.org>
+Cc: André Draszik <andre.draszik@linaro.org>
+Reviewed-by: Peter Wang <peter.wang@mediatek.com>
+Fixes: 3c7ac40d7322 ("scsi: ufs: core: Delegate the interrupt service routine to a threaded IRQ handler")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20250815155842.472867-2-bvanassche@acm.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index 3cc566e8bd1d..f2eeb82ffa9b 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5531,7 +5531,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+ irqreturn_t retval = IRQ_NONE;
+ struct uic_command *cmd;
+
+- spin_lock(hba->host->host_lock);
++ guard(spinlock_irqsave)(hba->host->host_lock);
+ cmd = hba->active_uic_cmd;
+ if (WARN_ON_ONCE(!cmd))
+ goto unlock;
+@@ -5558,8 +5558,6 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+ ufshcd_add_uic_command_trace(hba, cmd, UFS_CMD_COMP);
+
+ unlock:
+- spin_unlock(hba->host->host_lock);
+-
+ return retval;
+ }
+
+@@ -6892,7 +6890,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
+ bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
+
+- spin_lock(hba->host->host_lock);
++ guard(spinlock_irqsave)(hba->host->host_lock);
+ hba->errors |= UFSHCD_ERROR_MASK & intr_status;
+
+ if (hba->errors & INT_FATAL_ERRORS) {
+@@ -6951,7 +6949,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
+ */
+ hba->errors = 0;
+ hba->uic_error = 0;
+- spin_unlock(hba->host->host_lock);
++
+ return retval;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 5c8e68dd50ba13b93b800cb89c894bb68261d92d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 08:58:24 -0700
+Subject: scsi: ufs: core: Remove WARN_ON_ONCE() call from
+ ufshcd_uic_cmd_compl()
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit e5203d89d59bfcbe1f348aa0d2dc4449a8ba644c ]
+
+The UIC completion interrupt may be disabled while an UIC command is
+being processed. When the UIC completion interrupt is reenabled, an UIC
+interrupt is triggered and the WARN_ON_ONCE(!cmd) statement is hit.
+Hence this patch that removes this kernel warning.
+
+Fixes: fcd8b0450a9a ("scsi: ufs: core: Make ufshcd_uic_cmd_compl() easier to analyze")
+Reviewed-by: Peter Wang <peter.wang@mediatek.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20250815155842.472867-3-bvanassche@acm.org
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/core/ufshcd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
+index f2eeb82ffa9b..5224a2145402 100644
+--- a/drivers/ufs/core/ufshcd.c
++++ b/drivers/ufs/core/ufshcd.c
+@@ -5533,7 +5533,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+
+ guard(spinlock_irqsave)(hba->host->host_lock);
+ cmd = hba->active_uic_cmd;
+- if (WARN_ON_ONCE(!cmd))
++ if (!cmd)
+ goto unlock;
+
+ if (ufshcd_is_auto_hibern8_error(hba, intr_status))
+--
+2.50.1
+
--- /dev/null
+From 3e4a91a2c0e9e73227da0026bd3ccd1ef43f2cdc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 Aug 2025 13:03:30 +0530
+Subject: scsi: ufs: ufs-qcom: Fix ESI null pointer dereference
+
+From: Nitin Rawat <quic_nitirawa@quicinc.com>
+
+[ Upstream commit 6300d5c5438724c0876828da2f6e2c1a661871fc ]
+
+ESI/MSI is a performance optimization feature that provides dedicated
+interrupts per MCQ hardware queue. This is optional feature and UFS MCQ
+should work with and without ESI feature.
+
+Commit e46a28cea29a ("scsi: ufs: qcom: Remove the MSI descriptor abuse")
+brings a regression in ESI (Enhanced System Interrupt) configuration that
+causes a null pointer dereference when Platform MSI allocation fails.
+
+The issue occurs in when platform_device_msi_init_and_alloc_irqs() in
+ufs_qcom_config_esi() fails (returns -EINVAL) but the current code uses
+__free() macro for automatic cleanup free MSI resources that were never
+successfully allocated.
+
+Unable to handle kernel NULL pointer dereference at virtual
+address 0000000000000008
+
+ Call trace:
+ mutex_lock+0xc/0x54 (P)
+ platform_device_msi_free_irqs_all+0x1c/0x40
+ ufs_qcom_config_esi+0x1d0/0x220 [ufs_qcom]
+ ufshcd_config_mcq+0x28/0x104
+ ufshcd_init+0xa3c/0xf40
+ ufshcd_pltfrm_init+0x504/0x7d4
+ ufs_qcom_probe+0x20/0x58 [ufs_qcom]
+
+Fix by restructuring the ESI configuration to try MSI allocation first,
+before any other resource allocation and instead use explicit cleanup
+instead of __free() macro to avoid cleanup of unallocated resources.
+
+Tested on SM8750 platform with MCQ enabled, both with and without
+Platform ESI support.
+
+Fixes: e46a28cea29a ("scsi: ufs: qcom: Remove the MSI descriptor abuse")
+Cc: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
+Signed-off-by: Nitin Rawat <quic_nitirawa@quicinc.com>
+Link: https://lore.kernel.org/r/20250811073330.20230-1-quic_nitirawa@quicinc.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/host/ufs-qcom.c | 39 ++++++++++++++-----------------------
+ 1 file changed, 15 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 53301a2c27be..2e4edc192e8e 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -2053,17 +2053,6 @@ static irqreturn_t ufs_qcom_mcq_esi_handler(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
+-static void ufs_qcom_irq_free(struct ufs_qcom_irq *uqi)
+-{
+- for (struct ufs_qcom_irq *q = uqi; q->irq; q++)
+- devm_free_irq(q->hba->dev, q->irq, q->hba);
+-
+- platform_device_msi_free_irqs_all(uqi->hba->dev);
+- devm_kfree(uqi->hba->dev, uqi);
+-}
+-
+-DEFINE_FREE(ufs_qcom_irq, struct ufs_qcom_irq *, if (_T) ufs_qcom_irq_free(_T))
+-
+ static int ufs_qcom_config_esi(struct ufs_hba *hba)
+ {
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+@@ -2078,18 +2067,18 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
+ */
+ nr_irqs = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL];
+
+- struct ufs_qcom_irq *qi __free(ufs_qcom_irq) =
+- devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
+- if (!qi)
+- return -ENOMEM;
+- /* Preset so __free() has a pointer to hba in all error paths */
+- qi[0].hba = hba;
+-
+ ret = platform_device_msi_init_and_alloc_irqs(hba->dev, nr_irqs,
+ ufs_qcom_write_msi_msg);
+ if (ret) {
+- dev_err(hba->dev, "Failed to request Platform MSI %d\n", ret);
+- return ret;
++ dev_warn(hba->dev, "Platform MSI not supported or failed, continuing without ESI\n");
++ return ret; /* Continue without ESI */
++ }
++
++ struct ufs_qcom_irq *qi = devm_kcalloc(hba->dev, nr_irqs, sizeof(*qi), GFP_KERNEL);
++
++ if (!qi) {
++ platform_device_msi_free_irqs_all(hba->dev);
++ return -ENOMEM;
+ }
+
+ for (int idx = 0; idx < nr_irqs; idx++) {
+@@ -2100,15 +2089,17 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
+ ret = devm_request_irq(hba->dev, qi[idx].irq, ufs_qcom_mcq_esi_handler,
+ IRQF_SHARED, "qcom-mcq-esi", qi + idx);
+ if (ret) {
+- dev_err(hba->dev, "%s: Fail to request IRQ for %d, err = %d\n",
++ dev_err(hba->dev, "%s: Failed to request IRQ for %d, err = %d\n",
+ __func__, qi[idx].irq, ret);
+- qi[idx].irq = 0;
++ /* Free previously allocated IRQs */
++ for (int j = 0; j < idx; j++)
++ devm_free_irq(hba->dev, qi[j].irq, qi + j);
++ platform_device_msi_free_irqs_all(hba->dev);
++ devm_kfree(hba->dev, qi);
+ return ret;
+ }
+ }
+
+- retain_and_null_ptr(qi);
+-
+ if (host->hw_ver.major >= 6) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
+--
+2.50.1
+
--- /dev/null
+From f9a8b8b35214e37b38a12a967eb26d95ec7668a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 14 Jul 2025 13:23:34 +0530
+Subject: scsi: ufs: ufs-qcom: Update esi_vec_mask for HW major version >= 6
+
+From: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+
+[ Upstream commit 7a9d5195a7f5871a4ad4e55fc567a2b3bee49a59 ]
+
+The MCQ feature and ESI are supported by all Qualcomm UFS controller
+versions 6 and above.
+
+Therefore, update the ESI vector mask in the UFS_MEM_CFG3 register for
+platforms with major version number of 6 or higher.
+
+Reviewed-by: Manivannan Sadhasivam <mani@kernel.org>
+Signed-off-by: Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Signed-off-by: Nitin Rawat <quic_nitirawa@quicinc.com>
+Link: https://lore.kernel.org/r/20250714075336.2133-2-quic_nitirawa@quicinc.com
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: 6300d5c54387 ("scsi: ufs: ufs-qcom: Fix ESI null pointer dereference")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ufs/host/ufs-qcom.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
+index 18a978452001..53301a2c27be 100644
+--- a/drivers/ufs/host/ufs-qcom.c
++++ b/drivers/ufs/host/ufs-qcom.c
+@@ -2109,8 +2109,7 @@ static int ufs_qcom_config_esi(struct ufs_hba *hba)
+
+ retain_and_null_ptr(qi);
+
+- if (host->hw_ver.major == 6 && host->hw_ver.minor == 0 &&
+- host->hw_ver.step == 0) {
++ if (host->hw_ver.major >= 6) {
+ ufshcd_rmwl(hba, ESI_VEC_MASK, FIELD_PREP(ESI_VEC_MASK, MAX_ESI_VEC - 1),
+ REG_UFS_CFG3);
+ }
+--
+2.50.1
+
iio-imu-inv_icm42600-use-instead-of-memset.patch
iio-imu-inv_icm42600-convert-to-uxx-and-sxx-integer-types.patch
iio-imu-inv_icm42600-change-invalid-data-error-to-ebusy.patch
+spi-spi-qpic-snand-use-correct-cw_per_page-value-for.patch
+spi-spi-fsl-lpspi-clamp-too-high-speed_hz.patch
+spi-spi-qpic-snand-fix-calculating-of-ecc-oob-region.patch
+drm-nouveau-nvif-fix-potential-memory-leak-in-nvif_v.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+cgroup-cpuset-fix-a-partition-error-with-cpu-hotplug.patch
+drm-tests-fix-endian-warning.patch
+drm-tests-do-not-use-drm_fb_blit-in-format-helper-te.patch
+drm-tests-fix-drm_test_fb_xrgb8888_to_xrgb2101010-on.patch
+iosys-map-fix-undefined-behavior-in-iosys_map_clear.patch
+rust-alloc-replace-aligned_size-with-kmalloc-aligned.patch
+rust-drm-ensure-kmalloc-compatible-layout.patch
+rust-drm-remove-pin-annotations-from-drm-device.patch
+rust-drm-don-t-pass-the-address-of-drm-device-to-drm.patch
+drm-panic-add-a-u64-divide-by-10-for-arm32.patch
+platform-x86-amd-hsmp-ensure-sock-metric_tbl_addr-is.patch
+rdma-erdma-fix-ignored-return-value-of-init_kernel_q.patch
+rdma-erdma-fix-unset-qpn-of-gsi-qp.patch
+rdma-hns-fix-querying-wrong-scc-context-for-dip-algo.patch
+rdma-bnxt_re-fix-to-do-srq-armena-by-default.patch
+rdma-bnxt_re-fix-to-remove-workload-check-in-srq-lim.patch
+rdma-bnxt_re-fix-a-possible-memory-leak-in-the-drive.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+rdma-core-free-pfn_list-with-appropriate-kvfree-call.patch
+rdma-hns-fix-dip-entries-leak-on-devices-newer-than-.patch
+net-xilinx-axienet-fix-rx-skb-ring-management-in-dma.patch
+net-bridge-fix-soft-lockup-in-br_multicast_query_exp.patch
+net-sched-fix-backlog-accounting-in-qdisc_dequeue_in.patch
+rtase-fix-rx-descriptor-crc-error-bit-definition.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+bluetooth-hci_sync-fix-scan-state-after-pa-sync-has-.patch
+bluetooth-btmtk-fix-wait_on_bit_timeout-interruption.patch
+bluetooth-hci_core-fix-using-cis-bis-_capable-for-cu.patch
+bluetooth-hci_core-fix-using-ll_privacy_capable-for-.patch
+bluetooth-hci_sync-prevent-unintended-pa-sync-when-s.patch
+bluetooth-hci_event-fix-mtu-for-bn-0-in-cis-establis.patch
+bluetooth-hci_conn-do-return-error-from-hci_enhanced.patch
+bluetooth-add-pa_link-to-distinguish-big-sync-and-pa.patch
+bluetooth-hci_core-fix-not-accounting-for-bis-cis-pa.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+drm-nova-drm-fix-32-bit-arm-build.patch
+md-rename-recovery_cp-to-resync_offset.patch
+md-add-helper-rdev_needs_recovery.patch
+md-fix-sync_action-incorrect-display-during-resync.patch
+rust-alloc-fix-rusttest-by-providing-cmalloc-aligned.patch
+drm-hisilicon-hibmc-fix-the-i2c-device-resource-leak.patch
+drm-hisilicon-hibmc-fix-irq_request-s-irq-name-varia.patch
+drm-hisilicon-hibmc-fix-the-hibmc-loaded-failed-bug.patch
+drm-hisilicon-hibmc-fix-rare-monitors-cannot-display.patch
+drm-hisilicon-hibmc-fix-dp-and-vga-cannot-show-toget.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+regulator-pca9450-use-devm_register_sys_off_handler.patch
+drm-amd-display-add-null-pointer-check-in-mod_hdcp_h.patch
+drm-amd-display-adjust-dce-8-10-clock-don-t-overcloc.patch
+drm-amd-display-don-t-print-errors-for-nonexistent-c.patch
+net-gso-forbid-ipv6-tso-with-extensions-on-devices-w.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+bnxt_en-fix-lockdep-warning-during-rmmod.patch
+scsi-ufs-core-fix-irq-lock-inversion-for-the-scsi-ho.patch
+scsi-ufs-core-remove-warn_on_once-call-from-ufshcd_u.patch
+scsi-ufs-ufs-qcom-update-esi_vec_mask-for-hw-major-v.patch
+scsi-ufs-ufs-qcom-fix-esi-null-pointer-dereference.patch
+net-ethernet-mtk_ppe-add-rcu-lock-around-dev_fill_fo.patch
+ppp-fix-race-conditions-in-ppp_fill_forward_path.patch
+net-ti-icssg-prueth-fix-hsr-and-switch-offload-enabl.patch
+drm-xe-assign-ioctl-xe-file-handler-to-vm-in-xe_vm_c.patch
+regulator-tps65219-regulator-tps65219-fix-error-code.patch
+cifs-fix-oops-due-to-uninitialised-variable.patch
+phy-mscc-fix-timestamping-for-vsc8584.patch
+net-usb-asix_devices-fix-phy-address-mask-in-mdio-bu.patch
+gve-prevent-ethtool-ops-after-shutdown.patch
+net-stmmac-thead-enable-tx-clock-before-mac-initiali.patch
+net-smc-fix-uaf-on-smcsk-after-smc_listen_out.patch
+net-mlx5-hws-fix-bad-parameter-in-cq-creation.patch
+net-mlx5-hws-fix-complex-rules-rehash-error-flow.patch
+net-mlx5-hws-fix-table-creation-uid.patch
+net-mlx5-ct-use-the-correct-counter-offset.patch
+microchip-lan865x-fix-missing-netif_start_queue-call.patch
+microchip-lan865x-fix-missing-timer-increment-config.patch
+objtool-loongarch-get-table-size-correctly-if-lto-is.patch
+loongarch-pass-annotate-tablejump-option-if-lto-is-e.patch
+loongarch-optimize-module-load-time-by-optimizing-pl.patch
+asoc-cs35l56-update-firmware-addresses-for-cs35l63-f.patch
+asoc-cs35l56-handle-new-algorithms-ids-for-cs35l63.patch
+asoc-cs35l56-remove-soundwire-clock-divider-workarou.patch
+s390-mm-do-not-map-lowcore-with-identity-mapping.patch
+loongarch-kvm-use-standard-bitops-api-with-eiointc.patch
+loongarch-kvm-use-kvm_get_vcpu_by_id-instead-of-kvm_.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+igc-fix-disabling-l1.2-pci-e-link-substate-on-i226-o.patch
+net-dsa-microchip-fix-ksz9477-hsr-port-setup-issue.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+alsa-timer-fix-ida_free-call-while-not-allocated.patch
+bonding-update-lacp-activity-flag-after-setting-lacp.patch
+bonding-send-lacpdus-periodically-in-passive-mode-af.patch
+net-airoha-ppe-do-not-invalid-ppe-entries-in-case-of.patch
+block-move-elevator-queue-allocation-logic-into-blk_.patch
+block-fix-lockdep-warning-caused-by-lock-dependency-.patch
+block-fix-potential-deadlock-while-running-nr_hw_que.patch
+blk-mq-fix-lockdep-warning-in-__blk_mq_update_nr_hw_.patch
+block-decrement-block_rq_qos-static-key-in-rq_qos_de.patch
+block-skip-q-rq_qos-check-in-rq_qos_done_bio.patch
+block-avoid-cpu_hotplug_lock-depedency-on-freeze_loc.patch
+octeontx2-af-skip-overlap-check-for-spi-field.patch
+net-mlx5-base-ecvf-devlink-port-attrs-from-0.patch
+net-mlx5-add-ifc-bits-and-enums-for-buf_ownership.patch
+net-mlx5e-query-fw-for-buffer-ownership.patch
+net-mlx5e-preserve-shared-buffer-capacity-during-hea.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
+drm-xe-move-asid-allocation-and-user-pt-bo-tracking-.patch
+drm-xe-fix-vm_bind_ioctl-double-free-bug.patch
--- /dev/null
+From 6c716885353a08e7f505628a21b8bec3c078e870 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 12:07:42 +0200
+Subject: spi: spi-fsl-lpspi: Clamp too high speed_hz
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit af357a6a3b7d685e7aa621c6fb1d4ed6c349ec9e ]
+
+Currently the driver is not able to handle the case that a SPI device
+specifies a higher spi-max-frequency than half of per-clk:
+
+ per-clk should be at least two times of transfer speed
+
+Fix this by clamping to the max possible value and use the minimum SCK
+period of 2 cycles.
+
+Fixes: 77736a98b859 ("spi: lpspi: add the error info of transfer speed setting")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://patch.msgid.link/20250807100742.9917-1-wahrenst@gmx.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-lpspi.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 5e3818445234..1a22d356a73d 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -331,13 +331,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ }
+
+ if (config.speed_hz > perclk_rate / 2) {
+- dev_err(fsl_lpspi->dev,
+- "per-clk should be at least two times of transfer speed");
+- return -EINVAL;
++ div = 2;
++ } else {
++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+ }
+
+- div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+-
+ for (prescale = 0; prescale <= prescale_max; prescale++) {
+ scldiv = div / (1 << prescale) - 2;
+ if (scldiv >= 0 && scldiv < 256) {
+--
+2.50.1
+
--- /dev/null
+From f14f1b2f305cd6356a53a33ef3fd7bb4370f4fe9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 18:05:42 +0200
+Subject: spi: spi-qpic-snand: fix calculating of ECC OOB regions' properties
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit 13d0fe84a214658254a7412b2b46ec1507dc51f0 ]
+
+The OOB layout used by the driver has two distinct regions which contains
+hardware specific ECC data, yet the qcom_spi_ooblayout_ecc() function sets
+the same offset and length values for both regions which is clearly wrong.
+
+Change the code to calculate the correct values for both regions.
+
+For reference, the following table shows the computed offset and length
+values for various OOB size/ECC strength configurations:
+
+ +-----------------+-----------------+
+ |before the change| after the change|
+ +-------+----------+--------+--------+--------+--------+--------+
+ | OOB | ECC | region | region | region | region | region |
+ | size | strength | index | offset | length | offset | length |
+ +-------+----------+--------+--------+--------+--------+--------+
+ | 128 | 8 | 0 | 113 | 15 | 0 | 49 |
+ | | | 1 | 113 | 15 | 65 | 63 |
+ +-------+----------+--------+--------+--------+--------+--------+
+ | 128 | 4 | 0 | 117 | 11 | 0 | 37 |
+ | | | 1 | 117 | 11 | 53 | 75 |
+ +-------+----------+--------+--------+--------+--------+--------+
+ | 64 | 4 | 0 | 53 | 11 | 0 | 37 |
+ | | | 1 | 53 | 11 | 53 | 11 |
+ +-------+----------+--------+--------+--------+--------+--------+
+
+Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Reviewed-by: Konrad Dybcio <konrad.dybcio@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250805-qpic-snand-oob-ecc-fix-v2-1-e6f811c70d6f@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-qpic-snand.c | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
+index 722ab60d616f..e98e997680c7 100644
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -216,13 +216,21 @@ static int qcom_spi_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct qcom_nand_controller *snandc = nand_to_qcom_snand(nand);
+ struct qpic_ecc *qecc = snandc->qspi->ecc;
+
+- if (section > 1)
+- return -ERANGE;
+-
+- oobregion->length = qecc->ecc_bytes_hw + qecc->spare_bytes;
+- oobregion->offset = mtd->oobsize - oobregion->length;
++ switch (section) {
++ case 0:
++ oobregion->offset = 0;
++ oobregion->length = qecc->bytes * (qecc->steps - 1) +
++ qecc->bbm_size;
++ return 0;
++ case 1:
++ oobregion->offset = qecc->bytes * (qecc->steps - 1) +
++ qecc->bbm_size +
++ qecc->steps * 4;
++ oobregion->length = mtd->oobsize - oobregion->offset;
++ return 0;
++ }
+
+- return 0;
++ return -ERANGE;
+ }
+
+ static int qcom_spi_ooblayout_free(struct mtd_info *mtd, int section,
+--
+2.50.1
+
--- /dev/null
+From 48e99a89bd6c9166582613daa1a80e5d32b98dbf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 09:58:35 +0200
+Subject: spi: spi-qpic-snand: use correct CW_PER_PAGE value for OOB write
+
+From: Gabor Juhos <j4g8y7@gmail.com>
+
+[ Upstream commit 6bc829220b33da8522572cc50fdf5067c51d3bf3 ]
+
+The qcom_spi_program_oob() function uses only the last codeword to write
+the OOB data into the flash, but it sets the CW_PER_PAGE field in the
+CFG0 register as it would use all codewords.
+
+It seems that this confuses the hardware somehow, and any access to the
+flash fails with a timeout error after the function is called. The problem
+can be easily reproduced with the following commands:
+
+ # dd if=/dev/zero bs=2176 count=1 > /tmp/test.bin
+ 1+0 records in
+ 1+0 records out
+ # flash_erase /dev/mtd4 0 0
+ Erasing 128 Kibyte @ 0 -- 100 % complete
+ # nandwrite -O /dev/mtd4 /tmp/test.bin
+ Writing data to block 0 at offset 0x0
+ # nanddump -o /dev/mtd4 >/dev/null
+ ECC failed: 0
+ ECC corrected: 0
+ Number of bad blocks: 0
+ Number of bbt blocks: 0
+ Block size 131072, page size 2048, OOB size 128
+ Dumping data starting at 0x00000000 and ending at 0x00020000...
+ [ 33.197605] qcom_snand 79b0000.spi: failure to read oob
+ libmtd: error!: MEMREADOOB64 ioctl failed for mtd4, offset 0 (eraseblock 0)
+ error 110 (Operation timed out)
+ [ 35.277582] qcom_snand 79b0000.spi: failure in submitting cmd descriptor
+ libmtd: error!: cannot read 2048 bytes from mtd4 (eraseblock 0, offset 2048)
+ error 110 (Operation timed out)
+ nanddump: error!: mtd_read
+
+Change the code to use the correct CW_PER_PAGE value to avoid this.
+
+Fixes: 7304d1909080 ("spi: spi-qpic: add driver for QCOM SPI NAND flash Interface")
+Signed-off-by: Gabor Juhos <j4g8y7@gmail.com>
+Link: https://patch.msgid.link/20250801-qpic-snand-oob-cwpp-fix-v1-1-f5a41b86af2e@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-qpic-snand.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-qpic-snand.c b/drivers/spi/spi-qpic-snand.c
+index 3b757e3d00c0..722ab60d616f 100644
+--- a/drivers/spi/spi-qpic-snand.c
++++ b/drivers/spi/spi-qpic-snand.c
+@@ -1185,7 +1185,7 @@ static int qcom_spi_program_oob(struct qcom_nand_controller *snandc,
+ u32 cfg0, cfg1, ecc_bch_cfg, ecc_buf_cfg;
+
+ cfg0 = (ecc_cfg->cfg0 & ~CW_PER_PAGE_MASK) |
+- FIELD_PREP(CW_PER_PAGE_MASK, num_cw - 1);
++ FIELD_PREP(CW_PER_PAGE_MASK, 0);
+ cfg1 = ecc_cfg->cfg1;
+ ecc_bch_cfg = ecc_cfg->ecc_bch_cfg;
+ ecc_buf_cfg = ecc_cfg->ecc_buf_cfg;
+--
+2.50.1
+
--- /dev/null
+From ac0a56950a7b4b7fc8899cd2b8b4de1ff29d5e64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 12:59:45 +0300
+Subject: ALSA: usb-audio: Fix size validation in convert_chmap_v3()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 89f0addeee3cb2dc49837599330ed9c4612f05b0 ]
+
+The "p" pointer is void so sizeof(*p) is 1. The intent was to check
+sizeof(*cs_desc), which is 3, instead.
+
+Fixes: ecfd41166b72 ("ALSA: usb-audio: Validate UAC3 cluster segment descriptors")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/aKL5kftC1qGt6lpv@stanley.mountain
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index f5a6e990d07a..12a5e053ec54 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -349,7 +349,7 @@ snd_pcm_chmap_elem *convert_chmap_v3(struct uac3_cluster_header_descriptor
+ u16 cs_len;
+ u8 cs_type;
+
+- if (len < sizeof(*p))
++ if (len < sizeof(*cs_desc))
+ break;
+ cs_len = le16_to_cpu(cs_desc->wLength);
+ if (len < cs_len)
+--
+2.50.1
+
--- /dev/null
+From ef5397bbbd9812415d8bf0467fe0609f0649cf4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 17:08:34 +0200
+Subject: ALSA: usb-audio: Use correct sub-type for UAC3 feature unit
+ validation
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 8410fe81093ff231e964891e215b624dabb734b0 ]
+
+The entry of the validators table for UAC3 feature unit is defined
+with a wrong sub-type UAC_FEATURE (= 0x06) while it should have been
+UAC3_FEATURE (= 0x07). This patch corrects the entry value.
+
+Fixes: 57f8770620e9 ("ALSA: usb-audio: More validations of descriptor units")
+Link: https://patch.msgid.link/20250821150835.8894-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/validate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/usb/validate.c b/sound/usb/validate.c
+index 4f4e8e87a14c..a0d55b77c994 100644
+--- a/sound/usb/validate.c
++++ b/sound/usb/validate.c
+@@ -285,7 +285,7 @@ static const struct usb_desc_validator audio_validators[] = {
+ /* UAC_VERSION_3, UAC3_EXTENDED_TERMINAL: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_MIXER_UNIT, validate_mixer_unit),
+ FUNC(UAC_VERSION_3, UAC3_SELECTOR_UNIT, validate_selector_unit),
+- FUNC(UAC_VERSION_3, UAC_FEATURE_UNIT, validate_uac3_feature_unit),
++ FUNC(UAC_VERSION_3, UAC3_FEATURE_UNIT, validate_uac3_feature_unit),
+ /* UAC_VERSION_3, UAC3_EFFECT_UNIT: not implemented yet */
+ FUNC(UAC_VERSION_3, UAC3_PROCESSING_UNIT, validate_processing_unit),
+ FUNC(UAC_VERSION_3, UAC3_EXTENSION_UNIT, validate_processing_unit),
+--
+2.50.1
+
--- /dev/null
+From 8238c813610f7173dee6b561df92c4012a6a520b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 22:14:51 +0300
+Subject: Bluetooth: hci_conn: do return error from hci_enhanced_setup_sync()
+
+From: Sergey Shtylyov <s.shtylyov@omp.ru>
+
+[ Upstream commit 0eaf7c7e85da7495c0e03a99375707fc954f5e7b ]
+
+The commit e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to
+hci_sync") missed to update the *return* statement under the *case* of
+BT_CODEC_TRANSPARENT in hci_enhanced_setup_sync(), which led to returning
+success (0) instead of the negative error code (-EINVAL). However, the
+result of hci_enhanced_setup_sync() seems to be ignored anyway, since NULL
+gets passed to hci_cmd_sync_queue() as the last argument in that case and
+the only function interested in that result is specified by that argument.
+
+Fixes: e07a06b4eb41 ("Bluetooth: Convert SCO configure_datapath to hci_sync")
+Signed-off-by: Sergey Shtylyov <s.shtylyov@omp.ru>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_conn.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 549ee9e87d63..ff9d2520ba74 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -339,7 +339,8 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data)
+ case BT_CODEC_TRANSPARENT:
+ if (!find_next_esco_param(conn, esco_param_msbc,
+ ARRAY_SIZE(esco_param_msbc)))
+- return false;
++ return -EINVAL;
++
+ param = &esco_param_msbc[conn->attempt - 1];
+ cp.tx_coding_format.id = 0x03;
+ cp.rx_coding_format.id = 0x03;
+--
+2.50.1
+
--- /dev/null
+From cefbaf040e700b307960fdc0c21d0a876e5621ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 Aug 2025 11:36:20 +0300
+Subject: Bluetooth: hci_event: fix MTU for BN == 0 in CIS Established
+
+From: Pauli Virtanen <pav@iki.fi>
+
+[ Upstream commit 0b3725dbf61b51e7c663834811b3691157ae17d6 ]
+
+BN == 0x00 in CIS Established means no isochronous data for the
+corresponding direction (Core v6.1 pp. 2394). In this case SDU MTU
+should be 0.
+
+However, the specification does not say the Max_PDU_C_To_P or P_To_C are
+then zero. Intel AX210 in Framed CIS mode sets nonzero Max_PDU for
+direction with zero BN. This causes failure later when we try to LE
+Setup ISO Data Path for disabled direction, which is disallowed (Core
+v6.1 pp. 2750).
+
+Fix by setting SDU MTU to 0 if BN == 0.
+
+Fixes: 2be22f1941d5f ("Bluetooth: hci_event: Fix parsing of CIS Established Event")
+Signed-off-by: Pauli Virtanen <pav@iki.fi>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_event.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 3b22ce3aa95b..c06010c0d882 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -6664,8 +6664,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.out.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.in.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.out.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.in.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.out.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.in.phy = ev->c_phy;
+ qos->ucast.out.phy = ev->p_phy;
+ break;
+@@ -6679,8 +6679,8 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,
+ qos->ucast.in.latency =
+ DIV_ROUND_CLOSEST(get_unaligned_le24(ev->p_latency),
+ 1000);
+- qos->ucast.out.sdu = le16_to_cpu(ev->c_mtu);
+- qos->ucast.in.sdu = le16_to_cpu(ev->p_mtu);
++ qos->ucast.out.sdu = ev->c_bn ? le16_to_cpu(ev->c_mtu) : 0;
++ qos->ucast.in.sdu = ev->p_bn ? le16_to_cpu(ev->p_mtu) : 0;
+ qos->ucast.out.phy = ev->c_phy;
+ qos->ucast.in.phy = ev->p_phy;
+ break;
+--
+2.50.1
+
--- /dev/null
+From 16419356867d20a4d1aad78b37559c3f2d1aed11 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Feb 2024 17:58:58 +0000
+Subject: bonding: Add independent control state machine
+
+From: Aahil Awatramani <aahila@google.com>
+
+[ Upstream commit 240fd405528bbf7fafa0559202ca7aa524c9cd96 ]
+
+Add support for the independent control state machine per IEEE
+802.1AX-2008 5.4.15 in addition to the existing implementation of the
+coupled control state machine.
+
+Introduces two new states, AD_MUX_COLLECTING and AD_MUX_DISTRIBUTING in
+the LACP MUX state machine for separated handling of an initial
+Collecting state before the Collecting and Distributing state. This
+enables a port to be in a state where it can receive incoming packets
+while not still distributing. This is useful for reducing packet loss when
+a port begins distributing before its partner is able to collect.
+
+Added new functions such as bond_set_slave_tx_disabled_flags and
+bond_set_slave_rx_enabled_flags to precisely manage the port's collecting
+and distributing states. Previously, there was no dedicated method to
+disable TX while keeping RX enabled, which this patch addresses.
+
+Note that the regular flow process in the kernel's bonding driver remains
+unaffected by this patch. The extension requires explicit opt-in by the
+user (in order to ensure no disruptions for existing setups) via netlink
+support using the new bonding parameter coupled_control. The default value
+for coupled_control is set to 1 so as to preserve existing behaviour.
+
+Signed-off-by: Aahil Awatramani <aahila@google.com>
+Reviewed-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://lore.kernel.org/r/20240202175858.1573852-1-aahila@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 0599640a21e9 ("bonding: send LACPDUs periodically in passive mode after receiving partner's LACPDU")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/networking/bonding.rst | 12 ++
+ drivers/net/bonding/bond_3ad.c | 157 +++++++++++++++++++++++++--
+ drivers/net/bonding/bond_main.c | 1 +
+ drivers/net/bonding/bond_netlink.c | 16 +++
+ drivers/net/bonding/bond_options.c | 28 ++++-
+ include/net/bond_3ad.h | 2 +
+ include/net/bond_options.h | 1 +
+ include/net/bonding.h | 23 ++++
+ include/uapi/linux/if_link.h | 1 +
+ tools/include/uapi/linux/if_link.h | 1 +
+ 10 files changed, 234 insertions(+), 8 deletions(-)
+
+diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
+index f7a73421eb76..e774b48de9f5 100644
+--- a/Documentation/networking/bonding.rst
++++ b/Documentation/networking/bonding.rst
+@@ -444,6 +444,18 @@ arp_missed_max
+
+ The default value is 2, and the allowable range is 1 - 255.
+
++coupled_control
++
++ Specifies whether the LACP state machine's MUX in the 802.3ad mode
++ should have separate Collecting and Distributing states.
++
++ This is by implementing the independent control state machine per
++ IEEE 802.1AX-2008 5.4.15 in addition to the existing coupled control
++ state machine.
++
++ The default value is 1. This setting does not separate the Collecting
++ and Distributing states, maintaining the bond in coupled control.
++
+ downdelay
+
+ Specifies the time, in milliseconds, to wait before disabling
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index 56b18ab2fa28..a92a841ccec9 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -106,6 +106,9 @@ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+ static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_enable_collecting(struct port *port);
++static void ad_disable_distributing(struct port *port,
++ bool *update_slave_arr);
+ static void ad_enable_collecting_distributing(struct port *port,
+ bool *update_slave_arr);
+ static void ad_disable_collecting_distributing(struct port *port,
+@@ -171,9 +174,38 @@ static inline int __agg_has_partner(struct aggregator *agg)
+ return !is_zero_ether_addr(agg->partner_system.mac_addr_value);
+ }
+
++/**
++ * __disable_distributing_port - disable the port's slave for distributing.
++ * Port will still be able to collect.
++ * @port: the port we're looking at
++ *
++ * This will disable only distributing on the port's slave.
++ */
++static void __disable_distributing_port(struct port *port)
++{
++ bond_set_slave_tx_disabled_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
++}
++
++/**
++ * __enable_collecting_port - enable the port's slave for collecting,
++ * if it's up
++ * @port: the port we're looking at
++ *
++ * This will enable only collecting on the port's slave.
++ */
++static void __enable_collecting_port(struct port *port)
++{
++ struct slave *slave = port->slave;
++
++ if (slave->link == BOND_LINK_UP && bond_slave_is_up(slave))
++ bond_set_slave_rx_enabled_flags(slave, BOND_SLAVE_NOTIFY_LATER);
++}
++
+ /**
+ * __disable_port - disable the port's slave
+ * @port: the port we're looking at
++ *
++ * This will disable both collecting and distributing on the port's slave.
+ */
+ static inline void __disable_port(struct port *port)
+ {
+@@ -183,6 +215,8 @@ static inline void __disable_port(struct port *port)
+ /**
+ * __enable_port - enable the port's slave, if it's up
+ * @port: the port we're looking at
++ *
++ * This will enable both collecting and distributing on the port's slave.
+ */
+ static inline void __enable_port(struct port *port)
+ {
+@@ -193,10 +227,27 @@ static inline void __enable_port(struct port *port)
+ }
+
+ /**
+- * __port_is_enabled - check if the port's slave is in active state
++ * __port_move_to_attached_state - check if port should transition back to attached
++ * state.
++ * @port: the port we're looking at
++ */
++static bool __port_move_to_attached_state(struct port *port)
++{
++ if (!(port->sm_vars & AD_PORT_SELECTED) ||
++ (port->sm_vars & AD_PORT_STANDBY) ||
++ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
++ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION))
++ port->sm_mux_state = AD_MUX_ATTACHED;
++
++ return port->sm_mux_state == AD_MUX_ATTACHED;
++}
++
++/**
++ * __port_is_collecting_distributing - check if the port's slave is in the
++ * combined collecting/distributing state
+ * @port: the port we're looking at
+ */
+-static inline int __port_is_enabled(struct port *port)
++static int __port_is_collecting_distributing(struct port *port)
+ {
+ return bond_is_active_slave(port->slave);
+ }
+@@ -942,6 +993,7 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
+ */
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ {
++ struct bonding *bond = __get_bond_by_port(port);
+ mux_states_t last_state;
+
+ /* keep current State Machine state to compare later if it was
+@@ -999,9 +1051,13 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ if ((port->sm_vars & AD_PORT_SELECTED) &&
+ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
+ !__check_agg_selection_timer(port)) {
+- if (port->aggregator->is_active)
+- port->sm_mux_state =
+- AD_MUX_COLLECTING_DISTRIBUTING;
++ if (port->aggregator->is_active) {
++ int state = AD_MUX_COLLECTING_DISTRIBUTING;
++
++ if (!bond->params.coupled_control)
++ state = AD_MUX_COLLECTING;
++ port->sm_mux_state = state;
++ }
+ } else if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY)) {
+ /* if UNSELECTED or STANDBY */
+@@ -1019,11 +1075,45 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ }
+ break;
+ case AD_MUX_COLLECTING_DISTRIBUTING:
++ if (!__port_move_to_attached_state(port)) {
++ /* if port state hasn't changed make
++ * sure that a collecting distributing
++ * port in an active aggregator is enabled
++ */
++ if (port->aggregator->is_active &&
++ !__port_is_collecting_distributing(port)) {
++ __enable_port(port);
++ *update_slave_arr = true;
++ }
++ }
++ break;
++ case AD_MUX_COLLECTING:
++ if (!__port_move_to_attached_state(port)) {
++ if ((port->sm_vars & AD_PORT_SELECTED) &&
++ (port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) &&
++ (port->partner_oper.port_state & LACP_STATE_COLLECTING)) {
++ port->sm_mux_state = AD_MUX_DISTRIBUTING;
++ } else {
++ /* If port state hasn't changed, make sure that a collecting
++ * port is enabled for an active aggregator.
++ */
++ struct slave *slave = port->slave;
++
++ if (port->aggregator->is_active &&
++ bond_is_slave_rx_disabled(slave)) {
++ ad_enable_collecting(port);
++ *update_slave_arr = true;
++ }
++ }
++ }
++ break;
++ case AD_MUX_DISTRIBUTING:
+ if (!(port->sm_vars & AD_PORT_SELECTED) ||
+ (port->sm_vars & AD_PORT_STANDBY) ||
++ !(port->partner_oper.port_state & LACP_STATE_COLLECTING) ||
+ !(port->partner_oper.port_state & LACP_STATE_SYNCHRONIZATION) ||
+ !(port->actor_oper_port_state & LACP_STATE_SYNCHRONIZATION)) {
+- port->sm_mux_state = AD_MUX_ATTACHED;
++ port->sm_mux_state = AD_MUX_COLLECTING;
+ } else {
+ /* if port state hasn't changed make
+ * sure that a collecting distributing
+@@ -1031,7 +1121,7 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ */
+ if (port->aggregator &&
+ port->aggregator->is_active &&
+- !__port_is_enabled(port)) {
++ !__port_is_collecting_distributing(port)) {
+ __enable_port(port);
+ *update_slave_arr = true;
+ }
+@@ -1082,6 +1172,20 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
+ update_slave_arr);
+ port->ntt = true;
+ break;
++ case AD_MUX_COLLECTING:
++ port->actor_oper_port_state |= LACP_STATE_COLLECTING;
++ port->actor_oper_port_state &= ~LACP_STATE_DISTRIBUTING;
++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
++ ad_enable_collecting(port);
++ ad_disable_distributing(port, update_slave_arr);
++ port->ntt = true;
++ break;
++ case AD_MUX_DISTRIBUTING:
++ port->actor_oper_port_state |= LACP_STATE_DISTRIBUTING;
++ port->actor_oper_port_state |= LACP_STATE_SYNCHRONIZATION;
++ ad_enable_collecting_distributing(port,
++ update_slave_arr);
++ break;
+ default:
+ break;
+ }
+@@ -1906,6 +2010,45 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ }
+ }
+
++/**
++ * ad_enable_collecting - enable a port's receive
++ * @port: the port we're looking at
++ *
++ * Enable @port if it's in an active aggregator
++ */
++static void ad_enable_collecting(struct port *port)
++{
++ if (port->aggregator->is_active) {
++ struct slave *slave = port->slave;
++
++ slave_dbg(slave->bond->dev, slave->dev,
++ "Enabling collecting on port %d (LAG %d)\n",
++ port->actor_port_number,
++ port->aggregator->aggregator_identifier);
++ __enable_collecting_port(port);
++ }
++}
++
++/**
++ * ad_disable_distributing - disable a port's transmit
++ * @port: the port we're looking at
++ * @update_slave_arr: Does slave array need update?
++ */
++static void ad_disable_distributing(struct port *port, bool *update_slave_arr)
++{
++ if (port->aggregator &&
++ !MAC_ADDRESS_EQUAL(&port->aggregator->partner_system,
++ &(null_mac_addr))) {
++ slave_dbg(port->slave->bond->dev, port->slave->dev,
++ "Disabling distributing on port %d (LAG %d)\n",
++ port->actor_port_number,
++ port->aggregator->aggregator_identifier);
++ __disable_distributing_port(port);
++ /* Slave array needs an update */
++ *update_slave_arr = true;
++ }
++}
++
+ /**
+ * ad_enable_collecting_distributing - enable a port's transmit/receive
+ * @port: the port we're looking at
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 85ab69257162..cd5691ed9f17 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -6399,6 +6399,7 @@ static int __init bond_check_params(struct bond_params *params)
+ params->ad_actor_sys_prio = ad_actor_sys_prio;
+ eth_zero_addr(params->ad_actor_system);
+ params->ad_user_port_key = ad_user_port_key;
++ params->coupled_control = 1;
+ if (packets_per_slave > 0) {
+ params->reciprocal_packets_per_slave =
+ reciprocal_value(packets_per_slave);
+diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
+index 27cbe148f0db..aebc814ad495 100644
+--- a/drivers/net/bonding/bond_netlink.c
++++ b/drivers/net/bonding/bond_netlink.c
+@@ -122,6 +122,7 @@ static const struct nla_policy bond_policy[IFLA_BOND_MAX + 1] = {
+ [IFLA_BOND_PEER_NOTIF_DELAY] = NLA_POLICY_FULL_RANGE(NLA_U32, &delay_range),
+ [IFLA_BOND_MISSED_MAX] = { .type = NLA_U8 },
+ [IFLA_BOND_NS_IP6_TARGET] = { .type = NLA_NESTED },
++ [IFLA_BOND_COUPLED_CONTROL] = { .type = NLA_U8 },
+ };
+
+ static const struct nla_policy bond_slave_policy[IFLA_BOND_SLAVE_MAX + 1] = {
+@@ -549,6 +550,16 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
+ return err;
+ }
+
++ if (data[IFLA_BOND_COUPLED_CONTROL]) {
++ int coupled_control = nla_get_u8(data[IFLA_BOND_COUPLED_CONTROL]);
++
++ bond_opt_initval(&newval, coupled_control);
++ err = __bond_opt_set(bond, BOND_OPT_COUPLED_CONTROL, &newval,
++ data[IFLA_BOND_COUPLED_CONTROL], extack);
++ if (err)
++ return err;
++ }
++
+ return 0;
+ }
+
+@@ -615,6 +626,7 @@ static size_t bond_get_size(const struct net_device *bond_dev)
+ /* IFLA_BOND_NS_IP6_TARGET */
+ nla_total_size(sizeof(struct nlattr)) +
+ nla_total_size(sizeof(struct in6_addr)) * BOND_MAX_NS_TARGETS +
++ nla_total_size(sizeof(u8)) + /* IFLA_BOND_COUPLED_CONTROL */
+ 0;
+ }
+
+@@ -774,6 +786,10 @@ static int bond_fill_info(struct sk_buff *skb,
+ bond->params.missed_max))
+ goto nla_put_failure;
+
++ if (nla_put_u8(skb, IFLA_BOND_COUPLED_CONTROL,
++ bond->params.coupled_control))
++ goto nla_put_failure;
++
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ struct ad_info info;
+
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index b282ed5b59a9..8291803e4f00 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -85,7 +85,8 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+ static int bond_option_missed_max_set(struct bonding *bond,
+ const struct bond_opt_value *newval);
+-
++static int bond_option_coupled_control_set(struct bonding *bond,
++ const struct bond_opt_value *newval);
+
+ static const struct bond_opt_value bond_mode_tbl[] = {
+ { "balance-rr", BOND_MODE_ROUNDROBIN, BOND_VALFLAG_DEFAULT},
+@@ -233,6 +234,12 @@ static const struct bond_opt_value bond_missed_max_tbl[] = {
+ { NULL, -1, 0},
+ };
+
++static const struct bond_opt_value bond_coupled_control_tbl[] = {
++ { "on", 1, BOND_VALFLAG_DEFAULT},
++ { "off", 0, 0},
++ { NULL, -1, 0},
++};
++
+ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
+ [BOND_OPT_MODE] = {
+ .id = BOND_OPT_MODE,
+@@ -497,6 +504,15 @@ static const struct bond_option bond_opts[BOND_OPT_LAST] = {
+ .desc = "Delay between each peer notification on failover event, in milliseconds",
+ .values = bond_peer_notif_delay_tbl,
+ .set = bond_option_peer_notif_delay_set
++ },
++ [BOND_OPT_COUPLED_CONTROL] = {
++ .id = BOND_OPT_COUPLED_CONTROL,
++ .name = "coupled_control",
++ .desc = "Opt into using coupled control MUX for LACP states",
++ .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_8023AD)),
++ .flags = BOND_OPTFLAG_IFDOWN,
++ .values = bond_coupled_control_tbl,
++ .set = bond_option_coupled_control_set,
+ }
+ };
+
+@@ -1812,3 +1828,13 @@ static int bond_option_ad_user_port_key_set(struct bonding *bond,
+ bond->params.ad_user_port_key = newval->value;
+ return 0;
+ }
++
++static int bond_option_coupled_control_set(struct bonding *bond,
++ const struct bond_opt_value *newval)
++{
++ netdev_info(bond->dev, "Setting coupled_control to %s (%llu)\n",
++ newval->string, newval->value);
++
++ bond->params.coupled_control = newval->value;
++ return 0;
++}
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index 29f2a681aa14..078e16d2512a 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -54,6 +54,8 @@ typedef enum {
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
++ AD_MUX_COLLECTING, /* mux machine */
++ AD_MUX_DISTRIBUTING, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
+ } mux_states_t;
+
+diff --git a/include/net/bond_options.h b/include/net/bond_options.h
+index f631d9f09941..18687ccf0638 100644
+--- a/include/net/bond_options.h
++++ b/include/net/bond_options.h
+@@ -76,6 +76,7 @@ enum {
+ BOND_OPT_MISSED_MAX,
+ BOND_OPT_NS_TARGETS,
+ BOND_OPT_PRIO,
++ BOND_OPT_COUPLED_CONTROL,
+ BOND_OPT_LAST
+ };
+
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 94594026a5c5..8bb5f016969f 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -148,6 +148,7 @@ struct bond_params {
+ #if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+ #endif
++ int coupled_control;
+
+ /* 2 bytes of padding : see ether_addr_equal_64bits() */
+ u8 ad_actor_system[ETH_ALEN + 2];
+@@ -167,6 +168,7 @@ struct slave {
+ u8 backup:1, /* indicates backup slave. Value corresponds with
+ BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
+ inactive:1, /* indicates inactive slave */
++ rx_disabled:1, /* indicates whether slave's Rx is disabled */
+ should_notify:1, /* indicates whether the state changed */
+ should_notify_link:1; /* indicates whether the link changed */
+ u8 duplex;
+@@ -568,6 +570,14 @@ static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ if (!slave->bond->params.all_slaves_active)
+ slave->inactive = 1;
++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
++ slave->rx_disabled = 1;
++}
++
++static inline void bond_set_slave_tx_disabled_flags(struct slave *slave,
++ bool notify)
++{
++ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
+ }
+
+ static inline void bond_set_slave_active_flags(struct slave *slave,
+@@ -575,6 +585,14 @@ static inline void bond_set_slave_active_flags(struct slave *slave,
+ {
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
+ slave->inactive = 0;
++ if (BOND_MODE(slave->bond) == BOND_MODE_8023AD)
++ slave->rx_disabled = 0;
++}
++
++static inline void bond_set_slave_rx_enabled_flags(struct slave *slave,
++ bool notify)
++{
++ slave->rx_disabled = 0;
+ }
+
+ static inline bool bond_is_slave_inactive(struct slave *slave)
+@@ -582,6 +600,11 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
+ return slave->inactive;
+ }
+
++static inline bool bond_is_slave_rx_disabled(struct slave *slave)
++{
++ return slave->rx_disabled;
++}
++
+ static inline void bond_propose_link_state(struct slave *slave, int state)
+ {
+ slave->link_new_state = state;
+diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
+index ce3117df9cec..6750911da4f0 100644
+--- a/include/uapi/linux/if_link.h
++++ b/include/uapi/linux/if_link.h
+@@ -950,6 +950,7 @@ enum {
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
++ IFLA_BOND_COUPLED_CONTROL,
+ __IFLA_BOND_MAX,
+ };
+
+diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
+index 39e659c83cfd..cb8b0a3029d3 100644
+--- a/tools/include/uapi/linux/if_link.h
++++ b/tools/include/uapi/linux/if_link.h
+@@ -865,6 +865,7 @@ enum {
+ IFLA_BOND_AD_LACP_ACTIVE,
+ IFLA_BOND_MISSED_MAX,
+ IFLA_BOND_NS_IP6_TARGET,
++ IFLA_BOND_COUPLED_CONTROL,
+ __IFLA_BOND_MAX,
+ };
+
+--
+2.50.1
+
--- /dev/null
+From d3ad0fb150cdffeb5b330709df43fdfb98eb9fee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:59 +0000
+Subject: bonding: send LACPDUs periodically in passive mode after receiving
+ partner's LACPDU
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 0599640a21e98f0d6a3e9ff85c0a687c90a8103b ]
+
+When `lacp_active` is set to `off`, the bond operates in passive mode, meaning
+it only "speaks when spoken to." However, the current kernel implementation
+only sends an LACPDU in response when the partner's state changes.
+
+As a result, once LACP negotiation succeeds, the actor stops sending LACPDUs
+until the partner times out and sends an "expired" LACPDU. This causes
+continuous LACP state flapping.
+
+According to IEEE 802.1AX-2014, 6.4.13 (Periodic Transmission machine), the
+values of Partner_Oper_Port_State.LACP_Activity and
+Actor_Oper_Port_State.LACP_Activity determine whether periodic transmissions
+take place. If either or both parameters are set to Active LACP, then periodic
+transmissions occur; if both are set to Passive LACP, then periodic
+transmissions do not occur.
+
+To comply with this, we remove the `!bond->params.lacp_active` check in
+`ad_periodic_machine()`. Instead, we initialize the actor's port's
+`LACP_STATE_LACP_ACTIVITY` state based on `lacp_active` setting.
+
+Additionally, we avoid setting the partner's state to
+`LACP_STATE_LACP_ACTIVITY` in the EXPIRED state, since we should not assume
+the partner is active by default.
+
+This ensures that in passive mode, the bond starts sending periodic LACPDUs
+after receiving one from the partner, and avoids flapping due to inactivity.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-3-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 42 +++++++++++++++++++---------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index a92a841ccec9..d02a91cefec8 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -99,13 +99,13 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker);
+ static void ad_mux_machine(struct port *port, bool *update_slave_arr);
+ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port);
+ static void ad_tx_machine(struct port *port);
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params);
++static void ad_periodic_machine(struct port *port);
+ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr);
+ static void ad_agg_selection_logic(struct aggregator *aggregator,
+ bool *update_slave_arr);
+ static void ad_clear_agg(struct aggregator *aggregator);
+ static void ad_initialize_agg(struct aggregator *aggregator);
+-static void ad_initialize_port(struct port *port, int lacp_fast);
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params);
+ static void ad_enable_collecting(struct port *port);
+ static void ad_disable_distributing(struct port *port,
+ bool *update_slave_arr);
+@@ -1300,10 +1300,16 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
+ * case of EXPIRED even if LINK_DOWN didn't arrive for
+ * the port.
+ */
+- port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->sm_vars &= ~AD_PORT_MATCHED;
++ /* Based on IEEE 8021AX-2014, Figure 6-18 - Receive
++ * machine state diagram, the statue should be
++ * Partner_Oper_Port_State.Synchronization = FALSE;
++ * Partner_Oper_Port_State.LACP_Timeout = Short Timeout;
++ * start current_while_timer(Short Timeout);
++ * Actor_Oper_Port_State.Expired = TRUE;
++ */
++ port->partner_oper.port_state &= ~LACP_STATE_SYNCHRONIZATION;
+ port->partner_oper.port_state |= LACP_STATE_LACP_TIMEOUT;
+- port->partner_oper.port_state |= LACP_STATE_LACP_ACTIVITY;
+ port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(AD_SHORT_TIMEOUT));
+ port->actor_oper_port_state |= LACP_STATE_EXPIRED;
+ port->sm_vars |= AD_PORT_CHURNED;
+@@ -1409,11 +1415,10 @@ static void ad_tx_machine(struct port *port)
+ /**
+ * ad_periodic_machine - handle a port's periodic state machine
+ * @port: the port we're looking at
+- * @bond_params: bond parameters we will use
+ *
+ * Turn ntt flag on priodically to perform periodic transmission of lacpdu's.
+ */
+-static void ad_periodic_machine(struct port *port, struct bond_params *bond_params)
++static void ad_periodic_machine(struct port *port)
+ {
+ periodic_states_t last_state;
+
+@@ -1422,8 +1427,7 @@ static void ad_periodic_machine(struct port *port, struct bond_params *bond_para
+
+ /* check if port was reinitialized */
+ if (((port->sm_vars & AD_PORT_BEGIN) || !(port->sm_vars & AD_PORT_LACP_ENABLED) || !port->is_enabled) ||
+- (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY)) ||
+- !bond_params->lacp_active) {
++ (!(port->actor_oper_port_state & LACP_STATE_LACP_ACTIVITY) && !(port->partner_oper.port_state & LACP_STATE_LACP_ACTIVITY))) {
+ port->sm_periodic_state = AD_NO_PERIODIC;
+ }
+ /* check if state machine should change state */
+@@ -1947,16 +1951,16 @@ static void ad_initialize_agg(struct aggregator *aggregator)
+ /**
+ * ad_initialize_port - initialize a given port's parameters
+ * @port: the port we're looking at
+- * @lacp_fast: boolean. whether fast periodic should be used
++ * @bond_params: bond parameters we will use
+ */
+-static void ad_initialize_port(struct port *port, int lacp_fast)
++static void ad_initialize_port(struct port *port, const struct bond_params *bond_params)
+ {
+ static const struct port_params tmpl = {
+ .system_priority = 0xffff,
+ .key = 1,
+ .port_number = 1,
+ .port_priority = 0xff,
+- .port_state = 1,
++ .port_state = 0,
+ };
+ static const struct lacpdu lacpdu = {
+ .subtype = 0x01,
+@@ -1974,12 +1978,14 @@ static void ad_initialize_port(struct port *port, int lacp_fast)
+ port->actor_port_priority = 0xff;
+ port->actor_port_aggregator_identifier = 0;
+ port->ntt = false;
+- port->actor_admin_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
+- port->actor_oper_port_state = LACP_STATE_AGGREGATION |
+- LACP_STATE_LACP_ACTIVITY;
++ port->actor_admin_port_state = LACP_STATE_AGGREGATION;
++ port->actor_oper_port_state = LACP_STATE_AGGREGATION;
++ if (bond_params->lacp_active) {
++ port->actor_admin_port_state |= LACP_STATE_LACP_ACTIVITY;
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ }
+
+- if (lacp_fast)
++ if (bond_params->lacp_fast)
+ port->actor_oper_port_state |= LACP_STATE_LACP_TIMEOUT;
+
+ memcpy(&port->partner_admin, &tmpl, sizeof(tmpl));
+@@ -2195,7 +2201,7 @@ void bond_3ad_bind_slave(struct slave *slave)
+ /* port initialization */
+ port = &(SLAVE_AD_INFO(slave)->port);
+
+- ad_initialize_port(port, bond->params.lacp_fast);
++ ad_initialize_port(port, &bond->params);
+
+ port->slave = slave;
+ port->actor_port_number = SLAVE_AD_INFO(slave)->id;
+@@ -2507,7 +2513,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
+ }
+
+ ad_rx_machine(NULL, port);
+- ad_periodic_machine(port, &bond->params);
++ ad_periodic_machine(port);
+ ad_port_selection_logic(port, &update_slave_arr);
+ ad_mux_machine(port, &update_slave_arr);
+ ad_tx_machine(port);
+--
+2.50.1
+
--- /dev/null
+From 5371f03a99ec11d9100419bb3e105ef159594892 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 06:19:58 +0000
+Subject: bonding: update LACP activity flag after setting lacp_active
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit b64d035f77b1f02ab449393342264b44950a75ae ]
+
+The port's actor_oper_port_state activity flag should be updated immediately
+after changing the lacp_active option to reflect the current mode correctly.
+
+Fixes: 3a755cd8b7c6 ("bonding: add new option lacp_active")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250815062000.22220-2-liuhangbin@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_3ad.c | 25 +++++++++++++++++++++++++
+ drivers/net/bonding/bond_options.c | 1 +
+ include/net/bond_3ad.h | 1 +
+ 3 files changed, 27 insertions(+)
+
+diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
+index c99ffe6c683a..56b18ab2fa28 100644
+--- a/drivers/net/bonding/bond_3ad.c
++++ b/drivers/net/bonding/bond_3ad.c
+@@ -2734,6 +2734,31 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
+ spin_unlock_bh(&bond->mode_lock);
+ }
+
++/**
++ * bond_3ad_update_lacp_active - change the lacp active
++ * @bond: bonding struct
++ *
++ * Update actor_oper_port_state when lacp_active is modified.
++ */
++void bond_3ad_update_lacp_active(struct bonding *bond)
++{
++ struct port *port = NULL;
++ struct list_head *iter;
++ struct slave *slave;
++ int lacp_active;
++
++ lacp_active = bond->params.lacp_active;
++ spin_lock_bh(&bond->mode_lock);
++ bond_for_each_slave(bond, slave, iter) {
++ port = &(SLAVE_AD_INFO(slave)->port);
++ if (lacp_active)
++ port->actor_oper_port_state |= LACP_STATE_LACP_ACTIVITY;
++ else
++ port->actor_oper_port_state &= ~LACP_STATE_LACP_ACTIVITY;
++ }
++ spin_unlock_bh(&bond->mode_lock);
++}
++
+ size_t bond_3ad_stats_size(void)
+ {
+ return nla_total_size_64bit(sizeof(u64)) + /* BOND_3AD_STAT_LACPDU_RX */
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 6d003c0ef669..b282ed5b59a9 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1618,6 +1618,7 @@ static int bond_option_lacp_active_set(struct bonding *bond,
+ netdev_dbg(bond->dev, "Setting LACP active to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.lacp_active = newval->value;
++ bond_3ad_update_lacp_active(bond);
+
+ return 0;
+ }
+diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
+index c5e57c6bd873..29f2a681aa14 100644
+--- a/include/net/bond_3ad.h
++++ b/include/net/bond_3ad.h
+@@ -302,6 +302,7 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
+ struct slave *slave);
+ int bond_3ad_set_carrier(struct bonding *bond);
+ void bond_3ad_update_lacp_rate(struct bonding *bond);
++void bond_3ad_update_lacp_active(struct bonding *bond);
+ void bond_3ad_update_ad_actor_settings(struct bonding *bond);
+ int bond_3ad_stats_fill(struct sk_buff *skb, struct bond_3ad_stats *stats);
+ size_t bond_3ad_stats_size(void);
+--
+2.50.1
+
--- /dev/null
+From f17b18ef6ad6a593eece584804267040498d17cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 13:24:28 -0400
+Subject: cgroup/cpuset: Use static_branch_enable_cpuslocked() on
+ cpusets_insane_config_key
+
+From: Waiman Long <longman@redhat.com>
+
+[ Upstream commit 65f97cc81b0adc5f49cf6cff5d874be0058e3f41 ]
+
+The following lockdep splat was observed.
+
+[ 812.359086] ============================================
+[ 812.359089] WARNING: possible recursive locking detected
+[ 812.359097] --------------------------------------------
+[ 812.359100] runtest.sh/30042 is trying to acquire lock:
+[ 812.359105] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: static_key_enable+0xe/0x20
+[ 812.359131]
+[ 812.359131] but task is already holding lock:
+[ 812.359134] ffffffffa7f27420 (cpu_hotplug_lock){++++}-{0:0}, at: cpuset_write_resmask+0x98/0xa70
+ :
+[ 812.359267] Call Trace:
+[ 812.359272] <TASK>
+[ 812.359367] cpus_read_lock+0x3c/0xe0
+[ 812.359382] static_key_enable+0xe/0x20
+[ 812.359389] check_insane_mems_config.part.0+0x11/0x30
+[ 812.359398] cpuset_write_resmask+0x9f2/0xa70
+[ 812.359411] cgroup_file_write+0x1c7/0x660
+[ 812.359467] kernfs_fop_write_iter+0x358/0x530
+[ 812.359479] vfs_write+0xabe/0x1250
+[ 812.359529] ksys_write+0xf9/0x1d0
+[ 812.359558] do_syscall_64+0x5f/0xe0
+
+Since commit d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem
+and hotplug lock order"), the ordering of cpu hotplug lock
+and cpuset_mutex had been reversed. That patch correctly
+used the cpuslocked version of the static branch API to enable
+cpusets_pre_enable_key and cpusets_enabled_key, but it didn't do the
+same for cpusets_insane_config_key.
+
+The cpusets_insane_config_key can be enabled in the
+check_insane_mems_config() which is called from update_nodemask()
+or cpuset_hotplug_update_tasks() with both cpu hotplug lock and
+cpuset_mutex held. Deadlock can happen with a pending hotplug event that
+tries to acquire the cpu hotplug write lock which will block further
+cpus_read_lock() attempt from check_insane_mems_config(). Fix that by
+switching to use static_branch_enable_cpuslocked().
+
+Fixes: d74b27d63a8b ("cgroup/cpuset: Change cpuset_rwsem and hotplug lock order")
+Signed-off-by: Waiman Long <longman@redhat.com>
+Reviewed-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cpuset.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
+index ad8b62202bdc..eadb028916c8 100644
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -432,7 +432,7 @@ static inline void check_insane_mems_config(nodemask_t *nodes)
+ {
+ if (!cpusets_insane_config() &&
+ movable_only_nodes(nodes)) {
+- static_branch_enable(&cpusets_insane_config_key);
++ static_branch_enable_cpuslocked(&cpusets_insane_config_key);
+ pr_info("Unsupported (movable nodes only) cpuset configuration detected (nmask=%*pbl)!\n"
+ "Cpuset allocations might fail even with a lot of memory available.\n",
+ nodemask_pr_args(nodes));
+--
+2.50.1
+
--- /dev/null
+From feafc9be700db2dcd9a5cfe7af2b365790cd74e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 21:36:41 -0500
+Subject: drm/amd/display: Add null pointer check in
+ mod_hdcp_hdcp1_create_session()
+
+From: Chenyuan Yang <chenyuan0y@gmail.com>
+
+[ Upstream commit 7a2ca2ea64b1b63c8baa94a8f5deb70b2248d119 ]
+
+The function mod_hdcp_hdcp1_create_session() calls the function
+get_first_active_display(), but does not check its return value.
+The return value is a null pointer if the display list is empty.
+This will lead to a null pointer dereference.
+
+Add a null pointer check for get_first_active_display() and return
+MOD_HDCP_STATUS_DISPLAY_NOT_FOUND if the function returns NULL.
+
+This is similar to the commit c3e9826a2202
+("drm/amd/display: Add null pointer check for get_first_active_display()").
+
+Fixes: 2deade5ede56 ("drm/amd/display: Remove hdcp display state with mst fix")
+Signed-off-by: Chenyuan Yang <chenyuan0y@gmail.com>
+Reviewed-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Dan Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 5e43eb3cd731649c4f8b9134f857be62a416c893)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+index 7f8f127e7722..ab6964ca1c2b 100644
+--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
++++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+@@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
+ return MOD_HDCP_STATUS_FAILURE;
+ }
+
++ if (!display)
++ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
++
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
+--
+2.50.1
+
--- /dev/null
+From c9b2f100c7ab84d378957a3fef370f2bc4422837 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:42:31 +0800
+Subject: drm/hisilicon/hibmc: fix the hibmc loaded failed bug
+
+From: Baihan Li <libaihan@huawei.com>
+
+[ Upstream commit 93a08f856fcc5aaeeecad01f71bef3088588216a ]
+
+When hibmc loading fails, the driver uses hibmc_unload to free the
+resources, but the mutexes in mode.config are not initialized, which
+will lead to a NULL pointer access. Just change the goto statements to
+returns, because hibmc_hw_init() doesn't need to free anything.
+
+Fixes: b3df5e65cc03 ("drm/hibmc: Drop drm_vblank_cleanup")
+Signed-off-by: Baihan Li <libaihan@huawei.com>
+Signed-off-by: Yongbang Shi <shiyongbang@huawei.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Link: https://lore.kernel.org/r/20250813094238.3722345-5-shiyongbang@huawei.com
+Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+index 8a98fa276e8a..96f960bcfd82 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+@@ -258,13 +258,13 @@ static int hibmc_load(struct drm_device *dev)
+
+ ret = hibmc_hw_init(priv);
+ if (ret)
+- goto err;
++ return ret;
+
+ ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (ret) {
+ drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
+- goto err;
++ return ret;
+ }
+
+ ret = hibmc_kms_init(priv);
+--
+2.50.1
+
--- /dev/null
+From 05448b95693b90b6d249bd8df507d373b0a13d43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 19:50:27 +0800
+Subject: drm/nouveau/nvif: Fix potential memory leak in nvif_vmm_ctor().
+
+From: Fanhua Li <lifanhua5@huawei.com>
+
+[ Upstream commit bb8aeaa3191b617c6faf8ae937252e059673b7ea ]
+
+When the nvif_vmm_type is invalid, we will return an error directly
+without freeing the args in nvif_vmm_ctor(), which leads to a memory
+leak. Fix it by setting ret to -EINVAL and going to done.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/all/202312040659.4pJpMafN-lkp@intel.com/
+Fixes: 6b252cf42281 ("drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm")
+Signed-off-by: Fanhua Li <lifanhua5@huawei.com>
+Link: https://lore.kernel.org/r/20250728115027.50878-1-lifanhua5@huawei.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nvif/vmm.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c
+index 99296f03371a..07c1ebc2a941 100644
+--- a/drivers/gpu/drm/nouveau/nvif/vmm.c
++++ b/drivers/gpu/drm/nouveau/nvif/vmm.c
+@@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass,
+ case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break;
+ default:
+ WARN_ON(1);
+- return -EINVAL;
++ ret = -EINVAL;
++ goto done;
+ }
+
+ memcpy(args->data, argv, argc);
+--
+2.50.1
+
--- /dev/null
+From b4ce5a5b2972d12f3c2878f3ba26bb271a463283 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 14:12:45 -0700
+Subject: gve: prevent ethtool ops after shutdown
+
+From: Jordan Rhee <jordanrhee@google.com>
+
+[ Upstream commit 75a9a46d67f46d608205888f9b34e315c1786345 ]
+
+A crash can occur if an ethtool operation is invoked
+after shutdown() is called.
+
+shutdown() is invoked during system shutdown to stop DMA operations
+without performing expensive deallocations. It is discouraged to
+unregister the netdev in this path, so the device may still be visible
+to userspace and kernel helpers.
+
+In gve, shutdown() tears down most internal data structures. If an
+ethtool operation is dispatched after shutdown(), it will dereference
+freed or NULL pointers, leading to a kernel panic. While graceful
+shutdown normally quiesces userspace before invoking the reboot
+syscall, forced shutdowns (as observed on GCP VMs) can still trigger
+this path.
+
+Fix by calling netif_device_detach() in shutdown().
+This marks the device as detached so the ethtool ioctl handler
+will skip dispatching operations to the driver.
+
+Fixes: 974365e51861 ("gve: Implement suspend/resume/shutdown")
+Signed-off-by: Jordan Rhee <jordanrhee@google.com>
+Signed-off-by: Jeroen de Borst <jeroendb@google.com>
+Link: https://patch.msgid.link/20250818211245.1156919-1-jeroendb@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/google/gve/gve_main.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index ec189f0703f9..241a541b8edd 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -2373,6 +2373,8 @@ static void gve_shutdown(struct pci_dev *pdev)
+ struct gve_priv *priv = netdev_priv(netdev);
+ bool was_up = netif_carrier_ok(priv->dev);
+
++ netif_device_detach(netdev);
++
+ rtnl_lock();
+ if (was_up && gve_close(priv->dev)) {
+ /* If the dev was up, attempt to close, if close fails, reset */
+--
+2.50.1
+
--- /dev/null
+From 42df027d93f986ebb4a903c282c5f2b17f190f5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:59 -0700
+Subject: igc: fix disabling L1.2 PCI-E link substate on I226 on init
+
+From: ValdikSS <iam@valdikss.org.ru>
+
+[ Upstream commit 1468c1f97cf32418e34dbb40b784ed9333b9e123 ]
+
+Device ID comparison in igc_is_device_id_i226 is performed before
+the ID is set, resulting in always failing check on init.
+
+Before the patch:
+* L1.2 is not disabled on init
+* L1.2 is properly disabled after suspend-resume cycle
+
+With the patch:
+* L1.2 is properly disabled both on init and after suspend-resume
+
+How to test:
+Connect to the 1G link with 300+ mbit/s Internet speed, and run
+the download speed test, such as:
+
+ curl -o /dev/null http://speedtest.selectel.ru/1GB
+
+Without L1.2 disabled, the speed would be no more than ~200 mbit/s.
+With L1.2 disabled, the speed would reach 1 gbit/s.
+Note: it's required that the latency between your host and the remote
+be around 3-5 ms, the test inside LAN (<1 ms latency) won't trigger the
+issue.
+
+Link: https://lore.kernel.org/intel-wired-lan/15248b4f-3271-42dd-8e35-02bfc92b25e1@intel.com
+Fixes: 0325143b59c6 ("igc: disable L1.2 PCI-E link substate to avoid performance issue")
+Signed-off-by: ValdikSS <iam@valdikss.org.ru>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-6-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 11543db4c47f..3e1408e1c1fc 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6772,6 +6772,13 @@ static int igc_probe(struct pci_dev *pdev,
+ adapter->port_num = hw->bus.func;
+ adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
+
++ /* PCI config space info */
++ hw->vendor_id = pdev->vendor;
++ hw->device_id = pdev->device;
++ hw->revision_id = pdev->revision;
++ hw->subsystem_vendor_id = pdev->subsystem_vendor;
++ hw->subsystem_device_id = pdev->subsystem_device;
++
+ /* Disable ASPM L1.2 on I226 devices to avoid packet loss */
+ if (igc_is_device_id_i226(hw))
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2);
+@@ -6797,13 +6804,6 @@ static int igc_probe(struct pci_dev *pdev,
+ netdev->mem_start = pci_resource_start(pdev, 0);
+ netdev->mem_end = pci_resource_end(pdev, 0);
+
+- /* PCI config space info */
+- hw->vendor_id = pdev->vendor;
+- hw->device_id = pdev->device;
+- hw->revision_id = pdev->revision;
+- hw->subsystem_vendor_id = pdev->subsystem_vendor;
+- hw->subsystem_device_id = pdev->subsystem_device;
+-
+ /* Copy the default MAC and PHY function pointers */
+ memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+ memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+--
+2.50.1
+
--- /dev/null
+From 761c1745f977e593600facdd0864e7e88fcc1c16 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 08:40:27 -0700
+Subject: iommu/amd: Avoid stack buffer overflow from kernel cmdline
+
+From: Kees Cook <kees@kernel.org>
+
+[ Upstream commit 8503d0fcb1086a7cfe26df67ca4bd9bd9e99bdec ]
+
+While the kernel command line is considered trusted in most environments,
+avoid writing 1 byte past the end of "acpiid" if the "str" argument is
+maximum length.
+
+Reported-by: Simcha Kosman <simcha.kosman@cyberark.com>
+Closes: https://lore.kernel.org/all/AS8P193MB2271C4B24BCEDA31830F37AE84A52@AS8P193MB2271.EURP193.PROD.OUTLOOK.COM
+Fixes: b6b26d86c61c ("iommu/amd: Add a length limitation for the ivrs_acpihid command-line parameter")
+Signed-off-by: Kees Cook <kees@kernel.org>
+Reviewed-by: Ankit Soni <Ankit.Soni@amd.com>
+Link: https://lore.kernel.org/r/20250804154023.work.970-kees@kernel.org
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/amd/init.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index 2e7a12f30651..431cea41df2a 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -3625,7 +3625,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ {
+ u32 seg = 0, bus, dev, fn;
+ char *hid, *uid, *p, *addr;
+- char acpiid[ACPIID_LEN] = {0};
++ char acpiid[ACPIID_LEN + 1] = { }; /* size with NULL terminator */
+ int i;
+
+ addr = strchr(str, '@');
+@@ -3651,7 +3651,7 @@ static int __init parse_ivrs_acpihid(char *str)
+ /* We have the '@', make it the terminator to get just the acpiid */
+ *addr++ = 0;
+
+- if (strlen(str) > ACPIID_LEN + 1)
++ if (strlen(str) > ACPIID_LEN)
+ goto not_found;
+
+ if (sscanf(str, "=%s", acpiid) != 1)
+--
+2.50.1
+
--- /dev/null
+From 4f139a4228d258d153ce54edcb0087078f144445 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:20:51 +0530
+Subject: iosys-map: Fix undefined behavior in iosys_map_clear()
+
+From: Nitin Gote <nitin.r.gote@intel.com>
+
+[ Upstream commit 5634c8cb298a7146b4e38873473e280b50e27a2c ]
+
+The current iosys_map_clear() implementation reads the potentially
+uninitialized 'is_iomem' boolean field to decide which union member
+to clear. This causes undefined behavior when called on uninitialized
+structures, as 'is_iomem' may contain garbage values like 0xFF.
+
+UBSAN detects this as:
+ UBSAN: invalid-load in include/linux/iosys-map.h:267
+ load of value 255 is not a valid value for type '_Bool'
+
+Fix by unconditionally clearing the entire structure with memset(),
+eliminating the need to read uninitialized data and ensuring all
+fields are set to known good values.
+
+Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/14639
+Fixes: 01fd30da0474 ("dma-buf: Add struct dma-buf-map for storing struct dma_buf.vaddr_ptr")
+Signed-off-by: Nitin Gote <nitin.r.gote@intel.com>
+Reviewed-by: Andi Shyti <andi.shyti@linux.intel.com>
+Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://lore.kernel.org/r/20250718105051.2709487-1-nitin.r.gote@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/iosys-map.h | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
+index cb71aa616bd3..631d58d0b838 100644
+--- a/include/linux/iosys-map.h
++++ b/include/linux/iosys-map.h
+@@ -264,12 +264,7 @@ static inline bool iosys_map_is_set(const struct iosys_map *map)
+ */
+ static inline void iosys_map_clear(struct iosys_map *map)
+ {
+- if (map->is_iomem) {
+- map->vaddr_iomem = NULL;
+- map->is_iomem = false;
+- } else {
+- map->vaddr = NULL;
+- }
++ memset(map, 0, sizeof(*map));
+ }
+
+ /**
+--
+2.50.1
+
--- /dev/null
+From 72fed2e5c5e9b29ef8186df17ed19a0dc2777d14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Aug 2025 14:38:45 +0800
+Subject: ipv6: sr: validate HMAC algorithm ID in seg6_hmac_info_add
+
+From: Minhong He <heminhong@kylinos.cn>
+
+[ Upstream commit 84967deee9d9870b15bc4c3acb50f1d401807902 ]
+
+The seg6_genl_sethmac() directly uses the algorithm ID provided by the
+userspace without verifying whether it is an HMAC algorithm supported
+by the system.
+If an unsupported HMAC algorithm ID is configured, packets using SRv6 HMAC
+will be dropped during encapsulation or decapsulation.
+
+Fixes: 4f4853dc1c9c ("ipv6: sr: implement API to control SR HMAC structure")
+Signed-off-by: Minhong He <heminhong@kylinos.cn>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250815063845.85426-1-heminhong@kylinos.cn
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/seg6_hmac.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
+index 22a5006ad34a..6e15a65faecc 100644
+--- a/net/ipv6/seg6_hmac.c
++++ b/net/ipv6/seg6_hmac.c
+@@ -294,6 +294,9 @@ int seg6_hmac_info_add(struct net *net, u32 key, struct seg6_hmac_info *hinfo)
+ struct seg6_pernet_data *sdata = seg6_pernet(net);
+ int err;
+
++ if (!__hmac_get_algo(hinfo->alg_id))
++ return -EINVAL;
++
+ err = rhashtable_lookup_insert_fast(&sdata->hmac_infos, &hinfo->node,
+ rht_params);
+
+--
+2.50.1
+
--- /dev/null
+From ebfe1e3b3447bc0008ef16ee5653f8c137aa6003 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 15:19:57 -0700
+Subject: ixgbe: xsk: resolve the negative overflow of budget in ixgbe_xmit_zc
+
+From: Jason Xing <kernelxing@tencent.com>
+
+[ Upstream commit 4d4d9ef9dfee877d494e5418f68a1016ef08cad6 ]
+
+Resolve the budget negative overflow which leads to returning true in
+ixgbe_xmit_zc even when the budget of descs are thoroughly consumed.
+
+Before this patch, when the budget is decreased to zero and finishes
+sending the last allowed desc in ixgbe_xmit_zc, it will always turn back
+and enter into the while() statement to see if it should keep processing
+packets, but in the meantime it unexpectedly decreases the value again to
+'unsigned int (0--)', namely, UINT_MAX. Finally, the ixgbe_xmit_zc returns
+true, showing 'we complete cleaning the budget'. That also means
+'clean_complete = true' in ixgbe_poll.
+
+The true theory behind this is if that budget number of descs are consumed,
+it implies that we might have more descs to be done. So we should return
+false in ixgbe_xmit_zc to tell napi poll to find another chance to start
+polling to handle the rest of descs. On the contrary, returning true here
+means job done and we know we finish all the possible descs this time and
+we don't intend to start a new napi poll.
+
+It is apparently against our expectations. Please also see how
+ixgbe_clean_tx_irq() handles the problem: it uses do..while() statement
+to make sure the budget can be decreased to zero at most and the negative
+overflow never happens.
+
+The patch adds 'likely' because we rarely would not hit the loop condition
+since the standard budget is 256.
+
+Fixes: 8221c5eba8c1 ("ixgbe: add AF_XDP zero-copy Tx support")
+Signed-off-by: Jason Xing <kernelxing@tencent.com>
+Reviewed-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Link: https://patch.msgid.link/20250819222000.3504873-4-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+index 1703c640a434..7ef82c30e857 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+@@ -403,7 +403,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ dma_addr_t dma;
+ u32 cmd_type;
+
+- while (budget-- > 0) {
++ while (likely(budget)) {
+ if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
+ work_done = false;
+ break;
+@@ -438,6 +438,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
+ xdp_ring->next_to_use++;
+ if (xdp_ring->next_to_use == xdp_ring->count)
+ xdp_ring->next_to_use = 0;
++
++ budget--;
+ }
+
+ if (tx_desc) {
+--
+2.50.1
+
--- /dev/null
+From 9dfd6f07093bc760913b6d0c6ced3d6ba0ce6898 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 22:23:44 +0800
+Subject: LoongArch: Optimize module load time by optimizing PLT/GOT counting
+
+From: Kanglong Wang <wangkanglong@loongson.cn>
+
+[ Upstream commit 63dbd8fb2af3a89466538599a9acb2d11ef65c06 ]
+
+When enabling CONFIG_KASAN, CONFIG_PREEMPT_VOLUNTARY_BUILD and
+CONFIG_PREEMPT_VOLUNTARY at the same time, there will be soft deadlock,
+the relevant logs are as follows:
+
+rcu: INFO: rcu_sched self-detected stall on CPU
+...
+Call Trace:
+[<900000000024f9e4>] show_stack+0x5c/0x180
+[<90000000002482f4>] dump_stack_lvl+0x94/0xbc
+[<9000000000224544>] rcu_dump_cpu_stacks+0x1fc/0x280
+[<900000000037ac80>] rcu_sched_clock_irq+0x720/0xf88
+[<9000000000396c34>] update_process_times+0xb4/0x150
+[<90000000003b2474>] tick_nohz_handler+0xf4/0x250
+[<9000000000397e28>] __hrtimer_run_queues+0x1d0/0x428
+[<9000000000399b2c>] hrtimer_interrupt+0x214/0x538
+[<9000000000253634>] constant_timer_interrupt+0x64/0x80
+[<9000000000349938>] __handle_irq_event_percpu+0x78/0x1a0
+[<9000000000349a78>] handle_irq_event_percpu+0x18/0x88
+[<9000000000354c00>] handle_percpu_irq+0x90/0xf0
+[<9000000000348c74>] handle_irq_desc+0x94/0xb8
+[<9000000001012b28>] handle_cpu_irq+0x68/0xa0
+[<9000000001def8c0>] handle_loongarch_irq+0x30/0x48
+[<9000000001def958>] do_vint+0x80/0xd0
+[<9000000000268a0c>] kasan_mem_to_shadow.part.0+0x2c/0x2a0
+[<90000000006344f4>] __asan_load8+0x4c/0x120
+[<900000000025c0d0>] module_frob_arch_sections+0x5c8/0x6b8
+[<90000000003895f0>] load_module+0x9e0/0x2958
+[<900000000038b770>] __do_sys_init_module+0x208/0x2d0
+[<9000000001df0c34>] do_syscall+0x94/0x190
+[<900000000024d6fc>] handle_syscall+0xbc/0x158
+
+After analysis, this is because the slow speed of loading the amdgpu
+module leads to the long time occupation of the cpu and then the soft
+deadlock.
+
+When loading a module, module_frob_arch_sections() tries to figure out
+the number of PLTs/GOTs that will be needed to handle all the RELAs. It
+will call count_max_entries() to find duplicates in out-of-order data,
+a counting algorithm with O(n^2) complexity.
+
+To make it faster, we sort the relocation list by info and addend. That
+way, to check for a duplicate relocation, it just needs to compare with
+the previous entry. This reduces the complexity of the algorithm to O(n
+ log n), as done in commit d4e0340919fb ("arm64/module: Optimize module
+load time by optimizing PLT counting"). This gives sinificant reduction
+in module load time for modules with large number of relocations.
+
+After applying this patch, the soft deadlock problem has been solved,
+and the kernel starts normally without "Call Trace".
+
+Using the default configuration to test some modules, the results are as
+follows:
+
+Module Size
+ip_tables 36K
+fat 143K
+radeon 2.5MB
+amdgpu 16MB
+
+Without this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 54 1221/84
+amdgpu 1411 4525/1098
+
+With this patch:
+Module Module load time (ms) Count(PLTs/GOTs)
+ip_tables 18 59/6
+fat 0 162/14
+radeon 22 1221/84
+amdgpu 45 4525/1098
+
+Fixes: fcdfe9d22bed ("LoongArch: Add ELF and module support")
+Signed-off-by: Kanglong Wang <wangkanglong@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/loongarch/kernel/module-sections.c | 36 ++++++++++++-------------
+ 1 file changed, 18 insertions(+), 18 deletions(-)
+
+diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
+index e2f30ff9afde..a43ba7f9f987 100644
+--- a/arch/loongarch/kernel/module-sections.c
++++ b/arch/loongarch/kernel/module-sections.c
+@@ -8,6 +8,7 @@
+ #include <linux/module.h>
+ #include <linux/moduleloader.h>
+ #include <linux/ftrace.h>
++#include <linux/sort.h>
+
+ Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+ {
+@@ -61,39 +62,38 @@ Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr v
+ return (Elf_Addr)&plt[nr];
+ }
+
+-static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+-{
+- return x->r_info == y->r_info && x->r_addend == y->r_addend;
+-}
++#define cmp_3way(a, b) ((a) < (b) ? -1 : (a) > (b))
+
+-static bool duplicate_rela(const Elf_Rela *rela, int idx)
++static int compare_rela(const void *x, const void *y)
+ {
+- int i;
++ int ret;
++ const Elf_Rela *rela_x = x, *rela_y = y;
+
+- for (i = 0; i < idx; i++) {
+- if (is_rela_equal(&rela[i], &rela[idx]))
+- return true;
+- }
++ ret = cmp_3way(rela_x->r_info, rela_y->r_info);
++ if (ret == 0)
++ ret = cmp_3way(rela_x->r_addend, rela_y->r_addend);
+
+- return false;
++ return ret;
+ }
+
+ static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+ {
+- unsigned int i, type;
++ unsigned int i;
++
++ sort(relas, num, sizeof(Elf_Rela), compare_rela, NULL);
+
+ for (i = 0; i < num; i++) {
+- type = ELF_R_TYPE(relas[i].r_info);
+- switch (type) {
++ if (i && !compare_rela(&relas[i-1], &relas[i]))
++ continue;
++
++ switch (ELF_R_TYPE(relas[i].r_info)) {
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ case R_LARCH_B26:
+- if (!duplicate_rela(relas, i))
+- (*plts)++;
++ (*plts)++;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+- if (!duplicate_rela(relas, i))
+- (*gots)++;
++ (*gots)++;
+ break;
+ default:
+ break; /* Do nothing. */
+--
+2.50.1
+
--- /dev/null
+From d0b1ce54f47d136ff4b4d56b15f1cbbde9940582 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 15:06:40 +0200
+Subject: mlxsw: spectrum: Forward packets with an IPv4 link-local source IP
+
+From: Ido Schimmel <idosch@nvidia.com>
+
+[ Upstream commit f604d3aaf64ff0d90cc875295474d3abf4155629 ]
+
+By default, the device does not forward IPv4 packets with a link-local
+source IP (i.e., 169.254.0.0/16). This behavior does not align with the
+kernel which does forward them.
+
+Fix by instructing the device to forward such packets instead of
+dropping them.
+
+Fixes: ca360db4b825 ("mlxsw: spectrum: Disable DIP_LINK_LOCAL check in hardware pipeline")
+Reported-by: Zoey Mertes <zoey@cloudflare.com>
+Signed-off-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: Petr Machata <petrm@nvidia.com>
+Signed-off-by: Petr Machata <petrm@nvidia.com>
+Link: https://patch.msgid.link/6721e6b2c96feb80269e72ce8d0b426e2f32d99c.1755174341.git.petrm@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 2 ++
+ drivers/net/ethernet/mellanox/mlxsw/trap.h | 1 +
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 9dbd5edff0b0..51f49510826a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2528,6 +2528,8 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
+ ROUTER_EXP, false),
+ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
+ ROUTER_EXP, false),
++ MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_LINK_LOCAL, FORWARD,
++ ROUTER_EXP, false),
+ /* Multicast Router Traps */
+ MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
+ MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+index 83477c8e6971..5bfc1499347a 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
+@@ -95,6 +95,7 @@ enum {
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_SIP_BC = 0x16A,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_IPV4_DIP_LOCAL_NET = 0x16B,
+ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_DIP_LINK_LOCAL = 0x16C,
++ MLXSW_TRAP_ID_DISCARD_ING_ROUTER_SIP_LINK_LOCAL = 0x16D,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_IRIF_EN = 0x178,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_ERIF_EN = 0x179,
+ MLXSW_TRAP_ID_DISCARD_ROUTER_LPM4 = 0x17B,
+--
+2.50.1
+
--- /dev/null
+From 671a2a40edbcdd8d3c470bd04a3d593f9df4e3fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 10:10:54 +0800
+Subject: net: bridge: fix soft lockup in br_multicast_query_expired()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d1547bf460baec718b3398365f8de33d25c5f36f ]
+
+When set multicast_query_interval to a large value, the local variable
+'time' in br_multicast_send_query() may overflow. If the time is smaller
+than jiffies, the timer will expire immediately, and then call mod_timer()
+again, which creates a loop and may trigger the following soft lockup
+issue.
+
+ watchdog: BUG: soft lockup - CPU#1 stuck for 221s! [rb_consumer:66]
+ CPU: 1 UID: 0 PID: 66 Comm: rb_consumer Not tainted 6.16.0+ #259 PREEMPT(none)
+ Call Trace:
+ <IRQ>
+ __netdev_alloc_skb+0x2e/0x3a0
+ br_ip6_multicast_alloc_query+0x212/0x1b70
+ __br_multicast_send_query+0x376/0xac0
+ br_multicast_send_query+0x299/0x510
+ br_multicast_query_expired.constprop.0+0x16d/0x1b0
+ call_timer_fn+0x3b/0x2a0
+ __run_timers+0x619/0x950
+ run_timer_softirq+0x11c/0x220
+ handle_softirqs+0x18e/0x560
+ __irq_exit_rcu+0x158/0x1a0
+ sysvec_apic_timer_interrupt+0x76/0x90
+ </IRQ>
+
+This issue can be reproduced with:
+ ip link add br0 type bridge
+ echo 1 > /sys/class/net/br0/bridge/multicast_querier
+ echo 0xffffffffffffffff >
+ /sys/class/net/br0/bridge/multicast_query_interval
+ ip link set dev br0 up
+
+The multicast_startup_query_interval can also cause this issue. Similar to
+the commit 99b40610956a ("net: bridge: mcast: add and enforce query
+interval minimum"), add check for the query interval maximum to fix this
+issue.
+
+Link: https://lore.kernel.org/netdev/20250806094941.1285944-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250812091818.542238-1-wangliang74@huawei.com/
+Fixes: d902eee43f19 ("bridge: Add multicast count/interval sysfs entries")
+Suggested-by: Nikolay Aleksandrov <razor@blackwall.org>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250813021054.1643649-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_multicast.c | 16 ++++++++++++++++
+ net/bridge/br_private.h | 2 ++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index fa16ee88ec39..f42805d9b38f 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -4807,6 +4807,14 @@ void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_query_interval = intvl_jiffies;
+ }
+
+@@ -4823,6 +4831,14 @@ void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
+ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
+ }
+
++ if (intvl_jiffies > BR_MULTICAST_STARTUP_QUERY_INTVL_MAX) {
++ br_info(brmctx->br,
++ "trying to set multicast startup query interval above maximum, setting to %lu (%ums)\n",
++ jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX),
++ jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MAX));
++ intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MAX;
++ }
++
+ brmctx->multicast_startup_query_interval = intvl_jiffies;
+ }
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index 067d47b8eb8f..ef98ec4c3f51 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -31,6 +31,8 @@
+ #define BR_MULTICAST_DEFAULT_HASH_MAX 4096
+ #define BR_MULTICAST_QUERY_INTVL_MIN msecs_to_jiffies(1000)
+ #define BR_MULTICAST_STARTUP_QUERY_INTVL_MIN BR_MULTICAST_QUERY_INTVL_MIN
++#define BR_MULTICAST_QUERY_INTVL_MAX msecs_to_jiffies(86400000) /* 24 hours */
++#define BR_MULTICAST_STARTUP_QUERY_INTVL_MAX BR_MULTICAST_QUERY_INTVL_MAX
+
+ #define BR_HWDOM_MAX BITS_PER_LONG
+
+--
+2.50.1
+
--- /dev/null
+From c98e67208cbf7fba6851721247cca02ea8778b86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:57 +0800
+Subject: net: ethernet: mtk_ppe: add RCU lock around dev_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 62c30c544359aa18b8fb2734166467a07d435c2d ]
+
+Ensure ndo_fill_forward_path() is called with RCU lock held.
+
+Fixes: 2830e314778d ("net: ethernet: mtk-ppe: fix traffic offload with bridged wlan")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-1-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+index 889fd26843e6..11e16c9e4e92 100644
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -101,7 +101,9 @@ mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_i
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
++ rcu_read_lock();
+ err = dev_fill_forward_path(dev, addr, &stack);
++ rcu_read_unlock();
+ if (err)
+ return err;
+
+--
+2.50.1
+
--- /dev/null
+From 60ab9e88591934197e2fa8581607b50f648c844b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 12:51:19 +0200
+Subject: net: gso: Forbid IPv6 TSO with extensions on devices with only
+ IPV6_CSUM
+
+From: Jakub Ramaseuski <jramaseu@redhat.com>
+
+[ Upstream commit 864e3396976ef41de6cc7bc366276bf4e084fff2 ]
+
+When performing Generic Segmentation Offload (GSO) on an IPv6 packet that
+contains extension headers, the kernel incorrectly requests checksum offload
+if the egress device only advertises NETIF_F_IPV6_CSUM feature, which has
+a strict contract: it supports checksum offload only for plain TCP or UDP
+over IPv6 and explicitly does not support packets with extension headers.
+The current GSO logic violates this contract by failing to disable the feature
+for packets with extension headers, such as those used in GREoIPv6 tunnels.
+
+This violation results in the device being asked to perform an operation
+it cannot support, leading to a `skb_warn_bad_offload` warning and a collapse
+of network throughput. While device TSO/USO is correctly bypassed in favor
+of software GSO for these packets, the GSO stack must be explicitly told not
+to request checksum offload.
+
+Mask NETIF_F_IPV6_CSUM, NETIF_F_TSO6 and NETIF_F_GSO_UDP_L4
+in gso_features_check if the IPv6 header contains extension headers to compute
+checksum in software.
+
+The exception is a BIG TCP extension, which, as stated in commit
+68e068cabd2c6c53 ("net: reenable NETIF_F_IPV6_CSUM offload for BIG TCP packets"):
+"The feature is only enabled on devices that support BIG TCP TSO.
+The header is only present for PF_PACKET taps like tcpdump,
+and not transmitted by physical devices."
+
+kernel log output (truncated):
+WARNING: CPU: 1 PID: 5273 at net/core/dev.c:3535 skb_warn_bad_offload+0x81/0x140
+...
+Call Trace:
+ <TASK>
+ skb_checksum_help+0x12a/0x1f0
+ validate_xmit_skb+0x1a3/0x2d0
+ validate_xmit_skb_list+0x4f/0x80
+ sch_direct_xmit+0x1a2/0x380
+ __dev_xmit_skb+0x242/0x670
+ __dev_queue_xmit+0x3fc/0x7f0
+ ip6_finish_output2+0x25e/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_tnl_xmit+0x608/0xc00 [ip6_tunnel]
+ ip6gre_tunnel_xmit+0x1c0/0x390 [ip6_gre]
+ dev_hard_start_xmit+0x63/0x1c0
+ __dev_queue_xmit+0x6d0/0x7f0
+ ip6_finish_output2+0x214/0x5d0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ ip6_finish_output+0x1fc/0x3f0
+ ip6_xmit+0x2ca/0x6f0
+ inet6_csk_xmit+0xeb/0x150
+ __tcp_transmit_skb+0x555/0xa80
+ tcp_write_xmit+0x32a/0xe90
+ tcp_sendmsg_locked+0x437/0x1110
+ tcp_sendmsg+0x2f/0x50
+...
+skb linear: 00000000: e4 3d 1a 7d ec 30 e4 3d 1a 7e 5d 90 86 dd 60 0e
+skb linear: 00000010: 00 0a 1b 34 3c 40 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000020: 00 00 00 00 00 12 20 11 00 00 00 00 00 00 00 00
+skb linear: 00000030: 00 00 00 00 00 11 2f 00 04 01 04 01 01 00 00 00
+skb linear: 00000040: 86 dd 60 0e 00 0a 1b 00 06 40 20 23 00 00 00 00
+skb linear: 00000050: 00 00 00 00 00 00 00 00 00 12 20 23 00 00 00 00
+skb linear: 00000060: 00 00 00 00 00 00 00 00 00 11 bf 96 14 51 13 f9
+skb linear: 00000070: ae 27 a0 a8 2b e3 80 18 00 40 5b 6f 00 00 01 01
+skb linear: 00000080: 08 0a 42 d4 50 d5 4b 70 f8 1a
+
+Fixes: 04c20a9356f283da ("net: skip offload for NETIF_F_IPV6_CSUM if ipv6 header contains extension")
+Reported-by: Tianhao Zhao <tizhao@redhat.com>
+Suggested-by: Michal Schmidt <mschmidt@redhat.com>
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Jakub Ramaseuski <jramaseu@redhat.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250814105119.1525687-1-jramaseu@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4006fd164b7b..2d3e0e4130c2 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3559,6 +3559,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
+ features &= ~NETIF_F_TSO_MANGLEID;
+ }
+
++ /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
++ * so neither does TSO that depends on it.
++ */
++ if (features & NETIF_F_IPV6_CSUM &&
++ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
++ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
++ vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
++ skb_transport_header_was_set(skb) &&
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
++ features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
++
+ return features;
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 6524551169b27ba9249ecf757aadd7a2e893c389 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:02 +0300
+Subject: net/mlx5: Base ECVF devlink port attrs from 0
+
+From: Daniel Jurgens <danielj@nvidia.com>
+
+[ Upstream commit bc17455bc843b2f4b206e0bb8139013eb3d3c08b ]
+
+Adjust the vport number by the base ECVF vport number so the port
+attributes start at 0. Previously the port attributes would start 1
+after the maximum number of host VFs.
+
+Fixes: dc13180824b7 ("net/mlx5: Enable devlink port for embedded cpu VF vports")
+Signed-off-by: Daniel Jurgens <danielj@nvidia.com>
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Link: https://patch.msgid.link/20250820133209.389065-2-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+index d8e739cbcbce..91319b5acd3d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+@@ -47,10 +47,12 @@ static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, external);
+ } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
++ u16 base_vport = mlx5_core_ec_vf_vport_base(dev);
++
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
+- vport_num - 1, false);
++ vport_num - base_vport, false);
+ }
+ }
+
+--
+2.50.1
+
--- /dev/null
+From d8f00df4b0067c94459661af914c4b042203614f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 16:32:09 +0300
+Subject: net/mlx5e: Preserve shared buffer capacity during headroom updates
+
+From: Armen Ratner <armeng@nvidia.com>
+
+[ Upstream commit 8b0587a885fdb34fd6090a3f8625cb7ac1444826 ]
+
+When port buffer headroom changes, port_update_shared_buffer()
+recalculates the shared buffer size and splits it in a 3:1 ratio
+(lossy:lossless) - Currently, the calculation is:
+lossless = shared / 4;
+lossy = (shared / 4) * 3;
+
+Meaning, the calculation dropped the remainder of shared % 4 due to
+integer division, unintentionally reducing the total shared buffer
+by up to three cells on each update. Over time, this could shrink
+the buffer below usable size.
+
+Fix it by changing the calculation to:
+lossless = shared / 4;
+lossy = shared - lossless;
+
+This retains all buffer cells while still approximating the
+intended 3:1 split, preventing capacity loss over time.
+
+While at it, perform headroom calculations in units of cells rather than
+in bytes for more accurate calculations avoiding extra divisions.
+
+Fixes: a440030d8946 ("net/mlx5e: Update shared buffer along with device buffer changes")
+Signed-off-by: Armen Ratner <armeng@nvidia.com>
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Alexei Lazar <alazar@nvidia.com>
+Signed-off-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Link: https://patch.msgid.link/20250820133209.389065-9-mbloch@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/en/port_buffer.c | 18 ++++++++----------
+ 1 file changed, 8 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+index 5ae787656a7c..3efa8bf1d14e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+@@ -272,8 +272,8 @@ static int port_update_shared_buffer(struct mlx5_core_dev *mdev,
+ /* Total shared buffer size is split in a ratio of 3:1 between
+ * lossy and lossless pools respectively.
+ */
+- lossy_epool_size = (shared_buffer_size / 4) * 3;
+ lossless_ipool_size = shared_buffer_size / 4;
++ lossy_epool_size = shared_buffer_size - lossless_ipool_size;
+
+ mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0,
+ lossy_epool_size);
+@@ -288,14 +288,12 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
+- u32 new_headroom_size = 0;
+- u32 current_headroom_size;
++ u32 current_headroom_cells = 0;
++ u32 new_headroom_cells = 0;
+ void *in;
+ int err;
+ int i;
+
+- current_headroom_size = port_buffer->headroom_size;
+-
+ in = kzalloc(sz, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+@@ -306,12 +304,14 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+
+ for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
+ void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
++ current_headroom_cells += MLX5_GET(bufferx_reg, buffer, size);
++
+ u64 size = port_buffer->buffer[i].size;
+ u64 xoff = port_buffer->buffer[i].xoff;
+ u64 xon = port_buffer->buffer[i].xon;
+
+- new_headroom_size += size;
+ do_div(size, port_buff_cell_sz);
++ new_headroom_cells += size;
+ do_div(xoff, port_buff_cell_sz);
+ do_div(xon, port_buff_cell_sz);
+ MLX5_SET(bufferx_reg, buffer, size, size);
+@@ -320,10 +320,8 @@ static int port_set_buffer(struct mlx5e_priv *priv,
+ MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
+ }
+
+- new_headroom_size /= port_buff_cell_sz;
+- current_headroom_size /= port_buff_cell_sz;
+- err = port_update_shared_buffer(priv->mdev, current_headroom_size,
+- new_headroom_size);
++ err = port_update_shared_buffer(priv->mdev, current_headroom_cells,
++ new_headroom_cells);
+ if (err)
+ goto out;
+
+--
+2.50.1
+
--- /dev/null
+From e2d4bd17c3263deb7f1ef9c57d80dc61cf90e5f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:28 +0000
+Subject: net/sched: Make cake_enqueue return NET_XMIT_CN when past
+ buffer_limit
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 15de71d06a400f7fdc15bf377a2552b0ec437cf5 ]
+
+The following setup can trigger a WARNING in htb_activate due to
+the condition: !cl->leaf.q->q.qlen
+
+tc qdisc del dev lo root
+tc qdisc add dev lo root handle 1: htb default 1
+tc class add dev lo parent 1: classid 1:1 \
+ htb rate 64bit
+tc qdisc add dev lo parent 1:1 handle f: \
+ cake memlimit 1b
+ping -I lo -f -c1 -s64 -W0.001 127.0.0.1
+
+This is because the low memlimit leads to a low buffer_limit, which
+causes packet dropping. However, cake_enqueue still returns
+NET_XMIT_SUCCESS, causing htb_enqueue to call htb_activate with an
+empty child qdisc. We should return NET_XMIT_CN when packets are
+dropped from the same tin and flow.
+
+I do not believe a return value of NET_XMIT_CN is necessary for packet
+drops in the case of ack filtering, as ack filtering is meant to
+optimize performance, not to signal congestion.
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20250819033601.579821-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 09242578dac5..85984c91cf51 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -1762,7 +1762,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ ktime_t now = ktime_get();
+ struct cake_tin_data *b;
+ struct cake_flow *flow;
+- u32 idx;
++ u32 idx, tin;
+
+ /* choose flow to insert into */
+ idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
+@@ -1772,6 +1772,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ __qdisc_drop(skb, to_free);
+ return ret;
+ }
++ tin = (u32)(b - q->tins);
+ idx--;
+ flow = &b->flows[idx];
+
+@@ -1939,13 +1940,22 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ q->buffer_max_used = q->buffer_used;
+
+ if (q->buffer_used > q->buffer_limit) {
++ bool same_flow = false;
+ u32 dropped = 0;
++ u32 drop_id;
+
+ while (q->buffer_used > q->buffer_limit) {
+ dropped++;
+- cake_drop(sch, to_free);
++ drop_id = cake_drop(sch, to_free);
++
++ if ((drop_id >> 16) == tin &&
++ (drop_id & 0xFFFF) == idx)
++ same_flow = true;
+ }
+ b->drop_overlimit += dropped;
++
++ if (same_flow)
++ return NET_XMIT_CN;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+--
+2.50.1
+
--- /dev/null
+From dd72b038e95bbc3b44629f7a22e4a969ea1ade74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 03:36:59 +0000
+Subject: net/sched: Remove unnecessary WARNING condition for empty child qdisc
+ in htb_activate
+
+From: William Liu <will@willsroot.io>
+
+[ Upstream commit 2c2192e5f9c7c2892fe2363244d1387f62710d83 ]
+
+The WARN_ON trigger based on !cl->leaf.q->q.qlen is unnecessary in
+htb_activate. htb_dequeue_tree already accounts for that scenario.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: William Liu <will@willsroot.io>
+Reviewed-by: Savino Dicanosa <savy@syst3mfailure.io>
+Link: https://patch.msgid.link/20250819033632.579854-1-will@willsroot.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 113b305b0d15..c8a426062923 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -592,7 +592,7 @@ htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
+ */
+ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);
++ WARN_ON(cl->level || !cl->leaf.q);
+
+ if (!cl->prio_activity) {
+ cl->prio_activity = 1 << cl->prio;
+--
+2.50.1
+
--- /dev/null
+From d1ca1e5f01260d7160429c72d4f0e9827c4f07f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 13:46:18 +0800
+Subject: net/smc: fix UAF on smcsk after smc_listen_out()
+
+From: D. Wythe <alibuda@linux.alibaba.com>
+
+[ Upstream commit d9cef55ed49117bd63695446fb84b4b91815c0b4 ]
+
+BPF CI testing report a UAF issue:
+
+ [ 16.446633] BUG: kernel NULL pointer dereference, address: 000000000000003 0
+ [ 16.447134] #PF: supervisor read access in kernel mod e
+ [ 16.447516] #PF: error_code(0x0000) - not-present pag e
+ [ 16.447878] PGD 0 P4D 0
+ [ 16.448063] Oops: Oops: 0000 [#1] PREEMPT SMP NOPT I
+ [ 16.448409] CPU: 0 UID: 0 PID: 9 Comm: kworker/0:1 Tainted: G OE 6.13.0-rc3-g89e8a75fda73-dirty #4 2
+ [ 16.449124] Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODUL E
+ [ 16.449502] Hardware name: QEMU Ubuntu 24.04 PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/201 4
+ [ 16.450201] Workqueue: smc_hs_wq smc_listen_wor k
+ [ 16.450531] RIP: 0010:smc_listen_work+0xc02/0x159 0
+ [ 16.452158] RSP: 0018:ffffb5ab40053d98 EFLAGS: 0001024 6
+ [ 16.452526] RAX: 0000000000000001 RBX: 0000000000000002 RCX: 000000000000030 0
+ [ 16.452994] RDX: 0000000000000280 RSI: 00003513840053f0 RDI: 000000000000000 0
+ [ 16.453492] RBP: ffffa097808e3800 R08: ffffa09782dba1e0 R09: 000000000000000 5
+ [ 16.453987] R10: 0000000000000000 R11: 0000000000000000 R12: ffffa0978274640 0
+ [ 16.454497] R13: 0000000000000000 R14: 0000000000000000 R15: ffffa09782d4092 0
+ [ 16.454996] FS: 0000000000000000(0000) GS:ffffa097bbc00000(0000) knlGS:000000000000000 0
+ [ 16.455557] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003 3
+ [ 16.455961] CR2: 0000000000000030 CR3: 0000000102788004 CR4: 0000000000770ef 0
+ [ 16.456459] PKRU: 5555555 4
+ [ 16.456654] Call Trace :
+ [ 16.456832] <TASK >
+ [ 16.456989] ? __die+0x23/0x7 0
+ [ 16.457215] ? page_fault_oops+0x180/0x4c 0
+ [ 16.457508] ? __lock_acquire+0x3e6/0x249 0
+ [ 16.457801] ? exc_page_fault+0x68/0x20 0
+ [ 16.458080] ? asm_exc_page_fault+0x26/0x3 0
+ [ 16.458389] ? smc_listen_work+0xc02/0x159 0
+ [ 16.458689] ? smc_listen_work+0xc02/0x159 0
+ [ 16.458987] ? lock_is_held_type+0x8f/0x10 0
+ [ 16.459284] process_one_work+0x1ea/0x6d 0
+ [ 16.459570] worker_thread+0x1c3/0x38 0
+ [ 16.459839] ? __pfx_worker_thread+0x10/0x1 0
+ [ 16.460144] kthread+0xe0/0x11 0
+ [ 16.460372] ? __pfx_kthread+0x10/0x1 0
+ [ 16.460640] ret_from_fork+0x31/0x5 0
+ [ 16.460896] ? __pfx_kthread+0x10/0x1 0
+ [ 16.461166] ret_from_fork_asm+0x1a/0x3 0
+ [ 16.461453] </TASK >
+ [ 16.461616] Modules linked in: bpf_testmod(OE) [last unloaded: bpf_testmod(OE) ]
+ [ 16.462134] CR2: 000000000000003 0
+ [ 16.462380] ---[ end trace 0000000000000000 ]---
+ [ 16.462710] RIP: 0010:smc_listen_work+0xc02/0x1590
+
+The direct cause of this issue is that after smc_listen_out_connected(),
+newclcsock->sk may be NULL since it will release the smcsk. Therefore,
+if the application closes the socket immediately after accept,
+newclcsock->sk can be NULL. A possible execution order could be as
+follows:
+
+smc_listen_work | userspace
+-----------------------------------------------------------------
+lock_sock(sk) |
+smc_listen_out_connected() |
+| \- smc_listen_out |
+| | \- release_sock |
+ | |- sk->sk_data_ready() |
+ | fd = accept();
+ | close(fd);
+ | \- socket->sk = NULL;
+/* newclcsock->sk is NULL now */
+SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk))
+
+Since smc_listen_out_connected() will not fail, simply swapping the order
+of the code can easily fix this issue.
+
+Fixes: 3b2dec2603d5 ("net/smc: restructure client and server code in af_smc")
+Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
+Reviewed-by: Guangguan Wang <guangguan.wang@linux.alibaba.com>
+Reviewed-by: Alexandra Winter <wintera@linux.ibm.com>
+Reviewed-by: Dust Li <dust.li@linux.alibaba.com>
+Link: https://patch.msgid.link/20250818054618.41615-1-alibuda@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/smc/af_smc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 8f75bb9d165a..b3bfd0f18d41 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -2553,8 +2553,9 @@ static void smc_listen_work(struct work_struct *work)
+ goto out_decl;
+ }
+
+- smc_listen_out_connected(new_smc);
+ SMC_STAT_SERV_SUCC_INC(sock_net(newclcsock->sk), ini);
++ /* smc_listen_out() will release smcsk */
++ smc_listen_out_connected(new_smc);
+ goto out_free;
+
+ out_unlock:
+--
+2.50.1
+
--- /dev/null
+From fb182288ff6d4a4b289fd088652531b5e4b91bb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 17:45:07 +0900
+Subject: net: usb: asix_devices: Fix PHY address mask in MDIO bus
+ initialization
+
+From: Yuichiro Tsuji <yuichtsu@amazon.com>
+
+[ Upstream commit 24ef2f53c07f273bad99173e27ee88d44d135b1c ]
+
+Syzbot reported shift-out-of-bounds exception on MDIO bus initialization.
+
+The PHY address should be masked to 5 bits (0-31). Without this
+mask, invalid PHY addresses could be used, potentially causing issues
+with MDIO bus operations.
+
+Fix this by masking the PHY address with 0x1f (31 decimal) to ensure
+it stays within the valid range.
+
+Fixes: 4faff70959d5 ("net: usb: asix_devices: add phy_mask for ax88772 mdio bus")
+Reported-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=20537064367a0f98d597
+Tested-by: syzbot+20537064367a0f98d597@syzkaller.appspotmail.com
+Signed-off-by: Yuichiro Tsuji <yuichtsu@amazon.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250818084541.1958-1-yuichtsu@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/asix_devices.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
+index 6a2567a67902..f4340d4ef7ee 100644
+--- a/drivers/net/usb/asix_devices.c
++++ b/drivers/net/usb/asix_devices.c
+@@ -676,7 +676,7 @@ static int ax88772_init_mdio(struct usbnet *dev)
+ priv->mdio->read = &asix_mdio_bus_read;
+ priv->mdio->write = &asix_mdio_bus_write;
+ priv->mdio->name = "Asix MDIO Bus";
+- priv->mdio->phy_mask = ~(BIT(priv->phy_addr) | BIT(AX_EMBD_PHY_ADDR));
++ priv->mdio->phy_mask = ~(BIT(priv->phy_addr & 0x1f) | BIT(AX_EMBD_PHY_ADDR));
+ /* mii bus name is usb-<usb bus number>-<usb device number> */
+ snprintf(priv->mdio->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
+ dev->udev->bus->busnum, dev->udev->devnum);
+--
+2.50.1
+
--- /dev/null
+From 4ab3f4980c29f75d50406901be3efdd8ab591d76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 14:37:07 +0200
+Subject: netfilter: nf_reject: don't leak dst refcount for loopback packets
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 91a79b792204313153e1bdbbe5acbfc28903b3a5 ]
+
+recent patches to add a WARN() when replacing skb dst entry found an
+old bug:
+
+WARNING: include/linux/skbuff.h:1165 skb_dst_check_unset include/linux/skbuff.h:1164 [inline]
+WARNING: include/linux/skbuff.h:1165 skb_dst_set include/linux/skbuff.h:1210 [inline]
+WARNING: include/linux/skbuff.h:1165 nf_reject_fill_skb_dst+0x2a4/0x330 net/ipv4/netfilter/nf_reject_ipv4.c:234
+[..]
+Call Trace:
+ nf_send_unreach+0x17b/0x6e0 net/ipv4/netfilter/nf_reject_ipv4.c:325
+ nft_reject_inet_eval+0x4bc/0x690 net/netfilter/nft_reject_inet.c:27
+ expr_call_ops_eval net/netfilter/nf_tables_core.c:237 [inline]
+ ..
+
+This is because the blamed commit forgot about loopback packets.
+Such packets already have a dst_entry attached, even at the PRE_ROUTING
+stage.
+
+Instead of checking the hook, just check whether the skb already has a
+route attached to it.
+
+Fixes: f53b9b0bdc59 ("netfilter: introduce support for reject at prerouting stage")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20250820123707.10671-1-fw@strlen.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_reject_ipv4.c | 6 ++----
+ net/ipv6/netfilter/nf_reject_ipv6.c | 5 ++---
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
+index 675b5bbed638..2d663fe50f87 100644
+--- a/net/ipv4/netfilter/nf_reject_ipv4.c
++++ b/net/ipv4/netfilter/nf_reject_ipv4.c
+@@ -247,8 +247,7 @@ void nf_send_reset(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ if (!oth)
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(oldskb) < 0)
++ if (!skb_dst(oldskb) && nf_reject_fill_skb_dst(oldskb) < 0)
+ return;
+
+ if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+@@ -321,8 +320,7 @@ void nf_send_unreach(struct sk_buff *skb_in, int code, int hook)
+ if (iph->frag_off & htons(IP_OFFSET))
+ return;
+
+- if ((hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) &&
+- nf_reject_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject_fill_skb_dst(skb_in) < 0)
+ return;
+
+ if (skb_csum_unnecessary(skb_in) ||
+diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
+index e4776bd2ed89..f3579bccf0a5 100644
+--- a/net/ipv6/netfilter/nf_reject_ipv6.c
++++ b/net/ipv6/netfilter/nf_reject_ipv6.c
+@@ -293,7 +293,7 @@ void nf_send_reset6(struct net *net, struct sock *sk, struct sk_buff *oldskb,
+ fl6.fl6_sport = otcph->dest;
+ fl6.fl6_dport = otcph->source;
+
+- if (hook == NF_INET_PRE_ROUTING || hook == NF_INET_INGRESS) {
++ if (!skb_dst(oldskb)) {
+ nf_ip6_route(net, &dst, flowi6_to_flowi(&fl6), false);
+ if (!dst)
+ return;
+@@ -397,8 +397,7 @@ void nf_send_unreach6(struct net *net, struct sk_buff *skb_in,
+ if (hooknum == NF_INET_LOCAL_OUT && skb_in->dev == NULL)
+ skb_in->dev = net->loopback_dev;
+
+- if ((hooknum == NF_INET_PRE_ROUTING || hooknum == NF_INET_INGRESS) &&
+- nf_reject6_fill_skb_dst(skb_in) < 0)
++ if (!skb_dst(skb_in) && nf_reject6_fill_skb_dst(skb_in) < 0)
+ return;
+
+ icmpv6_send(skb_in, ICMPV6_DEST_UNREACH, code, 0);
+--
+2.50.1
+
--- /dev/null
+From 514efd99e88ace520d8b75d3a18a42dda6e442ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Aug 2025 12:09:18 +0530
+Subject: Octeontx2-af: Skip overlap check for SPI field
+
+From: Hariprasad Kelam <hkelam@marvell.com>
+
+[ Upstream commit 8c5d95988c34f0aeba1f34cd5e4ba69494c90c5f ]
+
+Octeontx2/CN10K silicon supports generating a 256-bit key per packet.
+The specific fields to be extracted from a packet for key generation
+are configurable via a Key Extraction (MKEX) Profile.
+
+The AF driver scans the configured extraction profile to ensure that
+fields from upper layers do not overwrite fields from lower layers in
+the key.
+
+Example Packet Field Layout:
+LA: DMAC + SMAC
+LB: VLAN
+LC: IPv4/IPv6
+LD: TCP/UDP
+
+Valid MKEX Profile Configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[30-31]
+
+Invalid MKEX profile configuration:
+
+LA -> DMAC -> key_offset[0-5]
+LC -> SIP -> key_offset[20-23]
+LD -> SPORT -> key_offset[2-3] // Overlaps with DMAC field
+
+In another scenario, if the MKEX profile is configured to extract
+the SPI field from both AH and ESP headers at the same key offset,
+the driver rejects this configuration. In regular traffic, an IPsec
+packet will carry either AH (LD) or ESP (LE). This patch relaxes
+the check for this case.
+
+Fixes: 12aa0a3b93f3 ("octeontx2-af: Harden rule validation.")
+Signed-off-by: Hariprasad Kelam <hkelam@marvell.com>
+Link: https://patch.msgid.link/20250820063919.1463518-1-hkelam@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+index 237f82082ebe..0f4e462d39c2 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+@@ -580,8 +580,8 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf)
+ if (!npc_check_field(rvu, blkaddr, NPC_LB, intf))
+ *features &= ~BIT_ULL(NPC_OUTER_VID);
+
+- /* Set SPI flag only if AH/ESP and IPSEC_SPI are in the key */
+- if (npc_check_field(rvu, blkaddr, NPC_IPSEC_SPI, intf) &&
++ /* Allow extracting SPI field from AH and ESP headers at same offset */
++ if (npc_is_field_present(rvu, NPC_IPSEC_SPI, intf) &&
+ (*features & (BIT_ULL(NPC_IPPROTO_ESP) | BIT_ULL(NPC_IPPROTO_AH))))
+ *features |= BIT_ULL(NPC_IPSEC_SPI);
+
+--
+2.50.1
+
--- /dev/null
+From 5e4db681c34eeb659bc87681c95a168e123ffdf9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 Aug 2025 10:10:29 +0200
+Subject: phy: mscc: Fix timestamping for vsc8584
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit bc1a59cff9f797bfbf8f3104507584d89e9ecf2e ]
+
+There was a problem when we received frames and the frames were
+timestamped. The driver is configured to store the nanosecond part of
+the timestamp in the ptp reserved bits and it would take the seconds
+part by reading the LTC. The problem is that when reading the LTC we
+are in atomic context and reading the seconds part goes over the mdio
+bus, which might sleep, so we get an error.
+The fix consists in putting all the frames in a queue and starting the
+aux work; in that work we read the LTC and then calculate the full
+receive time.
+
+Fixes: 7d272e63e0979d ("net: phy: mscc: timestamping and PHC support")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250818081029.1300780-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc.h | 12 ++++++++
+ drivers/net/phy/mscc/mscc_main.c | 12 ++++++++
+ drivers/net/phy/mscc/mscc_ptp.c | 49 ++++++++++++++++++++++++--------
+ 3 files changed, 61 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/phy/mscc/mscc.h b/drivers/net/phy/mscc/mscc.h
+index 7a962050a4d4..cdb343779a8f 100644
+--- a/drivers/net/phy/mscc/mscc.h
++++ b/drivers/net/phy/mscc/mscc.h
+@@ -362,6 +362,13 @@ struct vsc85xx_hw_stat {
+ u16 mask;
+ };
+
++struct vsc8531_skb_cb {
++ u32 ns;
++};
++
++#define VSC8531_SKB_CB(skb) \
++ ((struct vsc8531_skb_cb *)((skb)->cb))
++
+ struct vsc8531_private {
+ int rate_magic;
+ u16 supp_led_modes;
+@@ -410,6 +417,11 @@ struct vsc8531_private {
+ */
+ struct mutex ts_lock;
+ struct mutex phc_lock;
++
++ /* list of skbs that were received and need timestamp information but it
++ * didn't received it yet
++ */
++ struct sk_buff_head rx_skbs_list;
+ };
+
+ /* Shared structure between the PHYs of the same package.
+diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
+index 4171f01d34e5..3de72d9cc22b 100644
+--- a/drivers/net/phy/mscc/mscc_main.c
++++ b/drivers/net/phy/mscc/mscc_main.c
+@@ -2335,6 +2335,13 @@ static int vsc85xx_probe(struct phy_device *phydev)
+ return vsc85xx_dt_led_modes_get(phydev, default_mode);
+ }
+
++static void vsc85xx_remove(struct phy_device *phydev)
++{
++ struct vsc8531_private *priv = phydev->priv;
++
++ skb_queue_purge(&priv->rx_skbs_list);
++}
++
+ /* Microsemi VSC85xx PHYs */
+ static struct phy_driver vsc85xx_driver[] = {
+ {
+@@ -2589,6 +2596,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2614,6 +2622,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8574_probe,
+ .set_wol = &vsc85xx_wol_set,
+ .get_wol = &vsc85xx_wol_get,
+@@ -2639,6 +2648,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2662,6 +2672,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+@@ -2685,6 +2696,7 @@ static struct phy_driver vsc85xx_driver[] = {
+ .config_intr = &vsc85xx_config_intr,
+ .suspend = &genphy_suspend,
+ .resume = &genphy_resume,
++ .remove = &vsc85xx_remove,
+ .probe = &vsc8584_probe,
+ .get_tunable = &vsc85xx_get_tunable,
+ .set_tunable = &vsc85xx_set_tunable,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index d0bd6ab45ebe..add1a9ee721a 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -1193,9 +1193,7 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ {
+ struct vsc8531_private *vsc8531 =
+ container_of(mii_ts, struct vsc8531_private, mii_ts);
+- struct skb_shared_hwtstamps *shhwtstamps = NULL;
+ struct vsc85xx_ptphdr *ptphdr;
+- struct timespec64 ts;
+ unsigned long ns;
+
+ if (!vsc8531->ptp->configured)
+@@ -1205,27 +1203,52 @@ static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+ type == PTP_CLASS_NONE)
+ return false;
+
+- vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
+-
+ ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
+ if (!ptphdr)
+ return false;
+
+- shhwtstamps = skb_hwtstamps(skb);
+- memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
+-
+ ns = ntohl(ptphdr->rsrvd2);
+
+- /* nsec is in reserved field */
+- if (ts.tv_nsec < ns)
+- ts.tv_sec--;
++ VSC8531_SKB_CB(skb)->ns = ns;
++ skb_queue_tail(&vsc8531->rx_skbs_list, skb);
+
+- shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
+- netif_rx(skb);
++ ptp_schedule_worker(vsc8531->ptp->ptp_clock, 0);
+
+ return true;
+ }
+
++static long vsc85xx_do_aux_work(struct ptp_clock_info *info)
++{
++ struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
++ struct skb_shared_hwtstamps *shhwtstamps = NULL;
++ struct phy_device *phydev = ptp->phydev;
++ struct vsc8531_private *priv = phydev->priv;
++ struct sk_buff_head received;
++ struct sk_buff *rx_skb;
++ struct timespec64 ts;
++ unsigned long flags;
++
++ __skb_queue_head_init(&received);
++ spin_lock_irqsave(&priv->rx_skbs_list.lock, flags);
++ skb_queue_splice_tail_init(&priv->rx_skbs_list, &received);
++ spin_unlock_irqrestore(&priv->rx_skbs_list.lock, flags);
++
++ vsc85xx_gettime(info, &ts);
++ while ((rx_skb = __skb_dequeue(&received)) != NULL) {
++ shhwtstamps = skb_hwtstamps(rx_skb);
++ memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
++
++ if (ts.tv_nsec < VSC8531_SKB_CB(rx_skb)->ns)
++ ts.tv_sec--;
++
++ shhwtstamps->hwtstamp = ktime_set(ts.tv_sec,
++ VSC8531_SKB_CB(rx_skb)->ns);
++ netif_rx(rx_skb);
++ }
++
++ return -1;
++}
++
+ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .owner = THIS_MODULE,
+ .name = "VSC85xx timer",
+@@ -1239,6 +1262,7 @@ static const struct ptp_clock_info vsc85xx_clk_caps = {
+ .adjfine = &vsc85xx_adjfine,
+ .gettime64 = &vsc85xx_gettime,
+ .settime64 = &vsc85xx_settime,
++ .do_aux_work = &vsc85xx_do_aux_work,
+ };
+
+ static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
+@@ -1566,6 +1590,7 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
+
+ mutex_init(&vsc8531->phc_lock);
+ mutex_init(&vsc8531->ts_lock);
++ skb_queue_head_init(&vsc8531->rx_skbs_list);
+
+ /* Retrieve the shared load/save GPIO. Request it as non exclusive as
+ * the same GPIO can be requested by all the PHYs of the same package.
+--
+2.50.1
+
--- /dev/null
+From b7d5fad213281f300d20bd5e70431a52e4365987 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Aug 2025 09:25:58 +0800
+Subject: ppp: fix race conditions in ppp_fill_forward_path
+
+From: Qingfang Deng <dqfext@gmail.com>
+
+[ Upstream commit 0417adf367a0af11adf7ace849af4638cfb573f7 ]
+
+ppp_fill_forward_path() has two race conditions:
+
+1. The ppp->channels list can change between list_empty() and
+ list_first_entry(), as ppp_lock() is not held. If the only channel
+ is deleted in ppp_disconnect_channel(), list_first_entry() may
+ access an empty head or a freed entry, and trigger a panic.
+
+2. pch->chan can be NULL. When ppp_unregister_channel() is called,
+ pch->chan is set to NULL before pch is removed from ppp->channels.
+
+Fix these by using a lockless RCU approach:
+- Use list_first_or_null_rcu() to safely test and access the first list
+ entry.
+- Convert list modifications on ppp->channels to their RCU variants and
+ add synchronize_net() after removal.
+- Check for a NULL pch->chan before dereferencing it.
+
+Fixes: f6efc675c9dd ("net: ppp: resolve forwarding path for bridge pppoe devices")
+Signed-off-by: Qingfang Deng <dqfext@gmail.com>
+Link: https://patch.msgid.link/20250814012559.3705-2-dqfext@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/ppp_generic.c | 17 +++++++++++------
+ 1 file changed, 11 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
+index ee1527cf3d0c..28b894bcd7a9 100644
+--- a/drivers/net/ppp/ppp_generic.c
++++ b/drivers/net/ppp/ppp_generic.c
+@@ -33,6 +33,7 @@
+ #include <linux/ppp_channel.h>
+ #include <linux/ppp-comp.h>
+ #include <linux/skbuff.h>
++#include <linux/rculist.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/if_arp.h>
+ #include <linux/ip.h>
+@@ -1613,11 +1614,14 @@ static int ppp_fill_forward_path(struct net_device_path_ctx *ctx,
+ if (ppp->flags & SC_MULTILINK)
+ return -EOPNOTSUPP;
+
+- if (list_empty(&ppp->channels))
++ pch = list_first_or_null_rcu(&ppp->channels, struct channel, clist);
++ if (!pch)
++ return -ENODEV;
++
++ chan = READ_ONCE(pch->chan);
++ if (!chan)
+ return -ENODEV;
+
+- pch = list_first_entry(&ppp->channels, struct channel, clist);
+- chan = pch->chan;
+ if (!chan->ops->fill_forward_path)
+ return -EOPNOTSUPP;
+
+@@ -3000,7 +3004,7 @@ ppp_unregister_channel(struct ppp_channel *chan)
+ */
+ down_write(&pch->chan_sem);
+ spin_lock_bh(&pch->downl);
+- pch->chan = NULL;
++ WRITE_ONCE(pch->chan, NULL);
+ spin_unlock_bh(&pch->downl);
+ up_write(&pch->chan_sem);
+ ppp_disconnect_channel(pch);
+@@ -3506,7 +3510,7 @@ ppp_connect_channel(struct channel *pch, int unit)
+ hdrlen = pch->file.hdrlen + 2; /* for protocol bytes */
+ if (hdrlen > ppp->dev->hard_header_len)
+ ppp->dev->hard_header_len = hdrlen;
+- list_add_tail(&pch->clist, &ppp->channels);
++ list_add_tail_rcu(&pch->clist, &ppp->channels);
+ ++ppp->n_channels;
+ pch->ppp = ppp;
+ refcount_inc(&ppp->file.refcnt);
+@@ -3536,10 +3540,11 @@ ppp_disconnect_channel(struct channel *pch)
+ if (ppp) {
+ /* remove it from the ppp unit's list */
+ ppp_lock(ppp);
+- list_del(&pch->clist);
++ list_del_rcu(&pch->clist);
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
+ ppp_unlock(ppp);
++ synchronize_net();
+ if (refcount_dec_and_test(&ppp->file.refcnt))
+ ppp_destroy_interface(ppp);
+ err = 0;
+--
+2.50.1
+
--- /dev/null
+From 62f4f1f193a71462277a5f82fc167c6117c094ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:57 +0530
+Subject: RDMA/bnxt_re: Fix to do SRQ armena by default
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 6296f9a5293ada28558f2867ac54c487e1e2b9f2 ]
+
+Whenever SRQ is created, make sure SRQ arm enable is always
+set. Driver is always ready to receive SRQ ASYNC event.
+
+Additional note -
+There is no need to do srq arm enable conditionally.
+See bnxt_qplib_armen_db in bnxt_qplib_create_cq().
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-2-kalesh-anakkur.purayil@broadcom.com
+Reviewed-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 68ea4ed0b171..8dc707f98fff 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -685,8 +685,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+- if (srq->threshold)
+- bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
++ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+
+ return 0;
+--
+2.50.1
+
--- /dev/null
+From 76db77f92cf60eeda3b4e4688dcd57b26e20f4e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:40:00 +0530
+Subject: RDMA/bnxt_re: Fix to initialize the PBL array
+
+From: Anantha Prabhu <anantha.prabhu@broadcom.com>
+
+[ Upstream commit 806b9f494f62791ee6d68f515a8056c615a0e7b2 ]
+
+memset the PBL page pointer and page map arrays before
+populating the SGL addresses of the HWQ.
+
+Fixes: 0c4dcd602817 ("RDMA/bnxt_re: Refactor hardware queue memory allocation")
+Signed-off-by: Anantha Prabhu <anantha.prabhu@broadcom.com>
+Reviewed-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Reviewed-by: Selvin Xavier <selvin.xavier@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-5-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/qplib_res.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index 96ceec1e8199..77da7cf34427 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -121,6 +121,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = vmalloc_array(pages, sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
++ memset(pbl->pg_arr, 0, pages * sizeof(void *));
+
+ pbl->pg_map_arr = vmalloc_array(pages, sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+@@ -128,6 +129,7 @@ static int __alloc_pbl(struct bnxt_qplib_res *res,
+ pbl->pg_arr = NULL;
+ return -ENOMEM;
+ }
++ memset(pbl->pg_map_arr, 0, pages * sizeof(dma_addr_t));
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+
+--
+2.50.1
+
--- /dev/null
+From a28f4e7abd63ac9d3991759a76327ba23fc4be42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 15:39:58 +0530
+Subject: RDMA/bnxt_re: Fix to remove workload check in SRQ limit path
+
+From: Kashyap Desai <kashyap.desai@broadcom.com>
+
+[ Upstream commit 666bce0bd7e771127cb0cda125cc9d32d9f9f15d ]
+
+There should not be any checks of current workload to set
+srq_limit value to SRQ hw context.
+
+Remove all such workload checks and make a direct call to
+set srq_limit via doorbell SRQ_ARM.
+
+Fixes: 37cb11acf1f7 ("RDMA/bnxt_re: Add SRQ support for Broadcom adapters")
+Signed-off-by: Kashyap Desai <kashyap.desai@broadcom.com>
+Signed-off-by: Saravanan Vajravel <saravanan.vajravel@broadcom.com>
+Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
+Link: https://patch.msgid.link/20250805101000.233310-3-kalesh-anakkur.purayil@broadcom.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/bnxt_re/ib_verbs.c | 8 ++-----
+ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 27 ------------------------
+ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 2 --
+ 3 files changed, 2 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index f7345e4890a1..31fff5885f1a 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -1823,7 +1823,6 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ struct bnxt_re_srq *srq = container_of(ib_srq, struct bnxt_re_srq,
+ ib_srq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+- int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+@@ -1835,11 +1834,8 @@ int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+- rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+- if (rc) {
+- ibdev_err(&rdev->ibdev, "Modify HW SRQ failed!");
+- return rc;
+- }
++ bnxt_qplib_srq_arm_db(&srq->qplib_srq.dbinfo, srq->qplib_srq.threshold);
++
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+ /* No need to Build and send response back to udata */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 8dc707f98fff..c19dd732c235 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -686,7 +686,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+- srq->arm_req = false;
+
+ return 0;
+ fail:
+@@ -696,24 +695,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ return rc;
+ }
+
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq)
+-{
+- struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+- u32 count;
+-
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- if (count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- } else {
+- /* Deferred arming */
+- srq->arm_req = true;
+- }
+-
+- return 0;
+-}
+-
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+ {
+@@ -755,7 +736,6 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct rq_wqe *srqe;
+ struct sq_sge *hw_sge;
+- u32 count = 0;
+ int i, next;
+
+ spin_lock(&srq_hwq->lock);
+@@ -787,15 +767,8 @@ int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+
+- spin_lock(&srq_hwq->lock);
+- count = __bnxt_qplib_get_avail(srq_hwq);
+- spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+- if (srq->arm_req == true && count > srq->threshold) {
+- srq->arm_req = false;
+- bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
+- }
+
+ return 0;
+ }
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 55fd840359ef..288196facfd7 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -519,8 +519,6 @@ int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
+ srqn_handler_t srq_handler);
+ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+-int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+- struct bnxt_qplib_srq *srq);
+ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+ void bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+--
+2.50.1
+
--- /dev/null
+From 1d158fc260eb696cd430d837cfc95ee28ea1c2e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:53:55 +0800
+Subject: RDMA/erdma: Fix ignored return value of init_kernel_qp
+
+From: Boshi Yu <boshiyu@linux.alibaba.com>
+
+[ Upstream commit d5c74713f0117d07f91eb48b10bc2ad44e23c9b9 ]
+
+The init_kernel_qp interface may fail. Check its return value and free
+related resources properly when it does.
+
+Fixes: 155055771704 ("RDMA/erdma: Add verbs implementation")
+Reviewed-by: Cheng Xu <chengyou@linux.alibaba.com>
+Signed-off-by: Boshi Yu <boshiyu@linux.alibaba.com>
+Link: https://patch.msgid.link/20250725055410.67520-3-boshiyu@linux.alibaba.com
+Signed-off-by: Leon Romanovsky <leon@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/erdma/erdma_verbs.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/erdma/erdma_verbs.c b/drivers/infiniband/hw/erdma/erdma_verbs.c
+index 29ad2f5ffabe..e990690d8b3c 100644
+--- a/drivers/infiniband/hw/erdma/erdma_verbs.c
++++ b/drivers/infiniband/hw/erdma/erdma_verbs.c
+@@ -979,7 +979,9 @@ int erdma_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
+ if (ret)
+ goto err_out_cmd;
+ } else {
+- init_kernel_qp(dev, qp, attrs);
++ ret = init_kernel_qp(dev, qp, attrs);
++ if (ret)
++ goto err_out_xa;
+ }
+
+ qp->attrs.max_send_sge = attrs->cap.max_send_sge;
+--
+2.50.1
+
--- /dev/null
+From 24645093d612a17d0513efcfc8e6f14df08de6e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 14:35:40 +0200
+Subject: s390/hypfs: Avoid unnecessary ioctl registration in debugfs
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit fec7bdfe7f8694a0c39e6c3ec026ff61ca1058b9 ]
+
+Currently, hypfs registers ioctl callbacks for all debugfs files,
+despite only one file requiring them. This leads to unintended exposure
+of unused interfaces to user space and can trigger side effects such as
+restricted access when kernel lockdown is enabled.
+
+Restrict ioctl registration to only those files that implement ioctl
+functionality to avoid interface clutter and unnecessary access
+restrictions.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index 4024599eb448..757d232f5d40 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -64,24 +64,28 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+ long rc;
+
+ mutex_lock(&df->lock);
+- if (df->unlocked_ioctl)
+- rc = df->unlocked_ioctl(file, cmd, arg);
+- else
+- rc = -ENOTTY;
++ rc = df->unlocked_ioctl(file, cmd, arg);
+ mutex_unlock(&df->lock);
+ return rc;
+ }
+
+-static const struct file_operations dbfs_ops = {
++static const struct file_operations dbfs_ops_ioctl = {
+ .read = dbfs_read,
+ .llseek = no_llseek,
+ .unlocked_ioctl = dbfs_ioctl,
+ };
+
++static const struct file_operations dbfs_ops = {
++ .read = dbfs_read,
++};
++
+ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+- df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
+- &dbfs_ops);
++ const struct file_operations *fops = &dbfs_ops;
++
++ if (df->unlocked_ioctl)
++ fops = &dbfs_ops_ioctl;
++ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+ }
+
+--
+2.50.1
+
--- /dev/null
+From 416ad5cadb0715af9e946b2475e99648743a41aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Aug 2025 15:12:37 +0200
+Subject: s390/hypfs: Enable limited access during lockdown
+
+From: Peter Oberparleiter <oberpar@linux.ibm.com>
+
+[ Upstream commit 3868f910440c47cd5d158776be4ba4e2186beda7 ]
+
+When kernel lockdown is active, debugfs_locked_down() blocks access to
+hypfs files that register ioctl callbacks, even if the ioctl interface
+is not required for a function. This unnecessarily breaks userspace
+tools that only rely on read operations.
+
+Resolve this by registering a minimal set of file operations during
+lockdown, avoiding ioctl registration and preserving access for affected
+tooling.
+
+Note that this change restores hypfs functionality when lockdown is
+active from early boot (e.g. via lockdown=integrity kernel parameter),
+but does not apply to scenarios where lockdown is enabled dynamically
+while Linux is running.
+
+Tested-by: Mete Durlu <meted@linux.ibm.com>
+Reviewed-by: Vasily Gorbik <gor@linux.ibm.com>
+Fixes: 5496197f9b08 ("debugfs: Restrict debugfs when the kernel is locked down")
+Signed-off-by: Peter Oberparleiter <oberpar@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/hypfs/hypfs_dbfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
+index 757d232f5d40..3612af9b4890 100644
+--- a/arch/s390/hypfs/hypfs_dbfs.c
++++ b/arch/s390/hypfs/hypfs_dbfs.c
+@@ -6,6 +6,7 @@
+ * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
+ */
+
++#include <linux/security.h>
+ #include <linux/slab.h>
+ #include "hypfs.h"
+
+@@ -83,7 +84,7 @@ void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
+ {
+ const struct file_operations *fops = &dbfs_ops;
+
+- if (df->unlocked_ioctl)
++ if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+ fops = &dbfs_ops_ioctl;
+ df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
+ mutex_init(&df->lock);
+--
+2.50.1
+
--- /dev/null
+From 26be02e8541200f7c02b2196a63429cb1d556841 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 08:49:08 +0300
+Subject: scsi: qla4xxx: Prevent a potential error pointer dereference
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 9dcf111dd3e7ed5fce82bb108e3a3fc001c07225 ]
+
+The qla4xxx_get_ep_fwdb() function is supposed to return NULL on error,
+but qla4xxx_ep_connect() returns error pointers. Propagating the error
+pointers will lead to an Oops in the caller, so change the error pointers
+to NULL.
+
+Fixes: 13483730a13b ("[SCSI] qla4xxx: fix flash/ddb support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://lore.kernel.org/r/aJwnVKS9tHsw1tEu@stanley.mountain
+Reviewed-by: Chris Leech <cleech@redhat.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qla4xxx/ql4_os.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
+index 77c28d2ebf01..d91efd7c983c 100644
+--- a/drivers/scsi/qla4xxx/ql4_os.c
++++ b/drivers/scsi/qla4xxx/ql4_os.c
+@@ -6606,6 +6606,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
+
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
+ vfree(dst_addr);
++ if (IS_ERR(ep))
++ return NULL;
+ return ep;
+ }
+
+--
+2.50.1
+
usb-typec-maxim_contaminant-re-enable-cc-toggle-if-cc-is-open-and-port-is-clean.patch
usb-typec-maxim_contaminant-disable-low-power-mode-when-reading-comparator-values.patch
x86-cpu-hygon-add-missing-resctrl_cpu_detect-in-bsp_init-helper.patch
+spi-spi-fsl-lpspi-clamp-too-high-speed_hz.patch
+drm-nouveau-nvif-fix-potential-memory-leak-in-nvif_v.patch
+cgroup-cpuset-use-static_branch_enable_cpuslocked-on.patch
+iosys-map-fix-undefined-behavior-in-iosys_map_clear.patch
+rdma-erdma-fix-ignored-return-value-of-init_kernel_q.patch
+rdma-bnxt_re-fix-to-do-srq-armena-by-default.patch
+rdma-bnxt_re-fix-to-remove-workload-check-in-srq-lim.patch
+rdma-bnxt_re-fix-to-initialize-the-pbl-array.patch
+net-bridge-fix-soft-lockup-in-br_multicast_query_exp.patch
+scsi-qla4xxx-prevent-a-potential-error-pointer-deref.patch
+iommu-amd-avoid-stack-buffer-overflow-from-kernel-cm.patch
+bluetooth-hci_event-fix-mtu-for-bn-0-in-cis-establis.patch
+bluetooth-hci_conn-do-return-error-from-hci_enhanced.patch
+mlxsw-spectrum-forward-packets-with-an-ipv4-link-loc.patch
+drm-hisilicon-hibmc-fix-the-hibmc-loaded-failed-bug.patch
+alsa-usb-audio-fix-size-validation-in-convert_chmap_.patch
+drm-amd-display-add-null-pointer-check-in-mod_hdcp_h.patch
+net-gso-forbid-ipv6-tso-with-extensions-on-devices-w.patch
+ipv6-sr-validate-hmac-algorithm-id-in-seg6_hmac_info.patch
+net-ethernet-mtk_ppe-add-rcu-lock-around-dev_fill_fo.patch
+ppp-fix-race-conditions-in-ppp_fill_forward_path.patch
+phy-mscc-fix-timestamping-for-vsc8584.patch
+net-usb-asix_devices-fix-phy-address-mask-in-mdio-bu.patch
+gve-prevent-ethtool-ops-after-shutdown.patch
+net-smc-fix-uaf-on-smcsk-after-smc_listen_out.patch
+loongarch-optimize-module-load-time-by-optimizing-pl.patch
+ixgbe-xsk-resolve-the-negative-overflow-of-budget-in.patch
+igc-fix-disabling-l1.2-pci-e-link-substate-on-i226-o.patch
+net-sched-make-cake_enqueue-return-net_xmit_cn-when-.patch
+net-sched-remove-unnecessary-warning-condition-for-e.patch
+bonding-update-lacp-activity-flag-after-setting-lacp.patch
+bonding-add-independent-control-state-machine.patch
+bonding-send-lacpdus-periodically-in-passive-mode-af.patch
+octeontx2-af-skip-overlap-check-for-spi-field.patch
+net-mlx5-base-ecvf-devlink-port-attrs-from-0.patch
+net-mlx5e-preserve-shared-buffer-capacity-during-hea.patch
+alsa-usb-audio-use-correct-sub-type-for-uac3-feature.patch
+s390-hypfs-avoid-unnecessary-ioctl-registration-in-d.patch
+s390-hypfs-enable-limited-access-during-lockdown.patch
+netfilter-nf_reject-don-t-leak-dst-refcount-for-loop.patch
--- /dev/null
+From 0a7c3b693af958dbbb7c59df70f4e377e8b8eada Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 12:07:42 +0200
+Subject: spi: spi-fsl-lpspi: Clamp too high speed_hz
+
+From: Stefan Wahren <wahrenst@gmx.net>
+
+[ Upstream commit af357a6a3b7d685e7aa621c6fb1d4ed6c349ec9e ]
+
+Currently the driver is not able to handle the case that a SPI device
+specifies a higher spi-max-frequency than half of per-clk:
+
+ per-clk should be at least two times of transfer speed
+
+Fix this by clamping to the max possible value and use the minimum SCK
+period of 2 cycles.
+
+Fixes: 77736a98b859 ("spi: lpspi: add the error info of transfer speed setting")
+Signed-off-by: Stefan Wahren <wahrenst@gmx.net>
+Link: https://patch.msgid.link/20250807100742.9917-1-wahrenst@gmx.net
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-fsl-lpspi.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index 9e2541dee56e..fa899ab2014c 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -330,13 +330,11 @@ static int fsl_lpspi_set_bitrate(struct fsl_lpspi_data *fsl_lpspi)
+ }
+
+ if (config.speed_hz > perclk_rate / 2) {
+- dev_err(fsl_lpspi->dev,
+- "per-clk should be at least two times of transfer speed");
+- return -EINVAL;
++ div = 2;
++ } else {
++ div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+ }
+
+- div = DIV_ROUND_UP(perclk_rate, config.speed_hz);
+-
+ for (prescale = 0; prescale <= prescale_max; prescale++) {
+ scldiv = div / (1 << prescale) - 2;
+ if (scldiv >= 0 && scldiv < 256) {
+--
+2.50.1
+