--- /dev/null
+From a639b801ef45f2d2e5422ae0b98f70b36b9859c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 21:58:04 +0200
+Subject: bpf: Scrub packet on bpf_redirect_peer
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+[ Upstream commit c4327229948879814229b46aa26a750718888503 ]
+
+When bpf_redirect_peer is used to redirect packets to a device in
+another network namespace, the skb isn't scrubbed. That can lead to skb
+information from one namespace being "misused" in another namespace.
+
+As one example, this is causing Cilium to drop traffic when using
+bpf_redirect_peer to redirect packets that just went through IPsec
+decryption to a container namespace. The following pwru trace shows (1)
+the packet path from the host's XFRM layer to the container's XFRM
+layer where it's dropped and (2) the number of active skb extensions at
+each function.
+
+ NETNS MARK IFACE TUPLE FUNC
+ 4026533547 d00 eth0 10.244.3.124:35473->10.244.2.158:53 xfrm_rcv_cb
+ .active_extensions = (__u8)2,
+ 4026533547 d00 eth0 10.244.3.124:35473->10.244.2.158:53 xfrm4_rcv_cb
+ .active_extensions = (__u8)2,
+ 4026533547 d00 eth0 10.244.3.124:35473->10.244.2.158:53 gro_cells_receive
+ .active_extensions = (__u8)2,
+ [...]
+ 4026533547 0 eth0 10.244.3.124:35473->10.244.2.158:53 skb_do_redirect
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 ip_rcv
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 ip_rcv_core
+ .active_extensions = (__u8)2,
+ [...]
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 udp_queue_rcv_one_skb
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 __xfrm_policy_check
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 __xfrm_decode_session
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 security_xfrm_decode_session
+ .active_extensions = (__u8)2,
+ 4026534999 0 eth0 10.244.3.124:35473->10.244.2.158:53 kfree_skb_reason(SKB_DROP_REASON_XFRM_POLICY)
+ .active_extensions = (__u8)2,
+
+In this case, there are no XFRM policies in the container's network
+namespace so the drop is unexpected. When we decrypt the IPsec packet,
+the XFRM state used for decryption is set in the skb extensions. This
+information is preserved across the netns switch. When we reach the
+XFRM policy check in the container's netns, __xfrm_policy_check drops
+the packet with LINUX_MIB_XFRMINNOPOLS because a (container-side) XFRM
+policy can't be found that matches the (host-side) XFRM state used for
+decryption.
+
+This patch fixes this by scrubbing the packet when using
+bpf_redirect_peer, as is done on typical netns switches via veth
+devices, except that skb->mark and skb->tstamp are not zeroed.
+
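+For reference, an abridged sketch of skb_scrub_packet() (based on the
+upstream implementation; exact fields vary by kernel version) shows why
+passing xnet == false keeps skb->mark and skb->tstamp while still
+dropping the skb extensions that carry the stale XFRM state:
+
+	void skb_scrub_packet(struct sk_buff *skb, bool xnet)
+	{
+		skb->pkt_type = PACKET_HOST;
+		skb->skb_iif = 0;
+		skb->ignore_df = 0;
+		skb_dst_drop(skb);
+		skb_ext_reset(skb);	/* drops the stale XFRM state extension */
+		nf_reset_ct(skb);
+		nf_reset_trace(skb);
+
+		if (!xnet)
+			return;		/* bpf_redirect_peer passes xnet == false */
+
+		skb->mark = 0;		/* only zeroed on full cross-netns scrubs */
+		skb_clear_tstamp(skb);
+	}
+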
+Fixes: 9aa1206e8f482 ("bpf: Add redirect_peer helper")
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/1728ead5e0fe45e7a6542c36bd4e3ca07a73b7d6.1746460653.git.paul.chaignon@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 790345c2546b7..99b23fd2f509c 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2526,6 +2526,7 @@ int skb_do_redirect(struct sk_buff *skb)
+ goto out_drop;
+ skb->dev = dev;
+ dev_sw_netstats_rx_add(dev, skb->len);
++ skb_scrub_packet(skb, false);
+ return -EAGAIN;
+ }
+ return flags & BPF_F_NEIGH ?
+--
+2.39.5
+
--- /dev/null
+From 975be82417fc5c2b34f57858a72e4d2297a149da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 09:05:55 +0200
+Subject: can: gw: fix RCU/BH usage in cgw_create_job()
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit 511e64e13d8cc72853275832e3f372607466c18c ]
+
+As reported by Sebastian Andrzej Siewior, the use of local_bh_disable()
+to update the modification rules is only feasible on uniprocessor
+systems. The usual use case is to update the data of the modifications,
+but not the modification types (AND/OR/XOR/SET) or the checksum
+functions themselves.
+
+To avoid additional memory allocations while keeping modification
+switching times fast, the modification description space is doubled at
+gw-job creation time so that only the reference to the active
+modification description needs to be changed under RCU protection.
+
+Rename cgw_job::mod to cf_mod and make it an RCU pointer. Allocate it
+in cgw_create_job() and free it together with cgw_job in
+cgw_job_free_rcu(). Update all users to dereference cgw_job::cf_mod
+with an RCU accessor and, where possible, only once.
+
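+The resulting update/read pattern is the classic RCU pointer swap (a
+sketch using the names from the diff below):
+
+	/* updater, under RTNL: publish the new mod, free the old one later */
+	old_cf = cgw_job_cf_mod(gwj);
+	rcu_assign_pointer(gwj->cf_mod, mod);
+	kfree_rcu_mightsleep(old_cf);
+
+	/* reader, inside the RCU read section of can_can_gw_rcv():
+	 * dereference once and use the snapshot consistently
+	 */
+	mod = rcu_dereference(gwj->cf_mod);
+	if (mod->modfunc[0])
+		nskb = skb_copy(skb, GFP_ATOMIC);
+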
+[bigeasy: Replace mod1/mod2 from the Oliver's original patch with dynamic
+allocation, use RCU annotation and accessor]
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Closes: https://lore.kernel.org/linux-can/20231031112349.y0aLoBrz@linutronix.de/
+Fixes: dd895d7f21b2 ("can: cangw: introduce optional uid to reference created routing jobs")
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://patch.msgid.link/20250429070555.cs-7b_eZ@linutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/gw.c | 149 +++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 90 insertions(+), 59 deletions(-)
+
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 37528826935e7..e65500c52bf5c 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -130,7 +130,7 @@ struct cgw_job {
+ u32 handled_frames;
+ u32 dropped_frames;
+ u32 deleted_frames;
+- struct cf_mod mod;
++ struct cf_mod __rcu *cf_mod;
+ union {
+ /* CAN frame data source */
+ struct net_device *dev;
+@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ struct cgw_job *gwj = (struct cgw_job *)data;
+ struct canfd_frame *cf;
+ struct sk_buff *nskb;
++ struct cf_mod *mod;
+ int modidx = 0;
+
+ /* process strictly Classic CAN or CAN FD frames */
+@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ * When there is at least one modification function activated,
+ * we need to copy the skb as we want to modify skb->data.
+ */
+- if (gwj->mod.modfunc[0])
++ mod = rcu_dereference(gwj->cf_mod);
++ if (mod->modfunc[0])
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ else
+ nskb = skb_clone(skb, GFP_ATOMIC);
+@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ cf = (struct canfd_frame *)nskb->data;
+
+ /* perform preprocessed modification functions if there are any */
+- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
++ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
++ (*mod->modfunc[modidx++])(cf, mod);
+
+ /* Has the CAN frame been modified? */
+ if (modidx) {
+@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ }
+
+ /* check for checksum updates */
+- if (gwj->mod.csumfunc.crc8)
+- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++ if (mod->csumfunc.crc8)
++ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
+
+- if (gwj->mod.csumfunc.xor)
+- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++ if (mod->csumfunc.xor)
++ (*mod->csumfunc.xor)(cf, &mod->csum.xor);
+ }
+
+ /* clear the skb timestamp if not configured the other way */
+@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
+ {
+ struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+
++ /* cgw_job::cf_mod is always accessed from the same cgw_job object within
++ * the same RCU read section. Once cgw_job is scheduled for removal,
++ * cf_mod can also be removed without mandating an additional grace period.
++ */
++ kfree(rcu_access_pointer(gwj->cf_mod));
+ kmem_cache_free(cgw_cache, gwj);
+ }
+
++/* Return cgw_job::cf_mod with RTNL protected section */
++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
++{
++ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
++}
++
+ static int cgw_notifier(struct notifier_block *nb,
+ unsigned long msg, void *ptr)
+ {
+@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ {
+ struct rtcanmsg *rtcan;
+ struct nlmsghdr *nlh;
++ struct cf_mod *mod;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
+ if (!nlh)
+@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ goto cancel;
+ }
+
++ mod = cgw_job_cf_mod(gwj);
+ if (gwj->flags & CGW_FLAGS_CAN_FD) {
+ struct cgw_fdframe_mod mb;
+
+- if (gwj->mod.modtype.and) {
+- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.and;
++ if (mod->modtype.and) {
++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++ mb.modtype = mod->modtype.and;
+ if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.or) {
+- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.or;
++ if (mod->modtype.or) {
++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++ mb.modtype = mod->modtype.or;
+ if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.xor) {
+- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.xor;
++ if (mod->modtype.xor) {
++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++ mb.modtype = mod->modtype.xor;
+ if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.set) {
+- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.set;
++ if (mod->modtype.set) {
++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++ mb.modtype = mod->modtype.set;
+ if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+ } else {
+ struct cgw_frame_mod mb;
+
+- if (gwj->mod.modtype.and) {
+- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.and;
++ if (mod->modtype.and) {
++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++ mb.modtype = mod->modtype.and;
+ if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.or) {
+- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.or;
++ if (mod->modtype.or) {
++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++ mb.modtype = mod->modtype.or;
+ if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.xor) {
+- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.xor;
++ if (mod->modtype.xor) {
++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++ mb.modtype = mod->modtype.xor;
+ if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.set) {
+- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.set;
++ if (mod->modtype.set) {
++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++ mb.modtype = mod->modtype.set;
+ if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+ }
+
+- if (gwj->mod.uid) {
+- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
++ if (mod->uid) {
++ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.csumfunc.crc8) {
++ if (mod->csumfunc.crc8) {
+ if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+- &gwj->mod.csum.crc8) < 0)
++ &mod->csum.crc8) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.csumfunc.xor) {
++ if (mod->csumfunc.xor) {
+ if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+- &gwj->mod.csum.xor) < 0)
++ &mod->csum.xor) < 0)
+ goto cancel;
+ }
+
+@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct net *net = sock_net(skb->sk);
+ struct rtcanmsg *r;
+ struct cgw_job *gwj;
+- struct cf_mod mod;
++ struct cf_mod *mod;
+ struct can_can_gw ccgw;
+ u8 limhops = 0;
+ int err = 0;
+@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
++ mod = kmalloc(sizeof(*mod), GFP_KERNEL);
++ if (!mod)
++ return -ENOMEM;
++
++ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ if (err < 0)
+- return err;
++ goto out_free_cf;
+
+- if (mod.uid) {
++ if (mod->uid) {
+ ASSERT_RTNL();
+
+ /* check for updating an existing job with identical uid */
+ hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
+- if (gwj->mod.uid != mod.uid)
++ struct cf_mod *old_cf;
++
++ old_cf = cgw_job_cf_mod(gwj);
++ if (old_cf->uid != mod->uid)
+ continue;
+
+ /* interfaces & filters must be identical */
+- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+- return -EINVAL;
++ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
++ err = -EINVAL;
++ goto out_free_cf;
++ }
+
+- /* update modifications with disabled softirq & quit */
+- local_bh_disable();
+- memcpy(&gwj->mod, &mod, sizeof(mod));
+- local_bh_enable();
++ rcu_assign_pointer(gwj->cf_mod, mod);
++ kfree_rcu_mightsleep(old_cf);
+ return 0;
+ }
+ }
+
+ /* ifindex == 0 is not allowed for job creation */
+- if (!ccgw.src_idx || !ccgw.dst_idx)
+- return -ENODEV;
++ if (!ccgw.src_idx || !ccgw.dst_idx) {
++ err = -ENODEV;
++ goto out_free_cf;
++ }
+
+ gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+- if (!gwj)
+- return -ENOMEM;
++ if (!gwj) {
++ err = -ENOMEM;
++ goto out_free_cf;
++ }
+
+ gwj->handled_frames = 0;
+ gwj->dropped_frames = 0;
+@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ gwj->limit_hops = limhops;
+
+ /* insert already parsed information */
+- memcpy(&gwj->mod, &mod, sizeof(mod));
++ RCU_INIT_POINTER(gwj->cf_mod, mod);
+ memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
+
+ err = -ENODEV;
+@@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (!err)
+ hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
+ out:
+- if (err)
++ if (err) {
+ kmem_cache_free(cgw_cache, gwj);
+-
++out_free_cf:
++ kfree(mod);
++ }
+ return err;
+ }
+
+@@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+
+ /* remove only the first matching entry */
+ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
++ struct cf_mod *cf_mod;
++
+ if (gwj->flags != r->flags)
+ continue;
+
+ if (gwj->limit_hops != limhops)
+ continue;
+
++ cf_mod = cgw_job_cf_mod(gwj);
+ /* we have a match when uid is enabled and identical */
+- if (gwj->mod.uid || mod.uid) {
+- if (gwj->mod.uid != mod.uid)
++ if (cf_mod->uid || mod.uid) {
++ if (cf_mod->uid != mod.uid)
+ continue;
+ } else {
+ /* no uid => check for identical modifications */
+- if (memcmp(&gwj->mod, &mod, sizeof(mod)))
++ if (memcmp(cf_mod, &mod, sizeof(mod)))
+ continue;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 1ebd213472c5e42bf227816e999c5e8944e3df6b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Apr 2025 13:17:45 +0200
+Subject: can: m_can: m_can_class_allocate_dev(): initialize spin lock on
+ device probe
+
+From: Antonios Salios <antonios@mwa.re>
+
+[ Upstream commit dcaeeb8ae84c5506ebc574732838264f3887738c ]
+
+The spin lock tx_handling_spinlock in struct m_can_classdev is not
+being initialized. This leads the following spinlock bad magic
+complaint from the kernel, eg. when trying to send CAN frames with
+cansend from can-utils:
+
+| BUG: spinlock bad magic on CPU#0, cansend/95
+| lock: 0xff60000002ec1010, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
+| CPU: 0 UID: 0 PID: 95 Comm: cansend Not tainted 6.15.0-rc3-00032-ga79be02bba5c #5 NONE
+| Hardware name: MachineWare SIM-V (DT)
+| Call Trace:
+| [<ffffffff800133e0>] dump_backtrace+0x1c/0x24
+| [<ffffffff800022f2>] show_stack+0x28/0x34
+| [<ffffffff8000de3e>] dump_stack_lvl+0x4a/0x68
+| [<ffffffff8000de70>] dump_stack+0x14/0x1c
+| [<ffffffff80003134>] spin_dump+0x62/0x6e
+| [<ffffffff800883ba>] do_raw_spin_lock+0xd0/0x142
+| [<ffffffff807a6fcc>] _raw_spin_lock_irqsave+0x20/0x2c
+| [<ffffffff80536dba>] m_can_start_xmit+0x90/0x34a
+| [<ffffffff806148b0>] dev_hard_start_xmit+0xa6/0xee
+| [<ffffffff8065b730>] sch_direct_xmit+0x114/0x292
+| [<ffffffff80614e2a>] __dev_queue_xmit+0x3b0/0xaa8
+| [<ffffffff8073b8fa>] can_send+0xc6/0x242
+| [<ffffffff8073d1c0>] raw_sendmsg+0x1a8/0x36c
+| [<ffffffff805ebf06>] sock_write_iter+0x9a/0xee
+| [<ffffffff801d06ea>] vfs_write+0x184/0x3a6
+| [<ffffffff801d0a88>] ksys_write+0xa0/0xc0
+| [<ffffffff801d0abc>] __riscv_sys_write+0x14/0x1c
+| [<ffffffff8079ebf8>] do_trap_ecall_u+0x168/0x212
+| [<ffffffff807a830a>] handle_exception+0x146/0x152
+
+Initializing the spin lock in m_can_class_allocate_dev solves that
+problem.
+
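+The "bad magic" report comes from CONFIG_DEBUG_SPINLOCK: spin_lock_init()
+stamps the lock with a magic value that do_raw_spin_lock() checks before
+taking the lock (a simplified sketch of the kernel's debug check):
+
+	/* sketch, simplified from the kernel's spinlock debugging code */
+	#define SPINLOCK_MAGIC		0xdead4ead
+
+	static void debug_spin_lock_before(raw_spinlock_t *lock)
+	{
+		/* a zeroed, never-initialized lock has .magic == 00000000 */
+		SPIN_BUG_ON(lock->magic != SPINLOCK_MAGIC, lock, "bad magic");
+	}
+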
+Fixes: 1fa80e23c150 ("can: m_can: Introduce a tx_fifo_in_flight counter")
+Signed-off-by: Antonios Salios <antonios@mwa.re>
+Reviewed-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Link: https://patch.msgid.link/20250425111744.37604-2-antonios@mwa.re
+Reviewed-by: Markus Schneider-Pargmann <msp@baylibre.com>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/m_can/m_can.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
+index 3c2c1db9866d4..dbd4d8796f9b0 100644
+--- a/drivers/net/can/m_can/m_can.c
++++ b/drivers/net/can/m_can/m_can.c
+@@ -2372,6 +2372,7 @@ struct m_can_classdev *m_can_class_allocate_dev(struct device *dev,
+ SET_NETDEV_DEV(net_dev, dev);
+
+ m_can_of_parse_mram(class_dev, mram_config_vals);
++ spin_lock_init(&class_dev->tx_handling_spinlock);
+ out:
+ return class_dev;
+ }
+--
+2.39.5
+
--- /dev/null
+From 0a014a56cf2d3cba63ca34aafa9c41356b38f3bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 09:15:01 -0700
+Subject: can: mcp251xfd: fix TDC setting for low data bit rates
+
+From: Kelsey Maes <kelsey@vpprocess.com>
+
+[ Upstream commit 5e1663810e11c64956aa7e280cf74b2f3284d816 ]
+
+The TDC is currently hardcoded to enabled. This means that even for
+lower CAN-FD data bitrates (with a DBRP (data bitrate prescaler) > 2) a
+TDC is configured. This leads to a bus-off condition.
+
+ISO 11898-1 section 11.3.3 says "Transmitter delay compensation" (TDC)
+is only applicable if DBRP is 1 or 2.
+
+To fix the problem, switch the driver to use the TDC calculation
+provided by the CAN driver framework (which respects ISO 11898-1
+section 11.3.3). This has the positive side effect that userspace can
+control TDC as needed.
+
+Demonstration of the feature in action:
+| $ ip link set can0 up type can bitrate 125000 dbitrate 500000 fd on
+| $ ip -details link show can0
+| 3: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UP mode DEFAULT group default qlen 10
+| link/can promiscuity 0 allmulti 0 minmtu 0 maxmtu 0
+| can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+| bitrate 125000 sample-point 0.875
+| tq 50 prop-seg 69 phase-seg1 70 phase-seg2 20 sjw 10 brp 2
+| mcp251xfd: tseg1 2..256 tseg2 1..128 sjw 1..128 brp 1..256 brp_inc 1
+| dbitrate 500000 dsample-point 0.875
+| dtq 125 dprop-seg 6 dphase-seg1 7 dphase-seg2 2 dsjw 1 dbrp 5
+| mcp251xfd: dtseg1 1..32 dtseg2 1..16 dsjw 1..16 dbrp 1..256 dbrp_inc 1
+| tdcv 0..63 tdco 0..63
+| clock 40000000 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 tso_max_size 65536 tso_max_segs 65535 gro_max_size 65536 parentbus spi parentdev spi0.0
+| $ ip link set can0 up type can bitrate 1000000 dbitrate 4000000 fd on
+| $ ip -details link show can0
+| 3: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UP mode DEFAULT group default qlen 10
+| link/can promiscuity 0 allmulti 0 minmtu 0 maxmtu 0
+| can <FD,TDC-AUTO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+| bitrate 1000000 sample-point 0.750
+| tq 25 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 5 brp 1
+| mcp251xfd: tseg1 2..256 tseg2 1..128 sjw 1..128 brp 1..256 brp_inc 1
+| dbitrate 4000000 dsample-point 0.700
+| dtq 25 dprop-seg 3 dphase-seg1 3 dphase-seg2 3 dsjw 1 dbrp 1
+| tdco 7
+| mcp251xfd: dtseg1 1..32 dtseg2 1..16 dsjw 1..16 dbrp 1..256 dbrp_inc 1
+| tdcv 0..63 tdco 0..63
+| clock 40000000 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 tso_max_size 65536 tso_max_segs 65535 gro_max_size 65536 parentbus spi parentdev spi0.0
+
+There has been some confusion about the MCP2518FD using a relative or
+absolute TDCO due to the datasheet specifying a range of [-64,63]. I
+have a custom board with a 40 MHz clock and an estimated loop delay of
+100 to 216 ns. During testing at a data bit rate of 4 Mbit/s I found
+that using can_get_relative_tdco() resulted in bus-off errors. The
+final TDCO value was 1, which corresponds to a 10% SSP in an absolute
+configuration. This behavior is expected if the TDCO value is really
+absolute and not relative. Using priv->can.tdc.tdco instead results in
+a final TDCO of 8, setting the SSP at exactly 80%. This configuration
+works.
+
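+A back-of-the-envelope check of those numbers (assuming DBRP 1, i.e.
+one minimum time quantum per 40 MHz clock cycle):
+
+| f_clock = 40 MHz, data bitrate = 4 Mbit/s, DBRP = 1
+| => one data bit = 40 MHz / 4 Mbit/s = 10 minimum time quanta
+| => absolute TDCO = 8 places the SSP at 8/10 = 80% of the bit
+| => absolute TDCO = 1 places the SSP at 1/10 = 10%, far too early,
+|    matching the bus-off seen with can_get_relative_tdco()
+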
+The automatic, manual, and off TDC modes were tested at speeds up to,
+and including, 8 Mbit/s on real hardware and behave as expected.
+
+Fixes: 55e5b97f003e ("can: mcp25xxfd: add driver for Microchip MCP25xxFD SPI CAN")
+Reported-by: Kelsey Maes <kelsey@vpprocess.com>
+Closes: https://lore.kernel.org/all/C2121586-C87F-4B23-A933-845362C29CA1@vpprocess.com
+Reviewed-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Signed-off-by: Kelsey Maes <kelsey@vpprocess.com>
+Link: https://patch.msgid.link/20250430161501.79370-1-kelsey@vpprocess.com
+[mkl: add comment]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/can/spi/mcp251xfd/mcp251xfd-core.c | 40 +++++++++++++++----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index dd0b3fb42f1b9..c30b04f8fc0df 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
+ .brp_inc = 1,
+ };
+
++/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
++ * [-64,63] for TDCO, indicating a relative TDCO.
++ *
++ * Manual tests have shown, that using a relative TDCO configuration
++ * results in bus off, while an absolute configuration works.
++ *
++ * For TDCO use the max value (63) from the data sheet, but 0 as the
++ * minimum.
++ */
++static const struct can_tdc_const mcp251xfd_tdc_const = {
++ .tdcv_min = 0,
++ .tdcv_max = 63,
++ .tdco_min = 0,
++ .tdco_max = 63,
++ .tdcf_min = 0,
++ .tdcf_max = 0,
++};
++
+ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
+ {
+ switch (model) {
+@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+ {
+ const struct can_bittiming *bt = &priv->can.bittiming;
+ const struct can_bittiming *dbt = &priv->can.data_bittiming;
+- u32 val = 0;
+- s8 tdco;
++ u32 tdcmod, val = 0;
+ int err;
+
+ /* CAN Control Register
+@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+ return err;
+
+ /* Transmitter Delay Compensation */
+- tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+- -64, 63);
+- val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
+- MCP251XFD_REG_TDC_TDCMOD_AUTO) |
+- FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
++ if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
++ else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
++ else
++ tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
++
++ val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
++ FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
++ FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
+
+ return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
+ }
+@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
+ priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
+ priv->can.bittiming_const = &mcp251xfd_bittiming_const;
+ priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
++ priv->can.tdc_const = &mcp251xfd_tdc_const;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+ CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+ CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+- CAN_CTRLMODE_CC_LEN8_DLC;
++ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
++ CAN_CTRLMODE_TDC_MANUAL;
+ set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+ priv->ndev = ndev;
+ priv->spi = spi;
+--
+2.39.5
+
--- /dev/null
+From 1397af743774d35406f864e934203aad7b9b3b06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 18:18:50 +0800
+Subject: erofs: ensure the extra temporary copy is valid for shortened bvecs
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 35076d2223c731f7be75af61e67f90807384d030 ]
+
+When compressed data deduplication is enabled, multiple logical extents
+may reference the same compressed physical cluster.
+
+The previous commit 94c43de73521 ("erofs: fix wrong primary bvec
+selection on deduplicated extents") already avoids using shortened
+bvecs. However, in such cases, the extra temporary buffers also
+need to be preserved for later use in z_erofs_fill_other_copies() to
+prevent data corruption.
+
+IOWs, extra temporary buffers have to be retained not only due to
+varying start relative offsets (`pageofs_out`, as indicated by
+`pcl->multibases`) but also because of shortened bvecs.
+
+android.hardware.graphics.composer@2.1.so : 270696 bytes
+ 0: 0.. 204185 | 204185 : 628019200.. 628084736 | 65536
+-> 1: 204185.. 225536 | 21351 : 544063488.. 544129024 | 65536
+ 2: 225536.. 270696 | 45160 : 0.. 0 | 0
+
+com.android.vndk.v28.apex : 93814897 bytes
+...
+ 364: 53869896..54095257 | 225361 : 543997952.. 544063488 | 65536
+-> 365: 54095257..54309344 | 214087 : 544063488.. 544129024 | 65536
+ 366: 54309344..54514557 | 205213 : 544129024.. 544194560 | 65536
+...
+
+Both 204185 and 54095257 have the same start offset of 3481 relative to
+the page (204185 mod 4096 = 54095257 mod 4096 = 3481), but the logical
+page 55 of `android.hardware.graphics.composer@2.1.so` ranges from
+225280 to 229376 while the extent ends at 225536, forming a shortened
+bvec [225280, 225536) that cannot be used for decompressing the range
+from 54095257 to 54309344 of `com.android.vndk.v28.apex`.
+
+Since `pcl->multibases` is already meaningless, just mark `be->keepxcpy`
+on demand for simplicity.
+
+Again, this issue can only lead to data corruption if `-Ededupe` is on.
+
+Fixes: 94c43de73521 ("erofs: fix wrong primary bvec selection on deduplicated extents")
+Reviewed-by: Hongbo Li <lihongbo22@huawei.com>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Link: https://lore.kernel.org/r/20250506101850.191506-1-hsiangkao@linux.alibaba.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/erofs/zdata.c | 31 ++++++++++++++-----------------
+ 1 file changed, 14 insertions(+), 17 deletions(-)
+
+diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
+index a8fb4b525f544..e5e94afc5af88 100644
+--- a/fs/erofs/zdata.c
++++ b/fs/erofs/zdata.c
+@@ -82,9 +82,6 @@ struct z_erofs_pcluster {
+ /* L: whether partial decompression or not */
+ bool partial;
+
+- /* L: indicate several pageofs_outs or not */
+- bool multibases;
+-
+ /* L: whether extra buffer allocations are best-effort */
+ bool besteffort;
+
+@@ -1073,8 +1070,6 @@ static int z_erofs_scan_folio(struct z_erofs_decompress_frontend *f,
+ break;
+
+ erofs_onlinefolio_split(folio);
+- if (f->pcl->pageofs_out != (map->m_la & ~PAGE_MASK))
+- f->pcl->multibases = true;
+ if (f->pcl->length < offset + end - map->m_la) {
+ f->pcl->length = offset + end - map->m_la;
+ f->pcl->pageofs_out = map->m_la & ~PAGE_MASK;
+@@ -1120,7 +1115,6 @@ struct z_erofs_decompress_backend {
+ struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+ struct super_block *sb;
+ struct z_erofs_pcluster *pcl;
+-
+ /* pages with the longest decompressed length for deduplication */
+ struct page **decompressed_pages;
+ /* pages to keep the compressed data */
+@@ -1129,6 +1123,8 @@ struct z_erofs_decompress_backend {
+ struct list_head decompressed_secondary_bvecs;
+ struct page **pagepool;
+ unsigned int onstack_used, nr_pages;
++ /* indicate if temporary copies should be preserved for later use */
++ bool keepxcpy;
+ };
+
+ struct z_erofs_bvec_item {
+@@ -1139,18 +1135,20 @@ struct z_erofs_bvec_item {
+ static void z_erofs_do_decompressed_bvec(struct z_erofs_decompress_backend *be,
+ struct z_erofs_bvec *bvec)
+ {
++ int poff = bvec->offset + be->pcl->pageofs_out;
+ struct z_erofs_bvec_item *item;
+- unsigned int pgnr;
+-
+- if (!((bvec->offset + be->pcl->pageofs_out) & ~PAGE_MASK) &&
+- (bvec->end == PAGE_SIZE ||
+- bvec->offset + bvec->end == be->pcl->length)) {
+- pgnr = (bvec->offset + be->pcl->pageofs_out) >> PAGE_SHIFT;
+- DBG_BUGON(pgnr >= be->nr_pages);
+- if (!be->decompressed_pages[pgnr]) {
+- be->decompressed_pages[pgnr] = bvec->page;
++ struct page **page;
++
++ if (!(poff & ~PAGE_MASK) && (bvec->end == PAGE_SIZE ||
++ bvec->offset + bvec->end == be->pcl->length)) {
++ DBG_BUGON((poff >> PAGE_SHIFT) >= be->nr_pages);
++ page = be->decompressed_pages + (poff >> PAGE_SHIFT);
++ if (!*page) {
++ *page = bvec->page;
+ return;
+ }
++ } else {
++ be->keepxcpy = true;
+ }
+
+ /* (cold path) one pcluster is requested multiple times */
+@@ -1316,7 +1314,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+ .alg = pcl->algorithmformat,
+ .inplace_io = overlapped,
+ .partial_decoding = pcl->partial,
+- .fillgaps = pcl->multibases,
++ .fillgaps = be->keepxcpy,
+ .gfp = pcl->besteffort ? GFP_KERNEL :
+ GFP_NOWAIT | __GFP_NORETRY
+ }, be->pagepool);
+@@ -1370,7 +1368,6 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+
+ pcl->length = 0;
+ pcl->partial = true;
+- pcl->multibases = false;
+ pcl->besteffort = false;
+ pcl->bvset.nextpage = NULL;
+ pcl->vcnt = 0;
+--
+2.39.5
+
--- /dev/null
+From 72d59a735d83e59a3dfa89f1f467150543c09ac3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 08:59:59 -0700
+Subject: fbnic: Actually flush_tx instead of stalling out
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit 0f9a959a0addd9bbc47e5d16c36b3a7f97981915 ]
+
+The fbnic_mbx_flush_tx function had a number of issues.
+
+First, we were waiting 200ms for the firmware to process the packets. We
+can drop this to 20ms and in almost all cases this should be more than
+enough time. So by changing this we can significantly reduce shutdown time.
+
+Second, we were not making sure that the Tx path was actually shut off. As
+such we could still have packets added while we were flushing the mailbox.
+To prevent that we can now clear the ready flag for the Tx side and it
+should stay down since the interrupt is disabled.
+
+Third, we kept re-reading the tail due to the second issue. The tail should
+not move after we have started the flush so we can just read it once while
+we are holding the mailbox Tx lock. By doing that we are guaranteed that
+the value should be consistent.
+
+Fourth, we were keeping a count of descriptors cleaned due to the second
+and third issues called out. That count is not a valid reason to be exiting
+the cleanup, and with the tail only being read once we shouldn't see
+any cases where the tail moves after the disable, so the count tracking
+can be dropped.
+
+Fifth, we were using attempts * sleep time to determine how long we would
+wait in our polling loop to flush out the Tx. This can be very imprecise.
+In order to tighten up the timing we are shifting over to a timeout of
+jiffies + 10 * HZ + 1 to determine when we should stop polling, as this
+should be accurate to within one sleep cycle for the total amount of
+time spent polling.
+
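+The resulting polling pattern is a plain jiffies deadline (sketch taken
+from the diff below; the extra "+ 1" guarantees at least the full 10
+seconds even if we start just before a jiffies tick):
+
+	unsigned long timeout = jiffies + 10 * HZ + 1;
+
+	do {
+		u8 head = tx_mbx->head;
+
+		if (head == tail)	/* ring drained, we are done */
+			break;
+		msleep(20);
+		fbnic_mbx_process_tx_msgs(fbd);
+	} while (time_is_after_jiffies(timeout));
+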
+Fixes: da3cde08209e ("eth: fbnic: Add FW communication mechanism")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654719929.499179.16406653096197423749.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 31 +++++++++++-----------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 7db68fe7df940..dd30f0cb02506 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -797,35 +797,36 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+
+ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+ {
++ unsigned long timeout = jiffies + 10 * HZ + 1;
+ struct fbnic_fw_mbx *tx_mbx;
+- int attempts = 50;
+- u8 count = 0;
+-
+- /* Nothing to do if there is no mailbox */
+- if (!fbnic_fw_present(fbd))
+- return;
++ u8 tail;
+
+ /* Record current Rx stats */
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+- /* Nothing to do if mailbox never got to ready */
+- if (!tx_mbx->ready)
+- return;
++ spin_lock_irq(&fbd->fw_tx_lock);
++
++ /* Clear ready to prevent any further attempts to transmit */
++ tx_mbx->ready = false;
++
++ /* Read tail to determine the last tail state for the ring */
++ tail = tx_mbx->tail;
++
++ spin_unlock_irq(&fbd->fw_tx_lock);
+
+ /* Give firmware time to process packet,
+- * we will wait up to 10 seconds which is 50 waits of 200ms.
++ * we will wait up to 10 seconds which is 500 waits of 20ms.
+ */
+ do {
+ u8 head = tx_mbx->head;
+
+- if (head == tx_mbx->tail)
++ /* Tx ring is empty once head == tail */
++ if (head == tail)
+ break;
+
+- msleep(200);
++ msleep(20);
+ fbnic_mbx_process_tx_msgs(fbd);
+-
+- count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+- } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
++ } while (time_is_after_jiffies(timeout));
+ }
+
+ void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
+--
+2.39.5
+
--- /dev/null
+From f04ff6e50d44898499480f8d12091b177dfaca74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 09:00:25 -0700
+Subject: fbnic: Do not allow mailbox to toggle to ready outside
+ fbnic_mbx_poll_tx_ready
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit ce2fa1dba204c761582674cf2eb9cbe0b949b5c7 ]
+
+We had originally thought to have the mailbox go to ready in the background
+while we were doing other things. One issue with this, though, is that
+we can't disable it by clearing the ready state without also blocking
+interrupts or calls to mbx_poll, as it will just pop back to life
+during an interrupt.
+
+In order to prevent that from happening we can pull the code for toggling
+to ready out of the interrupt path and instead place it in the
+fbnic_mbx_poll_tx_ready path so that it becomes the only spot where the
+Rx/Tx can toggle to the ready state. By doing this we can prevent races
+where we disable the DMA and/or free buffers only to have an interrupt fire
+and undo what we have done.
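+In short, the ready transition is confined to a single call path
+(summarized from the diff below):
+
+	/* Before: any FW interrupt could flip the mailbox back to ready.
+	 *   fbnic_mbx_poll() -> fbnic_mbx_postinit() -> mbx->ready = true
+	 *
+	 * After: only the polling loop may do so.
+	 *   fbnic_mbx_poll_tx_ready():
+	 *     do { msleep(20); ... } while (!fbnic_mbx_event(fbd));
+	 *     for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+	 *       fbnic_mbx_init_desc_ring(fbd, i);  -> sets mbx->ready
+	 */
+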
+
+Fixes: da3cde08209e ("eth: fbnic: Add FW communication mechanism")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654722518.499179.11612865740376848478.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 27 ++++++++--------------
+ 1 file changed, 10 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 50d896dcbb04c..7775418316df5 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -299,10 +299,6 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+- /* This is a one time init, so just exit if it is completed */
+- if (mbx->ready)
+- return;
+-
+ mbx->ready = true;
+
+ switch (mbx_idx) {
+@@ -322,21 +318,18 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ }
+ }
+
+-static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
++static bool fbnic_mbx_event(struct fbnic_dev *fbd)
+ {
+- int i;
+-
+ /* We only need to do this on the first interrupt following reset.
+ * this primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+- return;
++ return false;
+
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+- for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+- fbnic_mbx_init_desc_ring(fbd, i);
++ return true;
+ }
+
+ /**
+@@ -737,7 +730,7 @@ static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+
+ void fbnic_mbx_poll(struct fbnic_dev *fbd)
+ {
+- fbnic_mbx_postinit(fbd);
++ fbnic_mbx_event(fbd);
+
+ fbnic_mbx_process_tx_msgs(fbd);
+ fbnic_mbx_process_rx_msgs(fbd);
+@@ -746,11 +739,9 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
+ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ {
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+- struct fbnic_fw_mbx *tx_mbx;
+- int err;
++ int err, i;
+
+- tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+- while (!tx_mbx->ready) {
++ do {
+ if (!time_is_after_jiffies(timeout))
+ return -ETIMEDOUT;
+
+@@ -765,9 +756,11 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ return -ENODEV;
+
+ msleep(20);
++ } while (!fbnic_mbx_event(fbd));
+
+- fbnic_mbx_poll(fbd);
+- }
++ /* FW has shown signs of life. Enable DMA and start Tx/Rx */
++ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
++ fbnic_mbx_init_desc_ring(fbd, i);
+
+ /* Request an update from the firmware. This should overwrite
+ * mgmt.version once we get the actual version from the firmware
+--
+2.39.5
+
--- /dev/null
+From 4b66861b39b467bf49b0152fb0c64c028331a413 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 08:59:39 -0700
+Subject: fbnic: Fix initialization of mailbox descriptor rings
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit f34343cc11afc7bb1f881c3492bee3484016bf71 ]
+
+Address two issues with the FW mailbox descriptor initialization.
+
+We need to reverse the order of accesses when we invalidate an entry versus
+writing an entry. When writing an entry we write upper and then lower as
+the lower 32b contain the valid bit that makes the entire address valid.
+However, for invalidation we should write in the reverse order, so that
+the entry is marked invalid before we clear the upper 32b.
+
+Without this change we may see the FW attempt to access pages with the
+upper 32b of the address set to 0, which will likely result in DMAR
+faults due to write access failures on mailbox shutdown.
+
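+The ordering rule, in sketch form (the valid bit lives in the lower
+32b, so it must be written last when arming a descriptor and cleared
+first when tearing one down):
+
+	/* write a valid descriptor: upper half first, valid bit last */
+	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+	fw_wrfl(fbd);				/* order the two writes */
+	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+
+	/* invalidate: clear the valid bit first, then the upper half */
+	fw_wr32(fbd, desc_offset, 0);
+	fw_wrfl(fbd);
+	fw_wr32(fbd, desc_offset + 1, 0);
+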
+Fixes: da3cde08209e ("eth: fbnic: Add FW communication mechanism")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654717972.499179.8083789731819297034.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 32 ++++++++++++++++------
+ 1 file changed, 23 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 8f7a2a19ddf80..612d09ea08ebb 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -17,11 +17,29 @@ static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ {
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
++ /* Write the upper 32b and then the lower 32b. Doing this the
++ * FW can then read lower, upper, lower to verify that the state
++ * of the descriptor wasn't changed mid-transaction.
++ */
+ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+ }
+
++static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
++ int desc_idx, u32 desc)
++{
++ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
++
++ /* For initialization we write the lower 32b of the descriptor first.
++ * This way we can set the state to mark it invalid before we clear the
++ * upper 32b.
++ */
++ fw_wr32(fbd, desc_offset, desc);
++ fw_wrfl(fbd);
++ fw_wr32(fbd, desc_offset + 1, 0);
++}
++
+ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+ {
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+@@ -41,21 +59,17 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+ */
+- __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
+-
+- fw_wrfl(fbd);
++ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);
+
+ /* We then fill the rest of the ring starting at the end and moving
+ * back toward descriptor 0 with skip descriptors that have no
+ * length nor address, and tell the firmware that they can skip
+ * them and just move past them to the one we initialized to 0.
+ */
+- for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
+- __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
+- FBNIC_IPC_MBX_DESC_FW_CMPL |
+- FBNIC_IPC_MBX_DESC_HOST_CMPL);
+- fw_wrfl(fbd);
+- }
++ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
++ __fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
++ FBNIC_IPC_MBX_DESC_FW_CMPL |
++ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+ }
+
+ void fbnic_mbx_init(struct fbnic_dev *fbd)
+--
+2.39.5
+
--- /dev/null
+From 38c0bb2c5db56e5b35d3f25ff38a1366bed4d157 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 08:59:46 -0700
+Subject: fbnic: Gate AXI read/write enabling on FW mailbox
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit 3b12f00ddd08e888273b2ac0488d396d90a836fc ]
+
+In order to prevent the device from throwing spurious writes and/or reads
+at us we need to gate the AXI fabric interface to the PCIe until such time
+as we know the FW is in a known good state.
+
+To accomplish this we use the mailbox as a mechanism for us to recognize
+that the FW has acknowledged our presence and is no longer sending any
+stale message data to us.
+
+We start in fbnic_mbx_init by calling the fbnic_mbx_reset_desc_ring
+function, disabling the DMA in both directions, and then invalidating
+all the descriptors in each ring.
+
+We then poll the mailbox in fbnic_mbx_poll_tx_ready and when the interrupt
+is set by the FW we pick it up and mark the mailboxes as ready, while also
+enabling the DMA.
+
+Once we have completed all the transactions and need to shut down we call
+into fbnic_mbx_clean which will in turn call fbnic_mbx_reset_desc_ring for
+each ring and shut down the DMA and once again invalidate the descriptors.
+
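+The resulting lifecycle, in sketch form:
+
+	fbnic_mbx_init()
+	    fbnic_mbx_reset_desc_ring()	  /* FLUSH: gate DMA, invalidate ring */
+	/* ... first FW interrupt after reset ... */
+	fbnic_mbx_postinit()
+	    fbnic_mbx_init_desc_ring()	  /* BME: ungate DMA, mark ready */
+	/* ... all transactions completed, shutting down ... */
+	fbnic_mbx_clean()
+	    fbnic_mbx_reset_desc_ring()	  /* gate DMA, invalidate again */
+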
+Fixes: 3646153161f1 ("eth: fbnic: Add register init to set PCIe/Ethernet device config")
+Fixes: da3cde08209e ("eth: fbnic: Add FW communication mechanism")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654718623.499179.7445197308109347982.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 2 ++
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 38 +++++++++++++++++----
+ drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 6 ----
+ 3 files changed, 33 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+index 21db509acbc15..e91b4432fddd7 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
+@@ -700,8 +700,10 @@ enum {
+ /* PUL User Registers */
+ #define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
+ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
++#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH CSR_BIT(19)
+ #define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
+ #define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
++#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH CSR_BIT(19)
+ #define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
+ #define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 612d09ea08ebb..7db68fe7df940 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -51,10 +51,26 @@ static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+ return desc;
+ }
+
+-static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
++static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ int desc_idx;
+
++ /* Disable DMA transactions from the device,
++ * and flush any transactions triggered during cleaning
++ */
++ switch (mbx_idx) {
++ case FBNIC_IPC_MBX_RX_IDX:
++ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
++ FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
++ break;
++ case FBNIC_IPC_MBX_TX_IDX:
++ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
++ FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
++ break;
++ }
++
++ wrfl(fbd);
++
+ /* Initialize first descriptor to all 0s. Doing this gives us a
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+@@ -90,7 +106,7 @@ void fbnic_mbx_init(struct fbnic_dev *fbd)
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+- fbnic_mbx_init_desc_ring(fbd, i);
++ fbnic_mbx_reset_desc_ring(fbd, i);
+ }
+
+ static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+@@ -155,7 +171,7 @@ static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ int i;
+
+- fbnic_mbx_init_desc_ring(fbd, mbx_idx);
++ fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
+
+ for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+@@ -297,7 +313,7 @@ static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+ return (err == -EOPNOTSUPP) ? 0 : err;
+ }
+
+-static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
++static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+@@ -309,10 +325,18 @@ static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
++ /* Enable DMA writes from the device */
++ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
++ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
++
+ /* Make sure we have a page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
++ /* Enable DMA reads from the device */
++ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
++ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
++
+ /* Force version to 1 if we successfully requested an update
+ * from the firmware. This should be overwritten once we get
+ * the actual version from the firmware in the capabilities
+@@ -329,7 +353,7 @@ static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+ {
+ int i;
+
+- /* We only need to do this on the first interrupt following init.
++ /* We only need to do this on the first interrupt following reset.
+ * this primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+@@ -339,7 +363,7 @@ static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+- fbnic_mbx_postinit_desc_ring(fbd, i);
++ fbnic_mbx_init_desc_ring(fbd, i);
+ }
+
+ /**
+@@ -761,7 +785,7 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+- fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
++ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ msleep(200);
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+index 7b654d0a6dac6..06fa65e4f35b6 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
+@@ -79,12 +79,6 @@ static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+ fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+-
+- /* Enable XALI AR/AW outbound */
+- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+- FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+- wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+- FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+ }
+
+ static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+--
+2.39.5
+
--- /dev/null
+From 255cb26379dbbc626229dddee831643c92b949ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 09:00:12 -0700
+Subject: fbnic: Improve responsiveness of fbnic_mbx_poll_tx_ready
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit ab064f6005973d456f95ae99cd9ea0d8ab676cce ]
+
+A couple of different issues were found in fbnic_mbx_poll_tx_ready.
+One was the fact that we were sleeping much longer than we actually
+needed to, as the FW can respond in under 20ms. The other was that we
+would just keep polling the mailbox even if the device itself had gone
+away.
+
+To address the responsiveness issues we can decrease the sleeps to 20ms and
+use a jiffies based timeout value rather than just counting the number of
+times we slept and then polled.
+
+To address the hardware going away we can move the check for the firmware
+BAR being present from where it was and place it inside the loop after the
+mailbox descriptor ring is initialized and before we sleep so that we just
+abort and return an error if the device went away during initialization.
+
+With these two changes we see a significant improvement in boot times for
+the driver.
+
+Fixes: da3cde08209e ("eth: fbnic: Add FW communication mechanism")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654721224.499179.2698616208976624755.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index dd30f0cb02506..8d6af5c3a49c0 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -772,27 +772,30 @@ void fbnic_mbx_poll(struct fbnic_dev *fbd)
+
+ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ {
++ unsigned long timeout = jiffies + 10 * HZ + 1;
+ struct fbnic_fw_mbx *tx_mbx;
+- int attempts = 50;
+-
+- /* Immediate fail if BAR4 isn't there */
+- if (!fbnic_fw_present(fbd))
+- return -ENODEV;
+
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+- while (!tx_mbx->ready && --attempts) {
++ while (!tx_mbx->ready) {
++ if (!time_is_after_jiffies(timeout))
++ return -ETIMEDOUT;
++
+ /* Force the firmware to trigger an interrupt response to
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+ fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+- msleep(200);
++ /* Immediate fail if BAR4 went away */
++ if (!fbnic_fw_present(fbd))
++ return -ENODEV;
++
++ msleep(20);
+
+ fbnic_mbx_poll(fbd);
+ }
+
+- return attempts ? 0 : -ETIMEDOUT;
++ return 0;
+ }
+
+ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+--
+2.39.5
+
--- /dev/null
+From 6fa688f9c3989482ff3f722c26f5b3c55f280672 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 09:00:18 -0700
+Subject: fbnic: Pull fbnic_fw_xmit_cap_msg use out of interrupt context
+
+From: Alexander Duyck <alexanderduyck@fb.com>
+
+[ Upstream commit 1b34d1c1dc8384884febd83140c9afbc7c4b9eb8 ]
+
+This change pulls the call to fbnic_fw_xmit_cap_msg out of
+fbnic_mbx_init_desc_ring and instead places it in the polling function for
+getting the Tx ready. Doing that, we avoid the potential issue of a
+later firmware interrupt causing the capability message to be sent from
+interrupt context.
+
+Fixes: 20d2e88cc746 ("eth: fbnic: Add initial messaging to notify FW of our presence")
+Signed-off-by: Alexander Duyck <alexanderduyck@fb.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Link: https://patch.msgid.link/174654721876.499179.9839651602256668493.stgit@ahduyck-xeon-server.home.arpa
+Reviewed-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 43 ++++++++--------------
+ 1 file changed, 16 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+index 8d6af5c3a49c0..50d896dcbb04c 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
+@@ -295,24 +295,6 @@ static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+ return err;
+ }
+
+-/**
+- * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+- * @fbd: FBNIC device structure
+- *
+- * Return: NULL on failure to allocate, error pointer on error, or pointer
+- * to new TLV test message.
+- *
+- * Sends a single TLV header indicating the host wants the firmware to
+- * confirm the capabilities and version.
+- **/
+-static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+-{
+- int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+-
+- /* Return 0 if we are not calling this on ASIC */
+- return (err == -EOPNOTSUPP) ? 0 : err;
+-}
+-
+ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ {
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+@@ -336,15 +318,6 @@ static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+ /* Enable DMA reads from the device */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+-
+- /* Force version to 1 if we successfully requested an update
+- * from the firmware. This should be overwritten once we get
+- * the actual version from the firmware in the capabilities
+- * request message.
+- */
+- if (!fbnic_fw_xmit_cap_msg(fbd) &&
+- !fbd->fw_cap.running.mgmt.version)
+- fbd->fw_cap.running.mgmt.version = 1;
+ break;
+ }
+ }
+@@ -774,6 +747,7 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ {
+ unsigned long timeout = jiffies + 10 * HZ + 1;
+ struct fbnic_fw_mbx *tx_mbx;
++ int err;
+
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ while (!tx_mbx->ready) {
+@@ -795,7 +769,22 @@ int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+ fbnic_mbx_poll(fbd);
+ }
+
++ /* Request an update from the firmware. This should overwrite
++ * mgmt.version once we get the actual version from the firmware
++ * in the capabilities request message.
++ */
++ err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
++ if (err)
++ goto clean_mbx;
++
++ /* Use "1" to indicate we entered the state waiting for a response */
++ fbd->fw_cap.running.mgmt.version = 1;
++
+ return 0;
++clean_mbx:
++ /* Cleanup Rx buffers and disable mailbox */
++ fbnic_mbx_clean(fbd);
++ return err;
+ }
+
+ void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+--
+2.39.5
+
--- /dev/null
+From 77af055c8dbe1f86d241686eb6cf58287739f0b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 May 2025 00:57:52 +0200
+Subject: gre: Fix again IPv6 link-local address generation.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 3e6a0243ff002ddbd7ee18a8974ae61d2e6ed00d ]
+
+Use addrconf_addr_gen() to generate IPv6 link-local addresses on GRE
+devices in most cases and fall back to using add_v4_addrs() only in
+case the GRE configuration is incompatible with addrconf_addr_gen().
+
+GRE used to use addrconf_addr_gen() until commit e5dd729460ca ("ip/ip6_gre:
+use the same logic as SIT interfaces when computing v6LL address")
+restricted this use to gretap and ip6gretap devices, and created
+add_v4_addrs() (borrowed from SIT) for non-Ethernet GRE ones.
+
+The original problem came when commit 9af28511be10 ("addrconf: refuse
+isatap eui64 for INADDR_ANY") made __ipv6_isatap_ifid() fail when its
+addr parameter was 0. The commit says that this would create an invalid
+address; however, I couldn't find any RFC saying that the generated
+interface identifier would be wrong. Anyway, since GRE over IPv4
+devices pass their local tunnel address to __ipv6_isatap_ifid(), that
+commit broke their IPv6 link-local address generation when the local
+address was unspecified.
+
+Then commit e5dd729460ca ("ip/ip6_gre: use the same logic as SIT
+interfaces when computing v6LL address") tried to fix that case by
+defining add_v4_addrs() and calling it to generate the IPv6 link-local
+address instead of using addrconf_addr_gen() (apart for gretap and
+ip6gretap devices, which would still use the regular
+addrconf_addr_gen(), since they have a MAC address).
+
+That broke several use cases because add_v4_addrs() isn't properly
+integrated into the rest of IPv6 Neighbor Discovery code. Several of
+these shortcomings have been fixed over time, but add_v4_addrs()
+remains broken in several respects. In particular, it doesn't send any
+Router Solicitations, so the SLAAC process doesn't start until the
+interface receives a Router Advertisement. Also, add_v4_addrs() mostly
+ignores the address generation mode of the interface
+(/proc/sys/net/ipv6/conf/*/addr_gen_mode), thus breaking the
+IN6_ADDR_GEN_MODE_RANDOM and IN6_ADDR_GEN_MODE_STABLE_PRIVACY cases.
+
+Fix the situation by using add_v4_addrs() only in the specific scenario
+where the normal method would fail. That is, for interfaces that have
+all of the following characteristics:
+
+ * run over IPv4,
+ * transport IP packets directly, not Ethernet (that is, not gretap
+ interfaces),
+ * tunnel endpoint is INADDR_ANY (that is, 0),
+ * device address generation mode is EUI64.
+
+In all other cases, fall back to the regular addrconf_addr_gen().
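+
+As a rough sketch (illustrative only, mirroring the hunk below in
+net/ipv6/addrconf.c), the resulting selection logic is:
+
+	if (dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
+	    idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
+		add_v4_addrs(idev);		/* the only case addrconf_addr_gen() can't handle */
+	else
+		addrconf_addr_gen(idev, true);	/* the regular path */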
+
+Also, remove the special case for ip6gre interfaces in add_v4_addrs(),
+since ip6gre devices now always use addrconf_addr_gen() instead.
+
+Note:
+ This patch was originally applied as commit 183185a18ff9 ("gre: Fix
+ IPv6 link-local address generation."). However, it was then reverted
+ by commit fc486c2d060f ("Revert "gre: Fix IPv6 link-local address
+ generation."") because it uncovered another bug that ended up
+ breaking net/forwarding/ip6gre_custom_multipath_hash.sh. That other
+ bug has now been fixed by commit 4d0ab3a6885e ("ipv6: Start path
+ selection from the first nexthop"). Therefore we can now revive this
+ GRE patch (no changes since original commit 183185a18ff9 ("gre: Fix
+ IPv6 link-local address generation.")).
+
+Fixes: e5dd729460ca ("ip/ip6_gre: use the same logic as SIT interfaces when computing v6LL address")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/a88cc5c4811af36007645d610c95102dccb360a6.1746225214.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/addrconf.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index f5d49162f7983..16ba3bb12fc4b 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3237,16 +3237,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ struct in6_addr addr;
+ struct net_device *dev;
+ struct net *net = dev_net(idev->dev);
+- int scope, plen, offset = 0;
++ int scope, plen;
+ u32 pflags = 0;
+
+ ASSERT_RTNL();
+
+ memset(&addr, 0, sizeof(struct in6_addr));
+- /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
+- if (idev->dev->addr_len == sizeof(struct in6_addr))
+- offset = sizeof(struct in6_addr) - 4;
+- memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
++ memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
+
+ if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ scope = IPV6_ADDR_COMPATv4;
+@@ -3557,7 +3554,13 @@ static void addrconf_gre_config(struct net_device *dev)
+ return;
+ }
+
+- if (dev->type == ARPHRD_ETHER) {
++ /* Generate the IPv6 link-local address using addrconf_addr_gen(),
++ * unless we have an IPv4 GRE device not bound to an IP address and
++ * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
++ * case). Such devices fall back to add_v4_addrs() instead.
++ */
++ if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
++ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
+ addrconf_addr_gen(idev, true);
+ return;
+ }
+--
+2.39.5
+
--- /dev/null
+From 45bd486a951ebddc96b0fc9b2210f762d020dea5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 21 Aug 2024 15:09:55 +0200
+Subject: ice: Initial support for E825C hardware in ice_adapter
+
+From: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
+
+[ Upstream commit fdb7f54700b1c88e734323a62fea986d9ce5a9c6 ]
+
+Address E825C devices by PCI ID, since dual IP core configurations
+need a single ice_adapter for both devices.
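+
+As a hypothetical illustration (pf0_pdev and pf1_pdev are made-up names
+for the two PCI functions of one dual IP core E825C device), both
+functions now compute the same index and so resolve to one shared
+ice_adapter:
+
+	WARN_ON(ice_adapter_index(pf0_pdev) != ice_adapter_index(pf1_pdev));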
+
+Signed-off-by: Sergey Temerkhanov <sergey.temerkhanov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 0093cb194a75 ("ice: use DSN instead of PCI BDF for ice_adapter index")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_adapter.c | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index f3e195974a8ef..01a08cfd0090a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -9,12 +9,14 @@
+ #include <linux/spinlock.h>
+ #include <linux/xarray.h>
+ #include "ice_adapter.h"
++#include "ice.h"
+
+ static DEFINE_XARRAY(ice_adapters);
+ static DEFINE_MUTEX(ice_adapters_mutex);
+
+ /* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+ #define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
++#define INDEX_FIELD_DEV GENMASK(31, 16)
+ #define INDEX_FIELD_BUS GENMASK(12, 5)
+ #define INDEX_FIELD_SLOT GENMASK(4, 0)
+
+@@ -24,9 +26,17 @@ static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+
+ WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+
+- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
++ switch (pdev->device) {
++ case ICE_DEV_ID_E825C_BACKPLANE:
++ case ICE_DEV_ID_E825C_QSFP:
++ case ICE_DEV_ID_E825C_SFP:
++ case ICE_DEV_ID_E825C_SGMII:
++ return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
++ default:
++ return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
++ FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
++ FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
++ }
+ }
+
+ static struct ice_adapter *ice_adapter_new(void)
+--
+2.39.5
+
--- /dev/null
+From ac35471c9fbe07403677eb9f13fc01e46ec43489 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 09:19:38 -0700
+Subject: ice: use DSN instead of PCI BDF for ice_adapter index
+
+From: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+
+[ Upstream commit 0093cb194a7511d1e68865fa35b763c72e44c2f0 ]
+
+Use Device Serial Number instead of PCI bus/device/function for
+the index of struct ice_adapter.
+
+Functions on the same physical device should point to the very same
+ice_adapter instance, but with two PFs, when at least one of them is
+PCI-e passed-through to a VM, it is no longer the case - PFs will get
+seemingly random PCI BDF values, and thus indices, what finally leds to
+each of them being on their own instance of ice_adapter. That causes them
+to don't attempt any synchronization of the PTP HW clock usage, or any
+other future resources.
+
+DSN works nicely in place of the index, as it is "immutable" in terms of
+virtualization.
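+
+On 32-bit systems the 64-bit DSN cannot be used as the xarray index
+directly, so it is folded; a minimal sketch matching the hunk below:
+
+	/* XOR-fold the two DSN halves into an unsigned long index;
+	 * the DSN cached in ice_adapter catches any collisions.
+	 */
+	index = (u32)dsn ^ (u32)(dsn >> 32);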
+
+Fixes: 0e2bddf9e5f9 ("ice: add ice_adapter for shared data across PFs on the same NIC")
+Suggested-by: Jacob Keller <jacob.e.keller@intel.com>
+Suggested-by: Jakub Kicinski <kuba@kernel.org>
+Suggested-by: Jiri Pirko <jiri@resnulli.us>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Link: https://patch.msgid.link/20250505161939.2083581-1-anthony.l.nguyen@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_adapter.c | 47 ++++++++------------
+ drivers/net/ethernet/intel/ice/ice_adapter.h | 6 ++-
+ 2 files changed, 22 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
+index 01a08cfd0090a..66e070095d1bb 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.c
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
+@@ -1,7 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ // SPDX-FileCopyrightText: Copyright Red Hat
+
+-#include <linux/bitfield.h>
+ #include <linux/cleanup.h>
+ #include <linux/mutex.h>
+ #include <linux/pci.h>
+@@ -14,32 +13,16 @@
+ static DEFINE_XARRAY(ice_adapters);
+ static DEFINE_MUTEX(ice_adapters_mutex);
+
+-/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+-#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+-#define INDEX_FIELD_DEV GENMASK(31, 16)
+-#define INDEX_FIELD_BUS GENMASK(12, 5)
+-#define INDEX_FIELD_SLOT GENMASK(4, 0)
+-
+-static unsigned long ice_adapter_index(const struct pci_dev *pdev)
++static unsigned long ice_adapter_index(u64 dsn)
+ {
+- unsigned int domain = pci_domain_nr(pdev->bus);
+-
+- WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+-
+- switch (pdev->device) {
+- case ICE_DEV_ID_E825C_BACKPLANE:
+- case ICE_DEV_ID_E825C_QSFP:
+- case ICE_DEV_ID_E825C_SFP:
+- case ICE_DEV_ID_E825C_SGMII:
+- return FIELD_PREP(INDEX_FIELD_DEV, pdev->device);
+- default:
+- return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+- FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+- FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+- }
++#if BITS_PER_LONG == 64
++ return dsn;
++#else
++ return (u32)dsn ^ (u32)(dsn >> 32);
++#endif
+ }
+
+-static struct ice_adapter *ice_adapter_new(void)
++static struct ice_adapter *ice_adapter_new(u64 dsn)
+ {
+ struct ice_adapter *adapter;
+
+@@ -47,6 +30,7 @@ static struct ice_adapter *ice_adapter_new(void)
+ if (!adapter)
+ return NULL;
+
++ adapter->device_serial_number = dsn;
+ spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ refcount_set(&adapter->refcount, 1);
+
+@@ -77,23 +61,26 @@ static void ice_adapter_free(struct ice_adapter *adapter)
+ * Return: Pointer to ice_adapter on success.
+ * ERR_PTR() on error. -ENOMEM is the only possible error.
+ */
+-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
++struct ice_adapter *ice_adapter_get(struct pci_dev *pdev)
+ {
+- unsigned long index = ice_adapter_index(pdev);
++ u64 dsn = pci_get_dsn(pdev);
+ struct ice_adapter *adapter;
++ unsigned long index;
+ int err;
+
++ index = ice_adapter_index(dsn);
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ if (err == -EBUSY) {
+ adapter = xa_load(&ice_adapters, index);
+ refcount_inc(&adapter->refcount);
++ WARN_ON_ONCE(adapter->device_serial_number != dsn);
+ return adapter;
+ }
+ if (err)
+ return ERR_PTR(err);
+
+- adapter = ice_adapter_new();
++ adapter = ice_adapter_new(dsn);
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+ xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+@@ -110,11 +97,13 @@ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+ *
+ * Context: Process, may sleep.
+ */
+-void ice_adapter_put(const struct pci_dev *pdev)
++void ice_adapter_put(struct pci_dev *pdev)
+ {
+- unsigned long index = ice_adapter_index(pdev);
++ u64 dsn = pci_get_dsn(pdev);
+ struct ice_adapter *adapter;
++ unsigned long index;
+
++ index = ice_adapter_index(dsn);
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
+index e233225848b38..ac15c0d2bc1a4 100644
+--- a/drivers/net/ethernet/intel/ice/ice_adapter.h
++++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
+@@ -32,6 +32,7 @@ struct ice_port_list {
+ * @refcount: Reference count. struct ice_pf objects hold the references.
+ * @ctrl_pf: Control PF of the adapter
+ * @ports: Ports list
++ * @device_serial_number: DSN cached for collision detection on 32bit systems
+ */
+ struct ice_adapter {
+ refcount_t refcount;
+@@ -40,9 +41,10 @@ struct ice_adapter {
+
+ struct ice_pf *ctrl_pf;
+ struct ice_port_list ports;
++ u64 device_serial_number;
+ };
+
+-struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+-void ice_adapter_put(const struct pci_dev *pdev);
++struct ice_adapter *ice_adapter_get(struct pci_dev *pdev);
++void ice_adapter_put(struct pci_dev *pdev);
+
+ #endif /* _ICE_ADAPTER_H */
+--
+2.39.5
+
--- /dev/null
+From f5c5a97ba5d7f0e3ce640f4521874d34ab66f6a5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 May 2025 01:01:18 +0300
+Subject: ipvs: fix uninit-value for saddr in do_output_route4
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit e34090d7214e0516eb8722aee295cb2507317c07 ]
+
+syzbot reports an uninit-value for the saddr argument [1].
+Commit 4754957f04f5 ("ipvs: do not use random local source address for
+tunnels") already implies that the input value of saddr should be
+ignored, but the code still reads it, which can prevent the route from
+being connected. Fix it by changing the argument to ret_saddr.
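+
+With saddr now output-only, a caller sketch (hypothetical, for
+illustration) looks like:
+
+	__be32 saddr;	/* may stay uninitialized: never read, only written */
+	struct rtable *rt = do_output_route4(net, daddr, rt_mode, &saddr);
+
+	/* or, when the source address is not needed: */
+	rt = do_output_route4(net, daddr, rt_mode, NULL);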
+
+[1]
+BUG: KMSAN: uninit-value in do_output_route4+0x42c/0x4d0 net/netfilter/ipvs/ip_vs_xmit.c:147
+ do_output_route4+0x42c/0x4d0 net/netfilter/ipvs/ip_vs_xmit.c:147
+ __ip_vs_get_out_rt+0x403/0x21d0 net/netfilter/ipvs/ip_vs_xmit.c:330
+ ip_vs_tunnel_xmit+0x205/0x2380 net/netfilter/ipvs/ip_vs_xmit.c:1136
+ ip_vs_in_hook+0x1aa5/0x35b0 net/netfilter/ipvs/ip_vs_core.c:2063
+ nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline]
+ nf_hook_slow+0xf7/0x400 net/netfilter/core.c:626
+ nf_hook include/linux/netfilter.h:269 [inline]
+ __ip_local_out+0x758/0x7e0 net/ipv4/ip_output.c:118
+ ip_local_out net/ipv4/ip_output.c:127 [inline]
+ ip_send_skb+0x6a/0x3c0 net/ipv4/ip_output.c:1501
+ udp_send_skb+0xfda/0x1b70 net/ipv4/udp.c:1195
+ udp_sendmsg+0x2fe3/0x33c0 net/ipv4/udp.c:1483
+ inet_sendmsg+0x1fc/0x280 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+ __sock_sendmsg+0x267/0x380 net/socket.c:727
+ ____sys_sendmsg+0x91b/0xda0 net/socket.c:2566
+ ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2620
+ __sys_sendmmsg+0x41d/0x880 net/socket.c:2702
+ __compat_sys_sendmmsg net/compat.c:360 [inline]
+ __do_compat_sys_sendmmsg net/compat.c:367 [inline]
+ __se_compat_sys_sendmmsg net/compat.c:364 [inline]
+ __ia32_compat_sys_sendmmsg+0xc8/0x140 net/compat.c:364
+ ia32_sys_call+0x3ffa/0x41f0 arch/x86/include/generated/asm/syscalls_32.h:346
+ do_syscall_32_irqs_on arch/x86/entry/syscall_32.c:83 [inline]
+ __do_fast_syscall_32+0xb0/0x110 arch/x86/entry/syscall_32.c:306
+ do_fast_syscall_32+0x38/0x80 arch/x86/entry/syscall_32.c:331
+ do_SYSENTER_32+0x1f/0x30 arch/x86/entry/syscall_32.c:369
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+
+Uninit was created at:
+ slab_post_alloc_hook mm/slub.c:4167 [inline]
+ slab_alloc_node mm/slub.c:4210 [inline]
+ __kmalloc_cache_noprof+0x8fa/0xe00 mm/slub.c:4367
+ kmalloc_noprof include/linux/slab.h:905 [inline]
+ ip_vs_dest_dst_alloc net/netfilter/ipvs/ip_vs_xmit.c:61 [inline]
+ __ip_vs_get_out_rt+0x35d/0x21d0 net/netfilter/ipvs/ip_vs_xmit.c:323
+ ip_vs_tunnel_xmit+0x205/0x2380 net/netfilter/ipvs/ip_vs_xmit.c:1136
+ ip_vs_in_hook+0x1aa5/0x35b0 net/netfilter/ipvs/ip_vs_core.c:2063
+ nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline]
+ nf_hook_slow+0xf7/0x400 net/netfilter/core.c:626
+ nf_hook include/linux/netfilter.h:269 [inline]
+ __ip_local_out+0x758/0x7e0 net/ipv4/ip_output.c:118
+ ip_local_out net/ipv4/ip_output.c:127 [inline]
+ ip_send_skb+0x6a/0x3c0 net/ipv4/ip_output.c:1501
+ udp_send_skb+0xfda/0x1b70 net/ipv4/udp.c:1195
+ udp_sendmsg+0x2fe3/0x33c0 net/ipv4/udp.c:1483
+ inet_sendmsg+0x1fc/0x280 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+ __sock_sendmsg+0x267/0x380 net/socket.c:727
+ ____sys_sendmsg+0x91b/0xda0 net/socket.c:2566
+ ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2620
+ __sys_sendmmsg+0x41d/0x880 net/socket.c:2702
+ __compat_sys_sendmmsg net/compat.c:360 [inline]
+ __do_compat_sys_sendmmsg net/compat.c:367 [inline]
+ __se_compat_sys_sendmmsg net/compat.c:364 [inline]
+ __ia32_compat_sys_sendmmsg+0xc8/0x140 net/compat.c:364
+ ia32_sys_call+0x3ffa/0x41f0 arch/x86/include/generated/asm/syscalls_32.h:346
+ do_syscall_32_irqs_on arch/x86/entry/syscall_32.c:83 [inline]
+ __do_fast_syscall_32+0xb0/0x110 arch/x86/entry/syscall_32.c:306
+ do_fast_syscall_32+0x38/0x80 arch/x86/entry/syscall_32.c:331
+ do_SYSENTER_32+0x1f/0x30 arch/x86/entry/syscall_32.c:369
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+
+CPU: 0 UID: 0 PID: 22408 Comm: syz.4.5165 Not tainted 6.15.0-rc3-syzkaller-00019-gbc3372351d0c #0 PREEMPT(undef)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
+
+Reported-by: syzbot+04b9a82855c8aed20860@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/68138dfa.050a0220.14dd7d.0017.GAE@google.com/
+Fixes: 4754957f04f5 ("ipvs: do not use random local source address for tunnels")
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipvs/ip_vs_xmit.c | 27 ++++++++-------------------
+ 1 file changed, 8 insertions(+), 19 deletions(-)
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 3313bceb6cc99..014f077403695 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
+ return false;
+ }
+
+-/* Get route to daddr, update *saddr, optionally bind route to saddr */
++/* Get route to daddr, optionally bind route to saddr */
+ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+- int rt_mode, __be32 *saddr)
++ int rt_mode, __be32 *ret_saddr)
+ {
+ struct flowi4 fl4;
+ struct rtable *rt;
+- bool loop = false;
+
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.daddr = daddr;
+@@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+ retry:
+ rt = ip_route_output_key(net, &fl4);
+ if (IS_ERR(rt)) {
+- /* Invalid saddr ? */
+- if (PTR_ERR(rt) == -EINVAL && *saddr &&
+- rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+- *saddr = 0;
+- flowi4_update_output(&fl4, 0, daddr, 0);
+- goto retry;
+- }
+ IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+ return NULL;
+- } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
++ }
++ if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+ ip_rt_put(rt);
+- *saddr = fl4.saddr;
+ flowi4_update_output(&fl4, 0, daddr, fl4.saddr);
+- loop = true;
++ rt_mode = 0;
+ goto retry;
+ }
+- *saddr = fl4.saddr;
++ if (ret_saddr)
++ *ret_saddr = fl4.saddr;
+ return rt;
+ }
+
+@@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+ if (ret_saddr)
+ *ret_saddr = dest_dst->dst_saddr.ip;
+ } else {
+- __be32 saddr = htonl(INADDR_ANY);
+-
+ noref = 0;
+
+ /* For such unconfigured boxes avoid many route lookups
+ * for performance reasons because we do not remember saddr
+ */
+ rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+- rt = do_output_route4(net, daddr, rt_mode, &saddr);
++ rt = do_output_route4(net, daddr, rt_mode, ret_saddr);
+ if (!rt)
+ goto err_unreach;
+- if (ret_saddr)
+- *ret_saddr = saddr;
+ }
+
+ local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
+--
+2.39.5
+
--- /dev/null
+From 9467f3dd56ab13cb69cfbe63d82b680dc7847aaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 11:16:23 +0800
+Subject: ksmbd: fix memory leak in parse_lease_state()
+
+From: Wang Zhaolong <wangzhaolong1@huawei.com>
+
+[ Upstream commit eb4447bcce915b43b691123118893fca4f372a8f ]
+
+The previous patch that added bounds check for create lease context
+introduced a memory leak. When the bounds check fails, the function
+returns NULL without freeing the previously allocated lease_ctx_info
+structure.
+
+This patch fixes the issue by adding kfree(lreq) before returning NULL
+in both boundary check cases.
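+
+The fix follows the usual kernel cleanup-label pattern; a generic
+sketch (not the ksmbd code itself; bounds_check_fails is a placeholder
+for the real check):
+
+	struct lease_ctx_info *lreq = kzalloc(sizeof(*lreq), GFP_KERNEL);
+
+	if (!lreq)
+		return NULL;
+	if (bounds_check_fails)
+		goto err_out;		/* previously returned NULL directly */
+	/* populate lreq from the request here */
+	return lreq;
+err_out:
+	kfree(lreq);			/* was leaked before this patch */
+	return NULL;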
+
+Fixes: bab703ed8472 ("ksmbd: add bounds check for create lease context")
+Signed-off-by: Wang Zhaolong <wangzhaolong1@huawei.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/oplock.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 81a29857b1e32..03f606afad93a 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ sizeof(struct create_lease_v2) - 4)
+- return NULL;
++ goto err_out;
+
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ lreq->req_state = lc->lcontext.LeaseState;
+@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+
+ if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+ sizeof(struct create_lease))
+- return NULL;
++ goto err_out;
+
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+ lreq->req_state = lc->lcontext.LeaseState;
+@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+ lreq->version = 1;
+ }
+ return lreq;
++err_out:
++ kfree(lreq);
++ return NULL;
+ }
+
+ /**
+--
+2.39.5
+
--- /dev/null
+From e2370dfd2690bf0ce317c8b820d8078e128a325c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:00 +0200
+Subject: net: dsa: b53: allow leaky reserved multicast
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 5f93185a757ff38b36f849c659aeef368db15a68 ]
+
+Allow reserved multicast to ignore VLAN membership so STP and other
+management protocols work without a PVID VLAN configured when using a
+VLAN-aware bridge.
+
+Fixes: 967dd82ffc52 ("net: dsa: b53: Add support for Broadcom RoboSwitch")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-2-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d4600ab0b70b3..f327fdeb81850 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -373,9 +373,11 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
++ vc1 &= ~VC1_RX_MCST_FWD_EN;
++
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
++ vc1 |= VC1_RX_MCST_UNTAG_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
++ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+--
+2.39.5
+
--- /dev/null
+From 02a7ddb38d6ac16f9f231bdb32b6ccf3ef9ce40b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:05 +0200
+Subject: net: dsa: b53: always rejoin default untagged VLAN on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 13b152ae40495966501697693f048f47430c50fd ]
+
+While JOIN_ALL_VLAN allows joining all VLANs, we still need to keep the
+default VLAN enabled so that untagged traffic stays untagged.
+
+So rejoin the default VLAN even for switches with JOIN_ALL_VLAN support.
+
+Fixes: 48aea33a77ab ("net: dsa: b53: Add JOIN_ALL_VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-7-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 25afafc4bfc7f..70a8f70d2c6d5 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2022,12 +2022,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+- } else {
+- b53_get_vlan_entry(dev, pvid, vl);
+- vl->members |= BIT(port) | BIT(cpu_port);
+- vl->untag |= BIT(port) | BIT(cpu_port);
+- b53_set_vlan_entry(dev, pvid, vl);
+ }
++
++ b53_get_vlan_entry(dev, pvid, vl);
++ vl->members |= BIT(port) | BIT(cpu_port);
++ vl->untag |= BIT(port) | BIT(cpu_port);
++ b53_set_vlan_entry(dev, pvid, vl);
+ }
+ EXPORT_SYMBOL(b53_br_leave);
+
+--
+2.39.5
+
--- /dev/null
+From 7e250f8324faf6d9d70eabdfdb5a801f0480ac3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:06 +0200
+Subject: net: dsa: b53: do not allow to configure VLAN 0
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 45e9d59d39503bb3e6ab4d258caea4ba6496e2dc ]
+
+Since we cannot set forwarding destinations per VLAN, we should not
+have a VLAN 0 configured, as it would allow untagged traffic to work
+across ports on VLAN-aware bridges regardless of whether a PVID
+untagged VLAN exists.
+
+So remove VLAN 0 on join, and re-add it on leave. But only do so if we
+have a VLAN-aware bridge, as without it, untagged traffic would become
+tagged with VID 0 on a VLAN-unaware bridge.
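+
+Concretely, both b53_vlan_add() and b53_vlan_del() now bail out early
+(sketch of the hunks below):
+
+	if (vlan->vid == 0)	/* never program VID 0 into the VLAN table */
+		return 0;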
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-8-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 36 ++++++++++++++++++++++++--------
+ 1 file changed, 27 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 70a8f70d2c6d5..16d6582c931f9 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1545,6 +1545,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (err)
+ return err;
+
++ if (vlan->vid == 0)
++ return 0;
++
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
+ if (pvid)
+ new_pvid = vlan->vid;
+@@ -1557,10 +1560,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+
+ b53_get_vlan_entry(dev, vlan->vid, vl);
+
+- if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+- untagged = true;
+-
+- if (vlan->vid > 0 && dsa_is_cpu_port(ds, port))
++ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
+
+ vl->members |= BIT(port);
+@@ -1590,6 +1590,9 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
+ struct b53_vlan *vl;
+ u16 pvid;
+
++ if (vlan->vid == 0)
++ return 0;
++
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ vl = &dev->vlans[vlan->vid];
+@@ -1936,8 +1939,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ bool *tx_fwd_offload, struct netlink_ext_ack *extack)
+ {
+ struct b53_device *dev = ds->priv;
++ struct b53_vlan *vl;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+- u16 pvlan, reg;
++ u16 pvlan, reg, pvid;
+ unsigned int i;
+
+ /* On 7278, port 7 which connects to the ASP should only receive
+@@ -1946,6 +1950,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ if (dev->chip_id == BCM7278_DEVICE_ID && port == 7)
+ return -EINVAL;
+
++ pvid = b53_default_pvid(dev);
++ vl = &dev->vlans[pvid];
++
+ /* Make this port leave the all VLANs join since we will have proper
+ * VLAN entries from now on
+ */
+@@ -1957,6 +1964,15 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
+
++ if (ds->vlan_filtering) {
++ b53_get_vlan_entry(dev, pvid, vl);
++ vl->members &= ~BIT(port);
++ if (vl->members == BIT(cpu_port))
++ vl->members &= ~BIT(cpu_port);
++ vl->untag = vl->members;
++ b53_set_vlan_entry(dev, pvid, vl);
++ }
++
+ b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
+
+ b53_for_each_port(dev, i) {
+@@ -2024,10 +2040,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+ }
+
+- b53_get_vlan_entry(dev, pvid, vl);
+- vl->members |= BIT(port) | BIT(cpu_port);
+- vl->untag |= BIT(port) | BIT(cpu_port);
+- b53_set_vlan_entry(dev, pvid, vl);
++ if (ds->vlan_filtering) {
++ b53_get_vlan_entry(dev, pvid, vl);
++ vl->members |= BIT(port) | BIT(cpu_port);
++ vl->untag |= BIT(port) | BIT(cpu_port);
++ b53_set_vlan_entry(dev, pvid, vl);
++ }
+ }
+ EXPORT_SYMBOL(b53_br_leave);
+
+--
+2.39.5
+
--- /dev/null
+From 8f26b4b81db6145d6ee6f78debf30484b4c0f1d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:07 +0200
+Subject: net: dsa: b53: do not program vlans when vlan filtering is off
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit f089652b6b16452535dcc5cbaa6e2bb05acd3f93 ]
+
+Documentation/networking/switchdev.rst says:
+
+- with VLAN filtering turned off: the bridge is strictly VLAN unaware and its
+ data path will process all Ethernet frames as if they are VLAN-untagged.
+ The bridge VLAN database can still be modified, but the modifications should
+ have no effect while VLAN filtering is turned off.
+
+This breaks if we immediately apply the VLAN configuration, so skip
+writing it when vlan_filtering is off.
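+
+The guard itself is a simple early return in b53_vlan_add() and
+b53_vlan_del() (sketch of the hunks below):
+
+	if (!ds->vlan_filtering)	/* VLAN-unaware: keep hw untouched */
+		return 0;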
+
+Fixes: 0ee2af4ebbe3 ("net: dsa: set configure_vlan_while_not_filtering to true by default")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-9-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 48 +++++++++++++++++++-------------
+ 1 file changed, 28 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 16d6582c931f9..271189cf70dcc 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1548,6 +1548,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (vlan->vid == 0)
+ return 0;
+
++ if (!ds->vlan_filtering)
++ return 0;
++
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
+ if (pvid)
+ new_pvid = vlan->vid;
+@@ -1593,6 +1596,9 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
+ if (vlan->vid == 0)
+ return 0;
+
++ if (!ds->vlan_filtering)
++ return 0;
++
+ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
+
+ vl = &dev->vlans[vlan->vid];
+@@ -1953,18 +1959,20 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+- /* Make this port leave the all VLANs join since we will have proper
+- * VLAN entries from now on
+- */
+- if (is58xx(dev)) {
+- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+- reg &= ~BIT(port);
+- if ((reg & BIT(cpu_port)) == BIT(cpu_port))
+- reg &= ~BIT(cpu_port);
+- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+- }
+-
+ if (ds->vlan_filtering) {
++ /* Make this port leave the all VLANs join since we will have
++ * proper VLAN entries from now on
++ */
++ if (is58xx(dev)) {
++ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
++ &reg);
++ reg &= ~BIT(port);
++ if ((reg & BIT(cpu_port)) == BIT(cpu_port))
++ reg &= ~BIT(cpu_port);
++ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN,
++ reg);
++ }
++
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members &= ~BIT(port);
+ if (vl->members == BIT(cpu_port))
+@@ -2031,16 +2039,16 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+- /* Make this port join all VLANs without VLAN entries */
+- if (is58xx(dev)) {
+- b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+- reg |= BIT(port);
+- if (!(reg & BIT(cpu_port)))
+- reg |= BIT(cpu_port);
+- b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+- }
+-
+ if (ds->vlan_filtering) {
++ /* Make this port join all VLANs without VLAN entries */
++ if (is58xx(dev)) {
++ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
++ reg |= BIT(port);
++ if (!(reg & BIT(cpu_port)))
++ reg |= BIT(cpu_port);
++ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
++ }
++
+ b53_get_vlan_entry(dev, pvid, vl);
+ vl->members |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port) | BIT(cpu_port);
+--
+2.39.5
+
--- /dev/null
+From 60bc336fcbb4db92d5b6be09b9b8c93cd63cc66f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:10 +0200
+Subject: net: dsa: b53: do not set learning and unicast/multicast on up
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 2e7179c628d3cb9aee75e412473813b099e11ed4 ]
+
+When a port gets set up, b53 disables learning and enables the port for
+flooding. This can undo any bridge configuration on the port.
+
+E.g. the following flow would disable learning on a port:
+
+$ ip link add br0 type bridge
+$ ip link set sw1p1 master br0 <- enables learning for sw1p1
+$ ip link set br0 up
+$ ip link set sw1p1 up <- disables learning again
+
+Fix this by populating dsa_switch_ops::port_setup() and setting up the
+initial config there.
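+
+port_setup() runs once while the switch is being set up, not on every
+administrative up, so these defaults no longer clobber a later bridge
+configuration; a sketch of the new hook (matching the hunk below):
+
+	int b53_setup_port(struct dsa_switch *ds, int port)
+	{
+		struct b53_device *dev = ds->priv;
+
+		b53_port_set_ucast_flood(dev, port, true);
+		b53_port_set_mcast_flood(dev, port, true);
+		b53_port_set_learning(dev, port, false);
+
+		return 0;
+	}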
+
+Fixes: f9b3827ee66c ("net: dsa: b53: Support setting learning on port")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-12-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 21 +++++++++++++--------
+ drivers/net/dsa/b53/b53_priv.h | 1 +
+ drivers/net/dsa/bcm_sf2.c | 1 +
+ 3 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 5ac34e6f877db..e072d2b50c987 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -578,6 +578,18 @@ static void b53_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
+ b53_write16(dev, B53_EEE_PAGE, B53_EEE_EN_CTRL, reg);
+ }
+
++int b53_setup_port(struct dsa_switch *ds, int port)
++{
++ struct b53_device *dev = ds->priv;
++
++ b53_port_set_ucast_flood(dev, port, true);
++ b53_port_set_mcast_flood(dev, port, true);
++ b53_port_set_learning(dev, port, false);
++
++ return 0;
++}
++EXPORT_SYMBOL(b53_setup_port);
++
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+ {
+ struct b53_device *dev = ds->priv;
+@@ -590,10 +602,6 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
+
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+- b53_port_set_ucast_flood(dev, port, true);
+- b53_port_set_mcast_flood(dev, port, true);
+- b53_port_set_learning(dev, port, false);
+-
+ if (dev->ops->irq_enable)
+ ret = dev->ops->irq_enable(dev, port);
+ if (ret)
+@@ -724,10 +732,6 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port)
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_CTRL(port), port_ctrl);
+
+ b53_brcm_hdr_setup(dev->ds, port);
+-
+- b53_port_set_ucast_flood(dev, port, true);
+- b53_port_set_mcast_flood(dev, port, true);
+- b53_port_set_learning(dev, port, false);
+ }
+
+ static void b53_enable_mib(struct b53_device *dev)
+@@ -2394,6 +2398,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
+ .phy_read = b53_phy_read16,
+ .phy_write = b53_phy_write16,
+ .phylink_get_caps = b53_phylink_get_caps,
++ .port_setup = b53_setup_port,
+ .port_enable = b53_enable_port,
+ .port_disable = b53_disable_port,
+ .get_mac_eee = b53_get_mac_eee,
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index e9aab4f8d15e1..4f8c97098d2a7 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -382,6 +382,7 @@ enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mprot);
+ void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
++int b53_setup_port(struct dsa_switch *ds, int port);
+ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy);
+ void b53_disable_port(struct dsa_switch *ds, int port);
+ void b53_brcm_hdr_setup(struct dsa_switch *ds, int port);
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index 0e663ec0c12a3..c4771a07878ea 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -1230,6 +1230,7 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
+ .resume = bcm_sf2_sw_resume,
+ .get_wol = bcm_sf2_sw_get_wol,
+ .set_wol = bcm_sf2_sw_set_wol,
++ .port_setup = b53_setup_port,
+ .port_enable = bcm_sf2_port_setup,
+ .port_disable = bcm_sf2_port_disable,
+ .get_mac_eee = b53_get_mac_eee,
+--
+2.39.5
+
--- /dev/null
+From 7bd6065b377395370cbd70921e3e5e87f5671ff5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:02 +0200
+Subject: net: dsa: b53: fix clearing PVID of a port
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit f480851981043d9bb6447ca9883ade9247b9a0ad ]
+
+Currently the PVID of a port is only set when adding/updating VLANs
+with the PVID flag set or when removing VLANs, but not when clearing
+the PVID flag of a VLAN.
+
+E.g. the following flow
+
+$ ip link add br0 type bridge vlan_filtering 1
+$ ip link set sw1p1 master br0
+$ bridge vlan add dev sw1p1 vid 10 pvid untagged
+$ bridge vlan add dev sw1p1 vid 10 untagged
+
+Would keep the PVID set to 10, despite the flag being cleared. Fix
+this by checking whether we need to unset the PVID on VLAN updates.
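+
+The update rule reduces to (sketch of the logic added below):
+
+	if (pvid)				/* flag set: this VID becomes the PVID */
+		new_pvid = vlan->vid;
+	else if (vlan->vid == old_pvid)		/* flag cleared on the current PVID */
+		new_pvid = b53_default_pvid(dev);
+	else					/* unrelated VLAN: keep the PVID */
+		new_pvid = old_pvid;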
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-4-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d66ef7ad7a604..31d20552cdb08 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1538,12 +1538,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct b53_vlan *vl;
++ u16 old_pvid, new_pvid;
+ int err;
+
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
++ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
++ if (pvid)
++ new_pvid = vlan->vid;
++ else if (!pvid && vlan->vid == old_pvid)
++ new_pvid = b53_default_pvid(dev);
++ else
++ new_pvid = old_pvid;
++
+ vl = &dev->vlans[vlan->vid];
+
+ b53_get_vlan_entry(dev, vlan->vid, vl);
+@@ -1563,9 +1572,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+- if (pvid && !dsa_is_cpu_port(ds, port)) {
++ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+- vlan->vid);
++ new_pvid);
+ b53_fast_age_vlan(dev, vlan->vid);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 6e9e5732edd40637867f3007a8f1f95789b6c4b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:03 +0200
+Subject: net: dsa: b53: fix flushing old pvid VLAN on pvid change
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 083c6b28c0cbcd83b6af1a10f2c82937129b3438 ]
+
+Presumably the intention here was to flush the VLAN of the old pvid, not
+the added VLAN again, which we already flushed before.
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-5-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 31d20552cdb08..d450100c1d020 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1575,7 +1575,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ new_pvid);
+- b53_fast_age_vlan(dev, vlan->vid);
++ b53_fast_age_vlan(dev, old_pvid);
+ }
+
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From 4e698ecc45598c4463eecc2563148b4270d6e908 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:09 +0200
+Subject: net: dsa: b53: fix learning on VLAN unaware bridges
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 9f34ad89bcf0e6df6f8b01f1bdab211493fc66d1 ]
+
+When VLAN filtering is off, we configure the switch to forward, but not
+learn on VLAN table misses. This effectively disables learning while not
+filtering.
+
+Fix this by switching to forward and learn. Setting the learning disable
+register will still control whether learning actually happens.
+
+Fixes: dad8d7c6452b ("net: dsa: b53: Properly account for VLAN filtering")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-11-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d790fdf9fa3b4..5ac34e6f877db 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -383,7 +383,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
++ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 27ffe98d9377cdba890801e4d6a103a645456589 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:08 +0200
+Subject: net: dsa: b53: fix toggling vlan_filtering
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 2dc2bd57111582895e10f54ea380329c89873f1c ]
+
+To allow runtime switching between VLAN-aware and VLAN-unaware mode,
+we need to properly keep track of any bridge VLAN configuration.
+Likewise, we need to know when we actually switch between the two
+modes, so we do not have to rewrite the full VLAN table every time we
+update the VLANs.
+
+So keep track of the current vlan_filtering mode, and on changes, apply
+the appropriate VLAN configuration.
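+
+The mode switch itself then reduces to a tracked-state comparison
+(sketch of the b53_vlan_filtering() hunk below):
+
+	if (dev->vlan_filtering != vlan_filtering) {
+		dev->vlan_filtering = vlan_filtering;
+		b53_apply_config(dev);	/* rewrite VLAN config only on change */
+	}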
+
+Fixes: 0ee2af4ebbe3 ("net: dsa: set configure_vlan_while_not_filtering to true by default")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-10-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 104 ++++++++++++++++++++++---------
+ drivers/net/dsa/b53/b53_priv.h | 2 +
+ 2 files changed, 75 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 271189cf70dcc..d790fdf9fa3b4 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -763,6 +763,22 @@ static bool b53_vlan_port_needs_forced_tagged(struct dsa_switch *ds, int port)
+ return dev->tag_protocol == DSA_TAG_PROTO_NONE && dsa_is_cpu_port(ds, port);
+ }
+
++static bool b53_vlan_port_may_join_untagged(struct dsa_switch *ds, int port)
++{
++ struct b53_device *dev = ds->priv;
++ struct dsa_port *dp;
++
++ if (!dev->vlan_filtering)
++ return true;
++
++ dp = dsa_to_port(ds, port);
++
++ if (dsa_port_is_cpu(dp))
++ return true;
++
++ return dp->bridge == NULL;
++}
++
+ int b53_configure_vlan(struct dsa_switch *ds)
+ {
+ struct b53_device *dev = ds->priv;
+@@ -781,7 +797,7 @@ int b53_configure_vlan(struct dsa_switch *ds)
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+- b53_enable_vlan(dev, -1, dev->vlan_enabled, ds->vlan_filtering);
++ b53_enable_vlan(dev, -1, dev->vlan_enabled, dev->vlan_filtering);
+
+ /* Create an untagged VLAN entry for the default PVID in case
+ * CONFIG_VLAN_8021Q is disabled and there are no calls to
+@@ -789,26 +805,39 @@ int b53_configure_vlan(struct dsa_switch *ds)
+ * entry. Do this only when the tagging protocol is not
+ * DSA_TAG_PROTO_NONE
+ */
++ v = &dev->vlans[def_vid];
+ b53_for_each_port(dev, i) {
+- v = &dev->vlans[def_vid];
+- v->members |= BIT(i);
++ if (!b53_vlan_port_may_join_untagged(ds, i))
++ continue;
++
++ vl.members |= BIT(i);
+ if (!b53_vlan_port_needs_forced_tagged(ds, i))
+- v->untag = v->members;
+- b53_write16(dev, B53_VLAN_PAGE,
+- B53_VLAN_PORT_DEF_TAG(i), def_vid);
++ vl.untag = vl.members;
++ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(i),
++ def_vid);
+ }
++ b53_set_vlan_entry(dev, def_vid, &vl);
+
+- /* Upon initial call we have not set-up any VLANs, but upon
+- * system resume, we need to restore all VLAN entries.
+- */
+- for (vid = def_vid; vid < dev->num_vlans; vid++) {
+- v = &dev->vlans[vid];
++ if (dev->vlan_filtering) {
++ /* Upon initial call we have not set-up any VLANs, but upon
++ * system resume, we need to restore all VLAN entries.
++ */
++ for (vid = def_vid + 1; vid < dev->num_vlans; vid++) {
++ v = &dev->vlans[vid];
+
+- if (!v->members)
+- continue;
++ if (!v->members)
++ continue;
++
++ b53_set_vlan_entry(dev, vid, v);
++ b53_fast_age_vlan(dev, vid);
++ }
+
+- b53_set_vlan_entry(dev, vid, v);
+- b53_fast_age_vlan(dev, vid);
++ b53_for_each_port(dev, i) {
++ if (!dsa_is_cpu_port(ds, i))
++ b53_write16(dev, B53_VLAN_PAGE,
++ B53_VLAN_PORT_DEF_TAG(i),
++ dev->ports[i].pvid);
++ }
+ }
+
+ return 0;
+@@ -1128,7 +1157,9 @@ EXPORT_SYMBOL(b53_setup_devlink_resources);
+ static int b53_setup(struct dsa_switch *ds)
+ {
+ struct b53_device *dev = ds->priv;
++ struct b53_vlan *vl;
+ unsigned int port;
++ u16 pvid;
+ int ret;
+
+ /* Request bridge PVID untagged when DSA_TAG_PROTO_NONE is set
+@@ -1147,6 +1178,15 @@ static int b53_setup(struct dsa_switch *ds)
+ return ret;
+ }
+
++ /* setup default vlan for filtering mode */
++ pvid = b53_default_pvid(dev);
++ vl = &dev->vlans[pvid];
++ b53_for_each_port(dev, port) {
++ vl->members |= BIT(port);
++ if (!b53_vlan_port_needs_forced_tagged(ds, port))
++ vl->untag |= BIT(port);
++ }
++
+ b53_reset_mib(dev);
+
+ ret = b53_apply_config(dev);
+@@ -1500,7 +1540,10 @@ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
+ {
+ struct b53_device *dev = ds->priv;
+
+- b53_enable_vlan(dev, port, dev->vlan_enabled, vlan_filtering);
++ if (dev->vlan_filtering != vlan_filtering) {
++ dev->vlan_filtering = vlan_filtering;
++ b53_apply_config(dev);
++ }
+
+ return 0;
+ }
+@@ -1525,7 +1568,7 @@ static int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ if (vlan->vid >= dev->num_vlans)
+ return -ERANGE;
+
+- b53_enable_vlan(dev, port, true, ds->vlan_filtering);
++ b53_enable_vlan(dev, port, true, dev->vlan_filtering);
+
+ return 0;
+ }
+@@ -1548,21 +1591,17 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (vlan->vid == 0)
+ return 0;
+
+- if (!ds->vlan_filtering)
+- return 0;
+-
+- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
++ old_pvid = dev->ports[port].pvid;
+ if (pvid)
+ new_pvid = vlan->vid;
+ else if (!pvid && vlan->vid == old_pvid)
+ new_pvid = b53_default_pvid(dev);
+ else
+ new_pvid = old_pvid;
++ dev->ports[port].pvid = new_pvid;
+
+ vl = &dev->vlans[vlan->vid];
+
+- b53_get_vlan_entry(dev, vlan->vid, vl);
+-
+ if (dsa_is_cpu_port(ds, port))
+ untagged = false;
+
+@@ -1572,6 +1611,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ else
+ vl->untag &= ~BIT(port);
+
++ if (!dev->vlan_filtering)
++ return 0;
++
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+@@ -1596,23 +1638,22 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
+ if (vlan->vid == 0)
+ return 0;
+
+- if (!ds->vlan_filtering)
+- return 0;
+-
+- b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
++ pvid = dev->ports[port].pvid;
+
+ vl = &dev->vlans[vlan->vid];
+
+- b53_get_vlan_entry(dev, vlan->vid, vl);
+-
+ vl->members &= ~BIT(port);
+
+ if (pvid == vlan->vid)
+ pvid = b53_default_pvid(dev);
++ dev->ports[port].pvid = pvid;
+
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag &= ~(BIT(port));
+
++ if (!dev->vlan_filtering)
++ return 0;
++
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+@@ -1959,7 +2000,7 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+- if (ds->vlan_filtering) {
++ if (dev->vlan_filtering) {
+ /* Make this port leave the all VLANs join since we will have
+ * proper VLAN entries from now on
+ */
+@@ -2039,7 +2080,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ pvid = b53_default_pvid(dev);
+ vl = &dev->vlans[pvid];
+
+- if (ds->vlan_filtering) {
++ if (dev->vlan_filtering) {
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+ b53_read16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, &reg);
+@@ -2797,6 +2838,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
+ ds->ops = &b53_switch_ops;
+ ds->phylink_mac_ops = &b53_phylink_mac_ops;
+ dev->vlan_enabled = true;
++ dev->vlan_filtering = false;
+ /* Let DSA handle the case were multiple bridges span the same switch
+ * device and different VLAN awareness settings are requested, which
+ * would be breaking filtering semantics for any of the other bridge
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index 05141176daf50..e9aab4f8d15e1 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -95,6 +95,7 @@ struct b53_pcs {
+
+ struct b53_port {
+ u16 vlan_ctl_mask;
++ u16 pvid;
+ struct ethtool_keee eee;
+ };
+
+@@ -146,6 +147,7 @@ struct b53_device {
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
+ bool vlan_enabled;
++ bool vlan_filtering;
+ unsigned int num_ports;
+ struct b53_port *ports;
+
+--
+2.39.5
+
--- /dev/null
+From bed26ba06e69077a210590ea82634ae687cf1038 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:04 +0200
+Subject: net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit a1c1901c5cc881425cc45992ab6c5418174e9e5a ]
+
+The untagged default VLAN is added to the default VLAN, which may be
+VLAN 1, but we modify the VLAN 0 entry on bridge leave.
+
+Fix this to use the correct VLAN entry for the default pvid.
+
+Fixes: fea83353177a ("net: dsa: b53: Fix default VLAN ID")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-6-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index d450100c1d020..25afafc4bfc7f 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1987,7 +1987,7 @@ EXPORT_SYMBOL(b53_br_join);
+ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ {
+ struct b53_device *dev = ds->priv;
+- struct b53_vlan *vl = &dev->vlans[0];
++ struct b53_vlan *vl;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+@@ -2013,6 +2013,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ pvid = b53_default_pvid(dev);
++ vl = &dev->vlans[pvid];
+
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+--
+2.39.5
+
--- /dev/null
+From 39133c6b8bcbbf22946a7343592c619885d0b519 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:01 +0200
+Subject: net: dsa: b53: keep CPU port always tagged again
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 425f11d4cc9bd9e97e6825d9abb2c51a068ca7b5 ]
+
+The Broadcom management header does not carry the original VLAN tag
+state information, just the ingress port, so for untagged frames we do
+not know from which VLAN they originated.
+
+Therefore keep the CPU port always tagged except for VLAN 0.
+
+Fixes the following setup:
+
+$ ip link add br0 type bridge vlan_filtering 1
+$ ip link set sw1p1 master br0
+$ bridge vlan add dev br0 pvid untagged self
+$ ip link add sw1p2.10 link sw1p2 type vlan id 10
+
+Where VID 10 would stay untagged on the CPU port.
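+
+In b53_vlan_add() this means the CPU port never joins a VID > 0
+untagged (sketch of the hunk below):
+
+	if (vlan->vid > 0 && dsa_is_cpu_port(ds, port))
+		untagged = false;	/* keep the tag so the VLAN stays recoverable */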
+
+Fixes: 2c32a3d3c233 ("net: dsa: b53: Do not force CPU to be always tagged")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-3-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index f327fdeb81850..d66ef7ad7a604 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1136,6 +1136,11 @@ static int b53_setup(struct dsa_switch *ds)
+ */
+ ds->untag_bridge_pvid = dev->tag_protocol == DSA_TAG_PROTO_NONE;
+
++ /* The switch does not tell us the original VLAN for untagged
++ * packets, so keep the CPU port always tagged.
++ */
++ ds->untag_vlan_aware_bridge_pvid = true;
++
+ ret = b53_reset_switch(dev);
+ if (ret) {
+ dev_err(ds->dev, "failed to reset switch\n");
+@@ -1546,6 +1551,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (vlan->vid == 0 && vlan->vid == b53_default_pvid(dev))
+ untagged = true;
+
++ if (vlan->vid > 0 && dsa_is_cpu_port(ds, port))
++ untagged = false;
++
+ vl->members |= BIT(port);
+ if (untagged && !b53_vlan_port_needs_forced_tagged(ds, port))
+ vl->untag |= BIT(port);
+--
+2.39.5
+
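The essence of the b53_vlan_add() hunk is a bitmask rule: for any VID
above 0, the CPU port joins the VLAN's member mask but is kept out of
the untag mask. A minimal sketch, with an assumed CPU port index:

  #include <stdio.h>

  #define BIT(n) (1u << (n))
  #define CPU_PORT 8  /* assumed CPU port index */

  struct vlan {
          unsigned int members;
          unsigned int untag;
  };

  static void vlan_add(struct vlan *vl, int vid, int port, int untagged)
  {
          /* mirror of the fix: the CPU port never egresses untagged
           * for VID > 0, since the tag is the only VLAN information
           * the Broadcom management header does not carry */
          if (vid > 0 && port == CPU_PORT)
                  untagged = 0;

          vl->members |= BIT(port);
          if (untagged)
                  vl->untag |= BIT(port);
          else
                  vl->untag &= ~BIT(port);
  }

  int main(void)
  {
          struct vlan v10 = { 0, 0 };

          vlan_add(&v10, 10, 2, 1);         /* user port: untagged ok */
          vlan_add(&v10, 10, CPU_PORT, 1);  /* request downgraded */
          printf("members=0x%03x untag=0x%03x\n", v10.members, v10.untag);
          /* members=0x104 untag=0x004: CPU port member, still tagged */
          return 0;
  }
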
--- /dev/null
+From 1a518d728e8d19d9400d694783770d9ce9ec67fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 02:07:58 +0100
+Subject: net: ethernet: mtk_eth_soc: do not reset PSE when setting FE
+
+From: Frank Wunderlich <frank-w@public-files.de>
+
+[ Upstream commit e8716b5b0dff1b3d523b4a83fd5e94d57b887c5c ]
+
+Remove the redundant PSE reset.
+When setting the FE register there is no need to reset the PSE;
+doing so may cause the FE to work abnormally.
+
+Link: https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/3a5223473e086a4b54a2b9a44df7d9ddcc2bc75a
+Fixes: dee4dd10c79aa ("net: ethernet: mtk_eth_soc: ppe: add support for multiple PPEs")
+Signed-off-by: Frank Wunderlich <frank-w@public-files.de>
+Link: https://patch.msgid.link/18f0ac7d83f82defa3342c11ef0d1362f6b81e88.1746406763.git.daniel@makrotopia.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index d50017012ca14..0a13f7c4684e0 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3427,9 +3427,6 @@ static int mtk_open(struct net_device *dev)
+ }
+ mtk_gdm_config(eth, target_mac->id, gdm_config);
+ }
+- /* Reset and enable PSE */
+- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+- mtk_w32(eth, 0, MTK_RST_GL);
+
+ napi_enable(ð->tx_napi);
+ napi_enable(ð->rx_napi);
+--
+2.39.5
+
--- /dev/null
+From 45146f5ad1138ef32e1d1e34afa1fac4a7f67871 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 02:07:32 +0100
+Subject: net: ethernet: mtk_eth_soc: reset all TX queues on DMA free
+
+From: Daniel Golle <daniel@makrotopia.org>
+
+[ Upstream commit 4db6c75124d871fbabf8243f947d34cc7e0697fc ]
+
+The purpose of resetting the TX queue is to reset the byte and packet
+count as well as to clear the software flow control XOFF bit.
+
+MediaTek developers pointed out that netdev_reset_queue only resets
+queue 0 of the network device.
+
+Queues that are not reset may cause unexpected issues: packets may
+stop being sent after a reset and a "transmit timeout" may be logged.
+
+Import the fix from MediaTek's SDK to resolve this.
+
+Link: https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/319c0d9905579a46dc448579f892f364f1f84818
+Fixes: f63959c7eec31 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Link: https://patch.msgid.link/c9ff9adceac4f152239a0f65c397f13547639175.1746406763.git.daniel@makrotopia.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index c5d5b9ff8bc42..d50017012ca14 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3140,11 +3140,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
+ static void mtk_dma_free(struct mtk_eth *eth)
+ {
+ const struct mtk_soc_data *soc = eth->soc;
+- int i;
++ int i, j, txqs = 1;
++
++ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
++ txqs = MTK_QDMA_NUM_QUEUES;
++
++ for (i = 0; i < MTK_MAX_DEVS; i++) {
++ if (!eth->netdev[i])
++ continue;
++
++ for (j = 0; j < txqs; j++)
++ netdev_tx_reset_subqueue(eth->netdev[i], j);
++ }
+
+- for (i = 0; i < MTK_MAX_DEVS; i++)
+- if (eth->netdev[i])
+- netdev_reset_queue(eth->netdev[i]);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
+--
+2.39.5
+
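The shape of the fix is easy to see outside the driver: reset every TX
subqueue, not only queue 0, so no queue is left with a stale byte count
or XOFF flag. A small stand-in model (queue count and fields are
assumptions):

  #include <stdio.h>

  #define NUM_TXQ 4  /* assumed; the driver uses MTK_QDMA_NUM_QUEUES */

  struct txq {
          unsigned long bytes, pkts;
          int xoff;  /* software flow control: queue stopped */
  };

  static void reset_subqueue(struct txq *q)
  {
          q->bytes = 0;
          q->pkts = 0;
          q->xoff = 0;
  }

  int main(void)
  {
          struct txq txq[NUM_TXQ] = {
                  { 100, 1, 0 }, { 200, 2, 1 }, { 300, 3, 1 }, { 0, 0, 0 },
          };
          int i;

          /* old shape: netdev_reset_queue() touches only queue 0,
           * queues 1..3 keep a stale XOFF and never transmit again */
          reset_subqueue(&txq[0]);

          /* fixed shape: walk every subqueue, like the loop the patch
           * adds around netdev_tx_reset_subqueue() */
          for (i = 0; i < NUM_TXQ; i++)
                  reset_subqueue(&txq[i]);

          for (i = 0; i < NUM_TXQ; i++)
                  printf("q%d xoff=%d\n", i, txq[i].xoff);
          return 0;
  }
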
--- /dev/null
+From a97c8a0f53d9cbe11c95acba9b96a4171adb8b28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 17:32:20 -0700
+Subject: net: export a helper for adding up queue stats
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 23fa6a23d97182d36ca3c71e43c804fa91e46a03 ]
+
+Older drivers and drivers with lower queue counts often have a static
+array of queues, rather than allocating structs for each queue on demand.
+Add a helper for adding up qstats from a queue range. The expectation
+is that drivers will pass a queue range [netdev->real_num_*x_queues, MAX).
+It was tempting to always use num_*x_queues as the end, but virtio
+seems to clamp its queue count after allocating the netdev. And this
+way we can trivially reuse the helper for [0, real_..).
+
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250507003221.823267-2-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 001160ec8c59 ("virtio-net: fix total qstat values")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netdev_queues.h | 6 ++++
+ net/core/netdev-genl.c | 69 +++++++++++++++++++++++++++----------
+ 2 files changed, 56 insertions(+), 19 deletions(-)
+
+diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h
+index 5ca019d294ca3..173bcfcd868a8 100644
+--- a/include/net/netdev_queues.h
++++ b/include/net/netdev_queues.h
+@@ -92,6 +92,12 @@ struct netdev_stat_ops {
+ struct netdev_queue_stats_tx *tx);
+ };
+
++void netdev_stat_queue_sum(struct net_device *netdev,
++ int rx_start, int rx_end,
++ struct netdev_queue_stats_rx *rx_sum,
++ int tx_start, int tx_end,
++ struct netdev_queue_stats_tx *tx_sum);
++
+ /**
+ * struct netdev_queue_mgmt_ops - netdev ops for queue management
+ *
+diff --git a/net/core/netdev-genl.c b/net/core/netdev-genl.c
+index ad426b3a03b52..0fe537781bc4d 100644
+--- a/net/core/netdev-genl.c
++++ b/net/core/netdev-genl.c
+@@ -616,25 +616,66 @@ netdev_nl_stats_by_queue(struct net_device *netdev, struct sk_buff *rsp,
+ return 0;
+ }
+
++/**
++ * netdev_stat_queue_sum() - add up queue stats from range of queues
++ * @netdev: net_device
++ * @rx_start: index of the first Rx queue to query
++ * @rx_end: index after the last Rx queue (first *not* to query)
++ * @rx_sum: output Rx stats, should be already initialized
++ * @tx_start: index of the first Tx queue to query
++ * @tx_end: index after the last Tx queue (first *not* to query)
++ * @tx_sum: output Tx stats, should be already initialized
++ *
++ * Add stats from [start, end) range of queue IDs to *x_sum structs.
++ * The sum structs must be already initialized. Usually this
++ * helper is invoked from the .get_base_stats callbacks of drivers
++ * to account for stats of disabled queues. In that case the ranges
++ * are usually [netdev->real_num_*x_queues, netdev->num_*x_queues).
++ */
++void netdev_stat_queue_sum(struct net_device *netdev,
++ int rx_start, int rx_end,
++ struct netdev_queue_stats_rx *rx_sum,
++ int tx_start, int tx_end,
++ struct netdev_queue_stats_tx *tx_sum)
++{
++ const struct netdev_stat_ops *ops;
++ struct netdev_queue_stats_rx rx;
++ struct netdev_queue_stats_tx tx;
++ int i;
++
++ ops = netdev->stat_ops;
++
++ for (i = rx_start; i < rx_end; i++) {
++ memset(&rx, 0xff, sizeof(rx));
++ if (ops->get_queue_stats_rx)
++ ops->get_queue_stats_rx(netdev, i, &rx);
++ netdev_nl_stats_add(rx_sum, &rx, sizeof(rx));
++ }
++ for (i = tx_start; i < tx_end; i++) {
++ memset(&tx, 0xff, sizeof(tx));
++ if (ops->get_queue_stats_tx)
++ ops->get_queue_stats_tx(netdev, i, &tx);
++ netdev_nl_stats_add(tx_sum, &tx, sizeof(tx));
++ }
++}
++EXPORT_SYMBOL(netdev_stat_queue_sum);
++
+ static int
+ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ const struct genl_info *info)
+ {
+- struct netdev_queue_stats_rx rx_sum, rx;
+- struct netdev_queue_stats_tx tx_sum, tx;
+- const struct netdev_stat_ops *ops;
++ struct netdev_queue_stats_rx rx_sum;
++ struct netdev_queue_stats_tx tx_sum;
+ void *hdr;
+- int i;
+
+- ops = netdev->stat_ops;
+ /* Netdev can't guarantee any complete counters */
+- if (!ops->get_base_stats)
++ if (!netdev->stat_ops->get_base_stats)
+ return 0;
+
+ memset(&rx_sum, 0xff, sizeof(rx_sum));
+ memset(&tx_sum, 0xff, sizeof(tx_sum));
+
+- ops->get_base_stats(netdev, &rx_sum, &tx_sum);
++ netdev->stat_ops->get_base_stats(netdev, &rx_sum, &tx_sum);
+
+ /* The op was there, but nothing reported, don't bother */
+ if (!memchr_inv(&rx_sum, 0xff, sizeof(rx_sum)) &&
+@@ -647,18 +688,8 @@ netdev_nl_stats_by_netdev(struct net_device *netdev, struct sk_buff *rsp,
+ if (nla_put_u32(rsp, NETDEV_A_QSTATS_IFINDEX, netdev->ifindex))
+ goto nla_put_failure;
+
+- for (i = 0; i < netdev->real_num_rx_queues; i++) {
+- memset(&rx, 0xff, sizeof(rx));
+- if (ops->get_queue_stats_rx)
+- ops->get_queue_stats_rx(netdev, i, &rx);
+- netdev_nl_stats_add(&rx_sum, &rx, sizeof(rx));
+- }
+- for (i = 0; i < netdev->real_num_tx_queues; i++) {
+- memset(&tx, 0xff, sizeof(tx));
+- if (ops->get_queue_stats_tx)
+- ops->get_queue_stats_tx(netdev, i, &tx);
+- netdev_nl_stats_add(&tx_sum, &tx, sizeof(tx));
+- }
++ netdev_stat_queue_sum(netdev, 0, netdev->real_num_rx_queues, &rx_sum,
++ 0, netdev->real_num_tx_queues, &tx_sum);
+
+ if (netdev_nl_stats_write_rx(rsp, &rx_sum) ||
+ netdev_nl_stats_write_tx(rsp, &tx_sum))
+--
+2.39.5
+
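The helper relies on the qstat convention that an all-0xff u64 field
means "not reported". A userspace sketch of that sentinel arithmetic
(the two-field struct is a stand-in, and netdev_nl_stats_add() may
differ in detail):

  #include <stdio.h>
  #include <stdint.h>
  #include <stddef.h>

  #define UNSET (~(uint64_t)0)  /* a 0xff-filled field: not reported */

  /* assumed two-field stand-in for struct netdev_queue_stats_rx */
  struct rx_stats { uint64_t packets, bytes; };

  /* sentinel-aware accumulate, modeled on netdev_nl_stats_add() */
  static void stats_add(uint64_t *sum, const uint64_t *add, size_t n)
  {
          size_t i;

          for (i = 0; i < n; i++) {
                  if (add[i] == UNSET)
                          continue;  /* queue didn't report this field */
                  sum[i] = sum[i] == UNSET ? add[i] : sum[i] + add[i];
          }
  }

  int main(void)
  {
          struct rx_stats sum = { UNSET, UNSET };  /* memset(.., 0xff, ..) */
          struct rx_stats q0 = { 10, 1000 };
          struct rx_stats q1 = { 5, UNSET };  /* bytes unsupported */

          stats_add((uint64_t *)&sum, (const uint64_t *)&q0, 2);
          stats_add((uint64_t *)&sum, (const uint64_t *)&q1, 2);
          printf("packets=%llu bytes=%llu\n",
                 (unsigned long long)sum.packets,
                 (unsigned long long)sum.bytes);  /* 15 and 1000 */
          return 0;
  }

An all-UNSET result after summing means nothing was reported at all,
which the netlink caller detects with memchr_inv() and skips.
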
--- /dev/null
+From 5e614fec7c70dd400e9bb6cf83cad9f0a554185c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 17:01:59 +0200
+Subject: netfilter: ipset: fix region locking in hash types
+
+From: Jozsef Kadlecsik <kadlec@netfilter.org>
+
+[ Upstream commit 8478a729c0462273188263136880480729e9efca ]
+
+Region locking introduced in v5.6-rc4 contained three macros to handle
+the region locks: ahash_bucket_start(), ahash_bucket_end() which gave
+back the start and end hash bucket values belonging to a given region
+lock and ahash_region() which should give back the region lock belonging
+to a given hash bucket. The latter was incorrect, which could lead to
+a race condition between the garbage collector and the addition of new
+elements when a hash type of set is defined with timeouts.
+
+Fixes: f66ee0410b1c ("netfilter: ipset: Fix "INFO: rcu detected stall in hash_xxx" reports")
+Reported-by: Kota Toda <kota.toda@gmo-cybersecurity.com>
+Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index cf3ce72c3de64..5251524b96afa 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -64,7 +64,7 @@ struct hbucket {
+ #define ahash_sizeof_regions(htable_bits) \
+ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+ #define ahash_region(n, htable_bits) \
+- ((n) % ahash_numof_locks(htable_bits))
++ ((n) / jhash_size(HTABLE_REGION_BITS))
+ #define ahash_bucket_start(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 0 \
+ : (h) * jhash_size(HTABLE_REGION_BITS))
+--
+2.39.5
+
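The incorrect macro can be demonstrated with plain arithmetic. In the
sketch below the helper definitions are assumed to match
ip_set_hash_gen.h (where HTABLE_REGION_BITS is 10); with 4096 buckets,
bucket 1024 belongs to the range guarded by region lock 1, yet the old
modulo mapping hands out lock 0:

  #include <stdio.h>

  /* assumed to mirror ip_set_hash_gen.h */
  #define HTABLE_REGION_BITS 10
  #define jhash_size(b) ((unsigned int)1 << (b))
  #define ahash_numof_locks(bits) \
          ((bits) < HTABLE_REGION_BITS ? 1 : \
           jhash_size((bits) - HTABLE_REGION_BITS))
  #define ahash_bucket_start(h, bits) \
          ((bits) < HTABLE_REGION_BITS ? 0 : \
           (h) * jhash_size(HTABLE_REGION_BITS))

  static unsigned int region_old(unsigned int n, unsigned int bits)
  {
          return n % ahash_numof_locks(bits);  /* buggy mapping */
  }

  static unsigned int region_new(unsigned int n)
  {
          return n / jhash_size(HTABLE_REGION_BITS);  /* fixed mapping */
  }

  int main(void)
  {
          unsigned int bits = 12;  /* 4096 buckets, 4 region locks */
          unsigned int n = 1024;   /* first bucket of region 1 */

          printf("region 1 covers buckets starting at %u\n",
                 ahash_bucket_start(1, bits));
          printf("bucket %u: old lock=%u, new lock=%u\n",
                 n, region_old(n, bits), region_new(n));
          /* old=0, new=1: the gc walks bucket 1024 under lock 1 while
           * an add can take lock 0 for the same bucket -- a race */
          return 0;
  }
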
--- /dev/null
+From a4a807fe897fe0034209e45bd02dc32e0d8927b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 17:07:01 +0200
+Subject: s390/entry: Fix last breaking event handling in case of stack
+ corruption
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit ae952eea6f4a7e2193f8721a5366049946e012e7 ]
+
+In case of stack corruption stack_invalid() is called and the expectation
+is that register r10 contains the last breaking event address. This
+dependency is quite subtle and broke a couple of years ago without
+anybody noticing.
+
+Fix this by getting rid of the dependency and reading the last breaking
+event address from lowcore.
+
+Fixes: 56e62a737028 ("s390: convert to generic entry")
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index a7de838f80318..669d335c87aba 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -636,7 +636,8 @@ SYM_CODE_START(stack_overflow)
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
++ GET_LC %r2
++ mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ jg kernel_stack_overflow
+--
+2.39.5
+
--- /dev/null
+From 76e47e1d3c0f1c9c050ebe701d5d7ebb61b51828 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 16:29:54 -0700
+Subject: sch_htb: make htb_deactivate() idempotent
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 3769478610135e82b262640252d90f6efb05be71 ]
+
+Alan reported a NULL pointer dereference in htb_next_rb_node()
+after we made htb_qlen_notify() idempotent.
+
+It turns out that change introduced a regression in the following case:
+
+htb_dequeue_tree():
+ |-> fq_codel_dequeue()
+ |-> qdisc_tree_reduce_backlog()
+ |-> htb_qlen_notify()
+ |-> htb_deactivate()
+ |-> htb_next_rb_node()
+ |-> htb_deactivate()
+
+For htb_next_rb_node(), after the first htb_deactivate() call, the
+clprio[prio]->ptr could already be set to NULL, which means
+htb_next_rb_node() is vulnerable here.
+
+For htb_deactivate(), although we checked qlen before calling it, if
+qlen becomes 0 after qdisc_tree_reduce_backlog(), we may call it again,
+which triggers the warning inside.
+
+To fix the issues here, we need to:
+
+1) Make htb_deactivate() idempotent, that is, simply return if it was
+   already called before.
+2) Make htb_next_rb_node() safe against ptr==NULL.
+
+Many thanks to Alan for testing and for the reproducer.
+
+Fixes: 5ba8b837b522 ("sch_htb: make htb_qlen_notify() idempotent")
+Reported-by: Alan J. Wylie <alan@wylie.me.uk>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Link: https://patch.msgid.link/20250428232955.1740419-2-xiyou.wangcong@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 12cccc84d58a0..b2494d24a5425 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
+ */
+ static inline void htb_next_rb_node(struct rb_node **n)
+ {
+- *n = rb_next(*n);
++ if (*n)
++ *n = rb_next(*n);
+ }
+
+ /**
+@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+ */
+ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
+ {
+- WARN_ON(!cl->prio_activity);
+-
++ if (!cl->prio_activity)
++ return;
+ htb_deactivate_prios(q, cl);
+ cl->prio_activity = 0;
+ }
+@@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ {
+ struct htb_class *cl = (struct htb_class *)arg;
+
+- if (!cl->prio_activity)
+- return;
+ htb_deactivate(qdisc_priv(sch), cl);
+ }
+
+@@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+ if (cl->parent)
+ cl->parent->children--;
+
+- if (cl->prio_activity)
+- htb_deactivate(q, cl);
++ htb_deactivate(q, cl);
+
+ if (cl->cmode != HTB_CAN_SEND)
+ htb_safe_rb_erase(&cl->pq_node,
+@@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+ /* turn parent into inner node */
+ qdisc_purge_queue(parent->leaf.q);
+ parent_qdisc = parent->leaf.q;
+- if (parent->prio_activity)
+- htb_deactivate(q, parent);
++ htb_deactivate(q, parent);
+
+ /* remove from evt list because of level change */
+ if (parent->cmode != HTB_CAN_SEND) {
+--
+2.39.5
+
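Both halves of the fix are generic defensive patterns and can be
sketched without the qdisc machinery (all names below are stand-ins):
a deactivate that returns early when already inactive, and a pointer
advance that tolerates NULL:

  #include <stdio.h>
  #include <stddef.h>

  struct node { struct node *next; };
  struct class { int prio_activity; };

  /* NULL-safe advance, mirroring the htb_next_rb_node() guard */
  static void next_node(struct node **n)
  {
          if (*n)
                  *n = (*n)->next;
  }

  /* idempotent deactivate: a second call is now a no-op instead of
   * tripping WARN_ON(!cl->prio_activity) */
  static void deactivate(struct class *cl)
  {
          if (!cl->prio_activity)
                  return;
          /* ... unlink the class from its prio trees ... */
          cl->prio_activity = 0;
  }

  int main(void)
  {
          struct class cl = { 1 };
          struct node *ptr = NULL;  /* cleared by an earlier deactivate */

          deactivate(&cl);
          deactivate(&cl);  /* nested call via qlen notification: safe */
          next_node(&ptr);  /* ptr == NULL: previously a NULL deref */
          printf("activity=%d ptr=%p\n", cl.prio_activity, (void *)ptr);
          return 0;
  }
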
ksmbd-prevent-out-of-bounds-stream-writes-by-validating-pos.patch
ksmbd-fix-uaf-in-__close_file_table_ids.patch
openvswitch-fix-unsafe-attribute-parsing-in-output_userspace.patch
+ksmbd-fix-memory-leak-in-parse_lease_state.patch
+s390-entry-fix-last-breaking-event-handling-in-case-.patch
+sch_htb-make-htb_deactivate-idempotent.patch
+virtio_net-xsk-bind-unbind-xsk-for-tx.patch
+virtio-net-free-xsk_buffs-on-error-in-virtnet_xsk_po.patch
+gre-fix-again-ipv6-link-local-address-generation.patch
+net-ethernet-mtk_eth_soc-reset-all-tx-queues-on-dma-.patch
+net-ethernet-mtk_eth_soc-do-not-reset-pse-when-setti.patch
+can-m_can-m_can_class_allocate_dev-initialize-spin-l.patch
+can-mcp251xfd-fix-tdc-setting-for-low-data-bit-rates.patch
+can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch
+wifi-mac80211-fix-the-type-of-status_code-for-negoti.patch
+ice-initial-support-for-e825c-hardware-in-ice_adapte.patch
+ice-use-dsn-instead-of-pci-bdf-for-ice_adapter-index.patch
+erofs-ensure-the-extra-temporary-copy-is-valid-for-s.patch
+ipvs-fix-uninit-value-for-saddr-in-do_output_route4.patch
+netfilter-ipset-fix-region-locking-in-hash-types.patch
+bpf-scrub-packet-on-bpf_redirect_peer.patch
+net-dsa-b53-allow-leaky-reserved-multicast.patch
+net-dsa-b53-keep-cpu-port-always-tagged-again.patch
+net-dsa-b53-fix-clearing-pvid-of-a-port.patch
+net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch
+net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch
+net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch
+net-dsa-b53-do-not-allow-to-configure-vlan-0.patch
+net-dsa-b53-do-not-program-vlans-when-vlan-filtering.patch
+net-dsa-b53-fix-toggling-vlan_filtering.patch
+net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch
+net-dsa-b53-do-not-set-learning-and-unicast-multicas.patch
+fbnic-fix-initialization-of-mailbox-descriptor-rings.patch
+fbnic-gate-axi-read-write-enabling-on-fw-mailbox.patch
+fbnic-actually-flush_tx-instead-of-stalling-out.patch
+fbnic-improve-responsiveness-of-fbnic_mbx_poll_tx_re.patch
+fbnic-pull-fbnic_fw_xmit_cap_msg-use-out-of-interrup.patch
+fbnic-do-not-allow-mailbox-to-toggle-to-ready-outsid.patch
+net-export-a-helper-for-adding-up-queue-stats.patch
+virtio-net-fix-total-qstat-values.patch
--- /dev/null
+From 158947cde7ae4165f2f75cea5c3e769a6776c165 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 May 2025 17:32:21 -0700
+Subject: virtio-net: fix total qstat values
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 001160ec8c59115efc39e197d40829bdafd4d7f5 ]
+
+NIPA tests report that the interface statistics reported
+via qstat are lower than those reported via ip link.
+Looks like this is because some tests flip the queue
+count up and down, and we end up with some of the traffic
+accounted on disabled queues.
+
+Add up counters from disabled queues.
+
+Fixes: d888f04c09bb ("virtio-net: support queue stat")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250507003221.823267-3-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 9493b1134875e..fbd1150c33cce 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -5427,6 +5427,10 @@ static void virtnet_get_base_stats(struct net_device *dev,
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
+ tx->hw_drop_ratelimits = 0;
++
++ netdev_stat_queue_sum(dev,
++ dev->real_num_rx_queues, vi->max_queue_pairs, rx,
++ dev->real_num_tx_queues, vi->max_queue_pairs, tx);
+ }
+
+ static const struct netdev_stat_ops virtnet_stat_ops = {
+--
+2.39.5
+
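The fix folds the disabled queue range [real_num_*x_queues, max) into
the base stats, so counters accumulated before a queue-count reduction
are still reported. A toy model of the accounting (queue counts and
values are made up):

  #include <stdio.h>

  #define MAX_QUEUES 8  /* assumed; virtio uses vi->max_queue_pairs */

  int main(void)
  {
          /* packets seen per RX queue before the queue count was
           * lowered from 8 to 4 at runtime (made-up numbers) */
          unsigned long rx_pkts[MAX_QUEUES] = { 10, 20, 30, 40, 5, 6, 0, 0 };
          int real_num_rx = 4;
          unsigned long total = 0;
          int i;

          /* the per-queue pass covers only enabled queues [0, real) */
          for (i = 0; i < real_num_rx; i++)
                  total += rx_pkts[i];
          printf("enabled only: %lu\n", total);   /* 100 */

          /* the fix: base stats also add the disabled range
           * [real, max), as virtnet_get_base_stats() now does via
           * netdev_stat_queue_sum() */
          for (i = real_num_rx; i < MAX_QUEUES; i++)
                  total += rx_pkts[i];
          printf("with disabled: %lu\n", total);  /* 111 */
          return 0;
  }
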
--- /dev/null
+From 9a85877fcb1e98d72dba165a40df8ac516cfa1c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 09:38:36 -0700
+Subject: virtio-net: free xsk_buffs on error in virtnet_xsk_pool_enable()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 4397684a292a71fbc1e815c3e283f7490ddce5ae ]
+
+The selftests recently added to our CI by Bui Quang Minh reveal that
+there is a memory leak on the error path of virtnet_xsk_pool_enable():
+
+unreferenced object 0xffff88800a68a000 (size 2048):
+ comm "xdp_helper", pid 318, jiffies 4294692778
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace (crc 0):
+ __kvmalloc_node_noprof+0x402/0x570
+ virtnet_xsk_pool_enable+0x293/0x6a0 (drivers/net/virtio_net.c:5882)
+ xp_assign_dev+0x369/0x670 (net/xdp/xsk_buff_pool.c:226)
+ xsk_bind+0x6a5/0x1ae0
+ __sys_bind+0x15e/0x230
+ __x64_sys_bind+0x72/0xb0
+ do_syscall_64+0xc1/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Acked-by: Jason Wang <jasowang@redhat.com>
+Fixes: e9f3962441c0 ("virtio_net: xsk: rx: support fill with xsk buffer")
+Link: https://patch.msgid.link/20250430163836.3029761-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 476c8a9cc494a..9493b1134875e 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -5633,8 +5633,10 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+
+ hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
+- if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+- return -ENOMEM;
++ if (virtqueue_dma_mapping_error(sq->vq, hdr_dma)) {
++ err = -ENOMEM;
++ goto err_free_buffs;
++ }
+
+ err = xsk_pool_dma_map(pool, dma_dev, 0);
+ if (err)
+@@ -5662,6 +5664,8 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ err_xsk_map:
+ virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+ DMA_TO_DEVICE, 0);
++err_free_buffs:
++ kvfree(rq->xsk_buffs);
+ return err;
+ }
+
+--
+2.39.5
+
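The patch is a textbook goto-unwind fix: the newly added err_free_buffs
label guarantees the buffer array allocated earlier is released when the
header mapping fails. A self-contained sketch of the pattern (resource
names are illustrative, not the driver's):

  #include <stdio.h>
  #include <stdlib.h>
  #include <errno.h>

  /* stand-in resource steps */
  static int map_header(void) { return 0; }
  static int map_pool(void)   { return -ENOMEM; }  /* pretend it fails */

  static int enable(void)
  {
          void *bufs;
          int err;

          bufs = calloc(16, 64);  /* like the rq->xsk_buffs allocation */
          if (!bufs)
                  return -ENOMEM;

          err = map_header();
          if (err)
                  goto err_free_bufs;  /* the unwind the patch adds */

          err = map_pool();
          if (err)
                  goto err_unmap_header;

          /* on success, bufs stays owned by the queue (not freed here;
           * this sketch never reaches this point) */
          return 0;

  err_unmap_header:
          /* undo map_header() here, newest resource first */
  err_free_bufs:
          free(bufs);  /* before the fix, this was skipped -> leak */
          return err;
  }

  int main(void)
  {
          printf("enable() = %d\n", enable());  /* -12 (ENOMEM) */
          return 0;
  }
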
--- /dev/null
+From 8011d2fd76c26d78e72c1e348dc7872c40867709 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Nov 2024 09:29:24 +0800
+Subject: virtio_net: xsk: bind/unbind xsk for tx
+
+From: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+
+[ Upstream commit 21a4e3ce6dc7b0a3bc882ebe1cb921a40235ddb0 ]
+
+This patch implements the logic to bind/unbind an xsk pool to/from
+the sq and rq.
+
+Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Link: https://patch.msgid.link/20241112012928.102478-10-xuanzhuo@linux.alibaba.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 4397684a292a ("virtio-net: free xsk_buffs on error in virtnet_xsk_pool_enable()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/virtio_net.c | 53 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 53 insertions(+)
+
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 60027b439021b..476c8a9cc494a 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -298,6 +298,10 @@ struct send_queue {
+
+ /* Record whether sq is in reset state. */
+ bool reset;
++
++ struct xsk_buff_pool *xsk_pool;
++
++ dma_addr_t xsk_hdr_dma_addr;
+ };
+
+ /* Internal representation of a receive virtqueue */
+@@ -501,6 +505,8 @@ struct virtio_net_common_hdr {
+ };
+ };
+
++static struct virtio_net_common_hdr xsk_hdr;
++
+ static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+ static void virtnet_sq_free_unused_buf_done(struct virtqueue *vq);
+ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+@@ -5556,6 +5562,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
+ return err;
+ }
+
++static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
++ struct send_queue *sq,
++ struct xsk_buff_pool *pool)
++{
++ int err, qindex;
++
++ qindex = sq - vi->sq;
++
++ virtnet_tx_pause(vi, sq);
++
++ err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
++ if (err) {
++ netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
++ pool = NULL;
++ }
++
++ sq->xsk_pool = pool;
++
++ virtnet_tx_resume(vi, sq);
++
++ return err;
++}
++
+ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+@@ -5564,6 +5593,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ struct receive_queue *rq;
+ struct device *dma_dev;
+ struct send_queue *sq;
++ dma_addr_t hdr_dma;
+ int err, size;
+
+ if (vi->hdr_len > xsk_pool_get_headroom(pool))
+@@ -5601,6 +5631,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ if (!rq->xsk_buffs)
+ return -ENOMEM;
+
++ hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
++ DMA_TO_DEVICE, 0);
++ if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
++ return -ENOMEM;
++
+ err = xsk_pool_dma_map(pool, dma_dev, 0);
+ if (err)
+ goto err_xsk_map;
+@@ -5609,11 +5644,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
+ if (err)
+ goto err_rq;
+
++ err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
++ if (err)
++ goto err_sq;
++
++ /* Now, we do not support tx offload(such as tx csum), so all the tx
++ * virtnet hdr is zero. So all the tx packets can share a single hdr.
++ */
++ sq->xsk_hdr_dma_addr = hdr_dma;
++
+ return 0;
+
++err_sq:
++ virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+ err_rq:
+ xsk_pool_dma_unmap(pool, 0);
+ err_xsk_map:
++ virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
++ DMA_TO_DEVICE, 0);
+ return err;
+ }
+
+@@ -5622,19 +5670,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct xsk_buff_pool *pool;
+ struct receive_queue *rq;
++ struct send_queue *sq;
+ int err;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
++ sq = &vi->sq[qid];
+ rq = &vi->rq[qid];
+
+ pool = rq->xsk_pool;
+
+ err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
++ err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
+
+ xsk_pool_dma_unmap(pool, 0);
+
++ virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
++ vi->hdr_len, DMA_TO_DEVICE, 0);
+ kvfree(rq->xsk_buffs);
+
+ return err;
+--
+2.39.5
+
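virtnet_sq_bind_xsk_pool() follows a quiesce-swap-resume pattern that
also covers unbinding (pool == NULL) and downgrades to unbound when the
queue reset fails. A stripped-down sketch under assumed stand-in names:

  #include <stdio.h>
  #include <stddef.h>

  struct sq {
          int paused;
          void *xsk_pool;
  };

  static void tx_pause(struct sq *sq)  { sq->paused = 1; }
  static void tx_resume(struct sq *sq) { sq->paused = 0; }

  static int queue_reset(struct sq *sq)
  {
          /* a queue may only be reset while traffic is stopped */
          return sq->paused ? 0 : -1;
  }

  /* one helper handles bind (pool != NULL) and unbind (pool == NULL):
   * quiesce, reset, swap the pool pointer, resume */
  static int bind_pool(struct sq *sq, void *pool)
  {
          int err;

          tx_pause(sq);

          err = queue_reset(sq);
          if (err)
                  pool = NULL;  /* on failure, leave the queue unbound */

          sq->xsk_pool = pool;

          tx_resume(sq);
          return err;
  }

  int main(void)
  {
          struct sq sq = { 0, NULL };
          int pool;  /* dummy object standing in for an xsk pool */

          printf("bind: %d pool=%p\n", bind_pool(&sq, &pool), sq.xsk_pool);
          printf("unbind: %d pool=%p\n", bind_pool(&sq, NULL), sq.xsk_pool);
          return 0;
  }
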
--- /dev/null
+From 024c9c5b38833b0bf80135a80157143a12374b98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 16:19:46 +0800
+Subject: wifi: mac80211: fix the type of status_code for negotiated TID to
+ Link Mapping
+
+From: Michael-CY Lee <michael-cy.lee@mediatek.com>
+
+[ Upstream commit e12a42f64fc3d74872b349eedd47f90c6676b78a ]
+
+The status code should be type of __le16.
+
+Fixes: 83e897a961b8 ("wifi: ieee80211: add definitions for negotiated TID to Link map")
+Fixes: 8f500fbc6c65 ("wifi: mac80211: process and save negotiated TID to Link mapping request")
+Signed-off-by: Michael-CY Lee <michael-cy.lee@mediatek.com>
+Link: https://patch.msgid.link/20250505081946.3927214-1-michael-cy.lee@mediatek.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ieee80211.h | 2 +-
+ net/mac80211/mlme.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
+index 3750e56bfcbb3..777f6aa8efa7b 100644
+--- a/include/linux/ieee80211.h
++++ b/include/linux/ieee80211.h
+@@ -1524,7 +1524,7 @@ struct ieee80211_mgmt {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+- u8 status_code;
++ __le16 status_code;
+ u8 variable[];
+ } __packed ttlm_res;
+ struct {
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index ad0d040569dcd..cc8c5d18b130d 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -7177,6 +7177,7 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ int hdr_len = offsetofend(struct ieee80211_mgmt, u.action.u.ttlm_res);
+ int ttlm_max_len = 2 + 1 + sizeof(struct ieee80211_ttlm_elem) + 1 +
+ 2 * 2 * IEEE80211_TTLM_NUM_TIDS;
++ u16 status_code;
+
+ skb = dev_alloc_skb(local->tx_headroom + hdr_len + ttlm_max_len);
+ if (!skb)
+@@ -7199,19 +7200,18 @@ ieee80211_send_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ WARN_ON(1);
+ fallthrough;
+ case NEG_TTLM_RES_REJECT:
+- mgmt->u.action.u.ttlm_res.status_code =
+- WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
++ status_code = WLAN_STATUS_DENIED_TID_TO_LINK_MAPPING;
+ break;
+ case NEG_TTLM_RES_ACCEPT:
+- mgmt->u.action.u.ttlm_res.status_code = WLAN_STATUS_SUCCESS;
++ status_code = WLAN_STATUS_SUCCESS;
+ break;
+ case NEG_TTLM_RES_SUGGEST_PREFERRED:
+- mgmt->u.action.u.ttlm_res.status_code =
+- WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
++ status_code = WLAN_STATUS_PREF_TID_TO_LINK_MAPPING_SUGGESTED;
+ ieee80211_neg_ttlm_add_suggested_map(skb, neg_ttlm);
+ break;
+ }
+
++ mgmt->u.action.u.ttlm_res.status_code = cpu_to_le16(status_code);
+ ieee80211_tx_skb(sdata, skb);
+ }
+
+@@ -7377,7 +7377,7 @@ void ieee80211_process_neg_ttlm_res(struct ieee80211_sub_if_data *sdata,
+ * This can be better implemented in the future, to handle request
+ * rejections.
+ */
+- if (mgmt->u.action.u.ttlm_res.status_code != WLAN_STATUS_SUCCESS)
++ if (le16_to_cpu(mgmt->u.action.u.ttlm_res.status_code) != WLAN_STATUS_SUCCESS)
+ __ieee80211_disconnect(sdata);
+ }
+
+--
+2.39.5
+
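The type change matters in two ways: a u8 field makes the struct one
byte too short, shifting variable[] relative to the over-the-air frame,
and a 16-bit field must be stored little-endian regardless of host byte
order. A sketch with simplified, assumed layouts and an illustrative
status value:

  #include <stdio.h>
  #include <stddef.h>
  #include <stdint.h>

  /* assumed, simplified layouts; only the status field differs */
  struct ttlm_res_u8 {
          uint8_t action_code;
          uint8_t dialog_token;
          uint8_t status_code;   /* buggy: one byte on the wire */
          uint8_t variable[];
  } __attribute__((packed));

  struct ttlm_res_le16 {
          uint8_t action_code;
          uint8_t dialog_token;
          uint16_t status_code;  /* fixed: __le16 in the kernel */
          uint8_t variable[];
  } __attribute__((packed));

  int main(void)
  {
          /* a u8 status makes the header one byte short, so the
           * variable part is read from the wrong offset */
          printf("variable[] offset: u8=%zu le16=%zu\n",
                 offsetof(struct ttlm_res_u8, variable),
                 offsetof(struct ttlm_res_le16, variable));

          /* the 16-bit field must also be stored little-endian on any
           * host, which cpu_to_le16()/le16_to_cpu() guarantee */
          uint16_t status = 134;  /* illustrative value only */
          uint8_t wire[2] = { status & 0xff, status >> 8 };
          printf("le16 on the wire: %02x %02x\n", wire[0], wire[1]);
          return 0;
  }
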