git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.6
authorSasha Levin <sashal@kernel.org>
Sun, 11 May 2025 17:52:40 +0000 (13:52 -0400)
committerSasha Levin <sashal@kernel.org>
Sun, 11 May 2025 17:52:40 +0000 (13:52 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
18 files changed:
queue-6.6/bpf-scrub-packet-on-bpf_redirect_peer.patch [new file with mode: 0644]
queue-6.6/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch [new file with mode: 0644]
queue-6.6/can-mcp251xfd-fix-tdc-setting-for-low-data-bit-rates.patch [new file with mode: 0644]
queue-6.6/gre-fix-again-ipv6-link-local-address-generation.patch [new file with mode: 0644]
queue-6.6/ipvs-fix-uninit-value-for-saddr-in-do_output_route4.patch [new file with mode: 0644]
queue-6.6/ksmbd-fix-memory-leak-in-parse_lease_state.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-allow-leaky-reserved-multicast.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-fix-clearing-pvid-of-a-port.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch [new file with mode: 0644]
queue-6.6/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch [new file with mode: 0644]
queue-6.6/net-ethernet-mtk_eth_soc-reset-all-tx-queues-on-dma-.patch [new file with mode: 0644]
queue-6.6/netdevice-add-netdev_tx_reset_subqueue-shorthand.patch [new file with mode: 0644]
queue-6.6/netfilter-ipset-fix-region-locking-in-hash-types.patch [new file with mode: 0644]
queue-6.6/s390-entry-fix-last-breaking-event-handling-in-case-.patch [new file with mode: 0644]
queue-6.6/sch_htb-make-htb_deactivate-idempotent.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-6.6/bpf-scrub-packet-on-bpf_redirect_peer.patch b/queue-6.6/bpf-scrub-packet-on-bpf_redirect_peer.patch
new file mode 100644 (file)
index 0000000..ac21b3e
--- /dev/null
@@ -0,0 +1,85 @@
+From a652d8149242ad1fa51f840851c54ff1ec22ce8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 21:58:04 +0200
+Subject: bpf: Scrub packet on bpf_redirect_peer
+
+From: Paul Chaignon <paul.chaignon@gmail.com>
+
+[ Upstream commit c4327229948879814229b46aa26a750718888503 ]
+
+When bpf_redirect_peer is used to redirect packets to a device in
+another network namespace, the skb isn't scrubbed. That can lead to skb
+information from one namespace being "misused" in another namespace.
+
+As one example, this is causing Cilium to drop traffic when using
+bpf_redirect_peer to redirect packets that just went through IPsec
+decryption to a container namespace. The following pwru trace shows (1)
+the packet path from the host's XFRM layer to the container's XFRM
+layer where it's dropped and (2) the number of active skb extensions at
+each function.
+
+    NETNS       MARK  IFACE  TUPLE                                FUNC
+    4026533547  d00   eth0   10.244.3.124:35473->10.244.2.158:53  xfrm_rcv_cb
+                             .active_extensions = (__u8)2,
+    4026533547  d00   eth0   10.244.3.124:35473->10.244.2.158:53  xfrm4_rcv_cb
+                             .active_extensions = (__u8)2,
+    4026533547  d00   eth0   10.244.3.124:35473->10.244.2.158:53  gro_cells_receive
+                             .active_extensions = (__u8)2,
+    [...]
+    4026533547  0     eth0   10.244.3.124:35473->10.244.2.158:53  skb_do_redirect
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  ip_rcv
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  ip_rcv_core
+                             .active_extensions = (__u8)2,
+    [...]
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  udp_queue_rcv_one_skb
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  __xfrm_policy_check
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  __xfrm_decode_session
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  security_xfrm_decode_session
+                             .active_extensions = (__u8)2,
+    4026534999  0     eth0   10.244.3.124:35473->10.244.2.158:53  kfree_skb_reason(SKB_DROP_REASON_XFRM_POLICY)
+                             .active_extensions = (__u8)2,
+
+In this case, there are no XFRM policies in the container's network
+namespace so the drop is unexpected. When we decrypt the IPsec packet,
+the XFRM state used for decryption is set in the skb extensions. This
+information is preserved across the netns switch. When we reach the
+XFRM policy check in the container's netns, __xfrm_policy_check drops
+the packet with LINUX_MIB_XFRMINNOPOLS because a (container-side) XFRM
+policy can't be found that matches the (host-side) XFRM state used for
+decryption.
+
+This patch fixes this by scrubbing the packet when using
+bpf_redirect_peer, as is done on typical netns switches via veth
+devices except skb->mark and skb->tstamp are not zeroed.
+
+Fixes: 9aa1206e8f482 ("bpf: Add redirect_peer helper")
+Signed-off-by: Paul Chaignon <paul.chaignon@gmail.com>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/1728ead5e0fe45e7a6542c36bd4e3ca07a73b7d6.1746460653.git.paul.chaignon@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 066277b91a1be..5143c8a9e52ca 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2507,6 +2507,7 @@ int skb_do_redirect(struct sk_buff *skb)
+                       goto out_drop;
+               skb->dev = dev;
+               dev_sw_netstats_rx_add(dev, skb->len);
++              skb_scrub_packet(skb, false);
+               return -EAGAIN;
+       }
+       return flags & BPF_F_NEIGH ?
+-- 
+2.39.5
+
diff --git a/queue-6.6/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch b/queue-6.6/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch
new file mode 100644 (file)
index 0000000..302b895
--- /dev/null
@@ -0,0 +1,366 @@
+From cee7b99ba834f76039313710f61cff6b6e047165 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 09:05:55 +0200
+Subject: can: gw: fix RCU/BH usage in cgw_create_job()
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit 511e64e13d8cc72853275832e3f372607466c18c ]
+
+As reported by Sebastian Andrzej Siewior the use of local_bh_disable()
+is only feasible in uniprocessor systems to update the modification rules.
+The usual use-case to update the modification rules is to update the data
+of the modifications but not the modification types (AND/OR/XOR/SET) or
+the checksum functions itself.
+
+To omit additional memory allocations to maintain fast modification
+switching times, the modification description space is doubled at gw-job
+creation time so that only the reference to the active modification
+description is changed under rcu protection.
+
+Rename cgw_job::mod to cf_mod and make it a RCU pointer. Allocate in
+cgw_create_job() and free it together with cgw_job in
+cgw_job_free_rcu(). Update all users to dereference cgw_job::cf_mod with
+a RCU accessor and if possible once.
+
+[bigeasy: Replace mod1/mod2 from the Oliver's original patch with dynamic
+allocation, use RCU annotation and accessor]
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Closes: https://lore.kernel.org/linux-can/20231031112349.y0aLoBrz@linutronix.de/
+Fixes: dd895d7f21b2 ("can: cangw: introduce optional uid to reference created routing jobs")
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://patch.msgid.link/20250429070555.cs-7b_eZ@linutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/gw.c | 149 +++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 90 insertions(+), 59 deletions(-)
+
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 37528826935e7..e65500c52bf5c 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -130,7 +130,7 @@ struct cgw_job {
+       u32 handled_frames;
+       u32 dropped_frames;
+       u32 deleted_frames;
+-      struct cf_mod mod;
++      struct cf_mod __rcu *cf_mod;
+       union {
+               /* CAN frame data source */
+               struct net_device *dev;
+@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+       struct cgw_job *gwj = (struct cgw_job *)data;
+       struct canfd_frame *cf;
+       struct sk_buff *nskb;
++      struct cf_mod *mod;
+       int modidx = 0;
+       /* process strictly Classic CAN or CAN FD frames */
+@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+        * When there is at least one modification function activated,
+        * we need to copy the skb as we want to modify skb->data.
+        */
+-      if (gwj->mod.modfunc[0])
++      mod = rcu_dereference(gwj->cf_mod);
++      if (mod->modfunc[0])
+               nskb = skb_copy(skb, GFP_ATOMIC);
+       else
+               nskb = skb_clone(skb, GFP_ATOMIC);
+@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+       cf = (struct canfd_frame *)nskb->data;
+       /* perform preprocessed modification functions if there are any */
+-      while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+-              (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
++      while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
++              (*mod->modfunc[modidx++])(cf, mod);
+       /* Has the CAN frame been modified? */
+       if (modidx) {
+@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+               }
+               /* check for checksum updates */
+-              if (gwj->mod.csumfunc.crc8)
+-                      (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++              if (mod->csumfunc.crc8)
++                      (*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
+-              if (gwj->mod.csumfunc.xor)
+-                      (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++              if (mod->csumfunc.xor)
++                      (*mod->csumfunc.xor)(cf, &mod->csum.xor);
+       }
+       /* clear the skb timestamp if not configured the other way */
+@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
+ {
+       struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
++      /* cgw_job::cf_mod is always accessed from the same cgw_job object within
++       * the same RCU read section. Once cgw_job is scheduled for removal,
++       * cf_mod can also be removed without mandating an additional grace period.
++       */
++      kfree(rcu_access_pointer(gwj->cf_mod));
+       kmem_cache_free(cgw_cache, gwj);
+ }
++/* Return cgw_job::cf_mod with RTNL protected section */
++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
++{
++      return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
++}
++
+ static int cgw_notifier(struct notifier_block *nb,
+                       unsigned long msg, void *ptr)
+ {
+@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ {
+       struct rtcanmsg *rtcan;
+       struct nlmsghdr *nlh;
++      struct cf_mod *mod;
+       nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
+       if (!nlh)
+@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+                       goto cancel;
+       }
++      mod = cgw_job_cf_mod(gwj);
+       if (gwj->flags & CGW_FLAGS_CAN_FD) {
+               struct cgw_fdframe_mod mb;
+-              if (gwj->mod.modtype.and) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.and;
++              if (mod->modtype.and) {
++                      memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.and;
+                       if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.or) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.or;
++              if (mod->modtype.or) {
++                      memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.or;
+                       if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.xor) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.xor;
++              if (mod->modtype.xor) {
++                      memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.xor;
+                       if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.set) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.set;
++              if (mod->modtype.set) {
++                      memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.set;
+                       if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+       } else {
+               struct cgw_frame_mod mb;
+-              if (gwj->mod.modtype.and) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.and;
++              if (mod->modtype.and) {
++                      memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.and;
+                       if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.or) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.or;
++              if (mod->modtype.or) {
++                      memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.or;
+                       if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.xor) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.xor;
++              if (mod->modtype.xor) {
++                      memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.xor;
+                       if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+-              if (gwj->mod.modtype.set) {
+-                      memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+-                      mb.modtype = gwj->mod.modtype.set;
++              if (mod->modtype.set) {
++                      memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++                      mb.modtype = mod->modtype.set;
+                       if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+                               goto cancel;
+               }
+       }
+-      if (gwj->mod.uid) {
+-              if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
++      if (mod->uid) {
++              if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
+                       goto cancel;
+       }
+-      if (gwj->mod.csumfunc.crc8) {
++      if (mod->csumfunc.crc8) {
+               if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+-                          &gwj->mod.csum.crc8) < 0)
++                          &mod->csum.crc8) < 0)
+                       goto cancel;
+       }
+-      if (gwj->mod.csumfunc.xor) {
++      if (mod->csumfunc.xor) {
+               if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+-                          &gwj->mod.csum.xor) < 0)
++                          &mod->csum.xor) < 0)
+                       goto cancel;
+       }
+@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+       struct net *net = sock_net(skb->sk);
+       struct rtcanmsg *r;
+       struct cgw_job *gwj;
+-      struct cf_mod mod;
++      struct cf_mod *mod;
+       struct can_can_gw ccgw;
+       u8 limhops = 0;
+       int err = 0;
+@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+       if (r->gwtype != CGW_TYPE_CAN_CAN)
+               return -EINVAL;
+-      err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
++      mod = kmalloc(sizeof(*mod), GFP_KERNEL);
++      if (!mod)
++              return -ENOMEM;
++
++      err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+       if (err < 0)
+-              return err;
++              goto out_free_cf;
+-      if (mod.uid) {
++      if (mod->uid) {
+               ASSERT_RTNL();
+               /* check for updating an existing job with identical uid */
+               hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
+-                      if (gwj->mod.uid != mod.uid)
++                      struct cf_mod *old_cf;
++
++                      old_cf = cgw_job_cf_mod(gwj);
++                      if (old_cf->uid != mod->uid)
+                               continue;
+                       /* interfaces & filters must be identical */
+-                      if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+-                              return -EINVAL;
++                      if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
++                              err = -EINVAL;
++                              goto out_free_cf;
++                      }
+-                      /* update modifications with disabled softirq & quit */
+-                      local_bh_disable();
+-                      memcpy(&gwj->mod, &mod, sizeof(mod));
+-                      local_bh_enable();
++                      rcu_assign_pointer(gwj->cf_mod, mod);
++                      kfree_rcu_mightsleep(old_cf);
+                       return 0;
+               }
+       }
+       /* ifindex == 0 is not allowed for job creation */
+-      if (!ccgw.src_idx || !ccgw.dst_idx)
+-              return -ENODEV;
++      if (!ccgw.src_idx || !ccgw.dst_idx) {
++              err = -ENODEV;
++              goto out_free_cf;
++      }
+       gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+-      if (!gwj)
+-              return -ENOMEM;
++      if (!gwj) {
++              err = -ENOMEM;
++              goto out_free_cf;
++      }
+       gwj->handled_frames = 0;
+       gwj->dropped_frames = 0;
+@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+       gwj->limit_hops = limhops;
+       /* insert already parsed information */
+-      memcpy(&gwj->mod, &mod, sizeof(mod));
++      RCU_INIT_POINTER(gwj->cf_mod, mod);
+       memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
+       err = -ENODEV;
+@@ -1152,9 +1178,11 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
+       if (!err)
+               hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
+ out:
+-      if (err)
++      if (err) {
+               kmem_cache_free(cgw_cache, gwj);
+-
++out_free_cf:
++              kfree(mod);
++      }
+       return err;
+ }
+@@ -1214,19 +1242,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+       /* remove only the first matching entry */
+       hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
++              struct cf_mod *cf_mod;
++
+               if (gwj->flags != r->flags)
+                       continue;
+               if (gwj->limit_hops != limhops)
+                       continue;
++              cf_mod = cgw_job_cf_mod(gwj);
+               /* we have a match when uid is enabled and identical */
+-              if (gwj->mod.uid || mod.uid) {
+-                      if (gwj->mod.uid != mod.uid)
++              if (cf_mod->uid || mod.uid) {
++                      if (cf_mod->uid != mod.uid)
+                               continue;
+               } else {
+                       /* no uid => check for identical modifications */
+-                      if (memcmp(&gwj->mod, &mod, sizeof(mod)))
++                      if (memcmp(cf_mod, &mod, sizeof(mod)))
+                               continue;
+               }
+-- 
+2.39.5
+
diff --git a/queue-6.6/can-mcp251xfd-fix-tdc-setting-for-low-data-bit-rates.patch b/queue-6.6/can-mcp251xfd-fix-tdc-setting-for-low-data-bit-rates.patch
new file mode 100644 (file)
index 0000000..d83016a
--- /dev/null
@@ -0,0 +1,155 @@
+From 8a2ba8134f579f13c95131b7c8ad7bb0b625995b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 09:15:01 -0700
+Subject: can: mcp251xfd: fix TDC setting for low data bit rates
+
+From: Kelsey Maes <kelsey@vpprocess.com>
+
+[ Upstream commit 5e1663810e11c64956aa7e280cf74b2f3284d816 ]
+
+The TDC is currently hardcoded enabled. This means that even for lower
+CAN-FD data bitrates (with a DBRP (data bitrate prescaler) > 2) a TDC
+is configured. This leads to a bus-off condition.
+
+ISO 11898-1 section 11.3.3 says "Transmitter delay compensation" (TDC)
+is only applicable if DBRP is 1 or 2.
+
+To fix the problem, switch the driver to use the TDC calculation
+provided by the CAN driver framework (which respects ISO 11898-1
+section 11.3.3). This has the positive side effect that userspace can
+control TDC as needed.
+
+Demonstration of the feature in action:
+| $ ip link set can0 up type can bitrate 125000 dbitrate 500000 fd on
+| $ ip -details link show can0
+| 3: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UP mode DEFAULT group default qlen 10
+|     link/can  promiscuity 0  allmulti 0 minmtu 0 maxmtu 0
+|     can <FD> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+|        bitrate 125000 sample-point 0.875
+|        tq 50 prop-seg 69 phase-seg1 70 phase-seg2 20 sjw 10 brp 2
+|        mcp251xfd: tseg1 2..256 tseg2 1..128 sjw 1..128 brp 1..256 brp_inc 1
+|        dbitrate 500000 dsample-point 0.875
+|        dtq 125 dprop-seg 6 dphase-seg1 7 dphase-seg2 2 dsjw 1 dbrp 5
+|        mcp251xfd: dtseg1 1..32 dtseg2 1..16 dsjw 1..16 dbrp 1..256 dbrp_inc 1
+|        tdcv 0..63 tdco 0..63
+|        clock 40000000 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 tso_max_size 65536 tso_max_segs 65535 gro_max_size 65536 parentbus spi parentdev spi0.0
+| $ ip link set can0 up type can bitrate 1000000 dbitrate 4000000 fd on
+| $ ip -details link show can0
+| 3: can0: <NOARP,UP,LOWER_UP,ECHO> mtu 72 qdisc pfifo_fast state UP mode DEFAULT group default qlen 10
+|     link/can  promiscuity 0  allmulti 0 minmtu 0 maxmtu 0
+|     can <FD,TDC-AUTO> state ERROR-ACTIVE (berr-counter tx 0 rx 0) restart-ms 0
+|        bitrate 1000000 sample-point 0.750
+|        tq 25 prop-seg 14 phase-seg1 15 phase-seg2 10 sjw 5 brp 1
+|        mcp251xfd: tseg1 2..256 tseg2 1..128 sjw 1..128 brp 1..256 brp_inc 1
+|        dbitrate 4000000 dsample-point 0.700
+|        dtq 25 dprop-seg 3 dphase-seg1 3 dphase-seg2 3 dsjw 1 dbrp 1
+|        tdco 7
+|        mcp251xfd: dtseg1 1..32 dtseg2 1..16 dsjw 1..16 dbrp 1..256 dbrp_inc 1
+|        tdcv 0..63 tdco 0..63
+|        clock 40000000 numtxqueues 1 numrxqueues 1 gso_max_size 65536 gso_max_segs 65535 tso_max_size 65536 tso_max_segs 65535 gro_max_size 65536 parentbus spi parentdev spi0.0
+
+There has been some confusion about the MCP2518FD using a relative or
+absolute TDCO due to the datasheet specifying a range of [-64,63]. I
+have a custom board with a 40 MHz clock and an estimated loop delay of
+100 to 216 ns. During testing at a data bit rate of 4 Mbit/s I found
+that using can_get_relative_tdco() resulted in bus-off errors. The
+final TDCO value was 1 which corresponds to a 10% SSP in an absolute
+configuration. This behavior is expected if the TDCO value is really
+absolute and not relative. Using priv->can.tdc.tdco instead results in
+a final TDCO of 8, setting the SSP at exactly 80%. This configuration
+works.
+
+The automatic, manual, and off TDC modes were tested at speeds up to,
+and including, 8 Mbit/s on real hardware and behave as expected.
+
+Fixes: 55e5b97f003e ("can: mcp25xxfd: add driver for Microchip MCP25xxFD SPI CAN")
+Reported-by: Kelsey Maes <kelsey@vpprocess.com>
+Closes: https://lore.kernel.org/all/C2121586-C87F-4B23-A933-845362C29CA1@vpprocess.com
+Reviewed-by: Vincent Mailhol <mailhol.vincent@wanadoo.fr>
+Signed-off-by: Kelsey Maes <kelsey@vpprocess.com>
+Link: https://patch.msgid.link/20250430161501.79370-1-kelsey@vpprocess.com
+[mkl: add comment]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/can/spi/mcp251xfd/mcp251xfd-core.c    | 40 +++++++++++++++----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+index a48996265172f..21ae3a89924e9 100644
+--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
++++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+@@ -75,6 +75,24 @@ static const struct can_bittiming_const mcp251xfd_data_bittiming_const = {
+       .brp_inc = 1,
+ };
++/* The datasheet of the mcp2518fd (DS20006027B) specifies a range of
++ * [-64,63] for TDCO, indicating a relative TDCO.
++ *
++ * Manual tests have shown, that using a relative TDCO configuration
++ * results in bus off, while an absolute configuration works.
++ *
++ * For TDCO use the max value (63) from the data sheet, but 0 as the
++ * minimum.
++ */
++static const struct can_tdc_const mcp251xfd_tdc_const = {
++      .tdcv_min = 0,
++      .tdcv_max = 63,
++      .tdco_min = 0,
++      .tdco_max = 63,
++      .tdcf_min = 0,
++      .tdcf_max = 0,
++};
++
+ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model)
+ {
+       switch (model) {
+@@ -510,8 +528,7 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+ {
+       const struct can_bittiming *bt = &priv->can.bittiming;
+       const struct can_bittiming *dbt = &priv->can.data_bittiming;
+-      u32 val = 0;
+-      s8 tdco;
++      u32 tdcmod, val = 0;
+       int err;
+       /* CAN Control Register
+@@ -575,11 +592,16 @@ static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv)
+               return err;
+       /* Transmitter Delay Compensation */
+-      tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1),
+-                     -64, 63);
+-      val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK,
+-                       MCP251XFD_REG_TDC_TDCMOD_AUTO) |
+-              FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco);
++      if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_AUTO)
++              tdcmod = MCP251XFD_REG_TDC_TDCMOD_AUTO;
++      else if (priv->can.ctrlmode & CAN_CTRLMODE_TDC_MANUAL)
++              tdcmod = MCP251XFD_REG_TDC_TDCMOD_MANUAL;
++      else
++              tdcmod = MCP251XFD_REG_TDC_TDCMOD_DISABLED;
++
++      val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, tdcmod) |
++              FIELD_PREP(MCP251XFD_REG_TDC_TDCV_MASK, priv->can.tdc.tdcv) |
++              FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, priv->can.tdc.tdco);
+       return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val);
+ }
+@@ -2083,10 +2105,12 @@ static int mcp251xfd_probe(struct spi_device *spi)
+       priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter;
+       priv->can.bittiming_const = &mcp251xfd_bittiming_const;
+       priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const;
++      priv->can.tdc_const = &mcp251xfd_tdc_const;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+               CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING |
+               CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
+-              CAN_CTRLMODE_CC_LEN8_DLC;
++              CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO |
++              CAN_CTRLMODE_TDC_MANUAL;
+       set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
+       priv->ndev = ndev;
+       priv->spi = spi;
+-- 
+2.39.5
+
diff --git a/queue-6.6/gre-fix-again-ipv6-link-local-address-generation.patch b/queue-6.6/gre-fix-again-ipv6-link-local-address-generation.patch
new file mode 100644 (file)
index 0000000..38b07a7
--- /dev/null
@@ -0,0 +1,121 @@
+From e53330c9afe531f0b1abaa997f603a5e938f49a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 May 2025 00:57:52 +0200
+Subject: gre: Fix again IPv6 link-local address generation.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 3e6a0243ff002ddbd7ee18a8974ae61d2e6ed00d ]
+
+Use addrconf_addr_gen() to generate IPv6 link-local addresses on GRE
+devices in most cases and fall back to using add_v4_addrs() only in
+case the GRE configuration is incompatible with addrconf_addr_gen().
+
+GRE used to use addrconf_addr_gen() until commit e5dd729460ca ("ip/ip6_gre:
+use the same logic as SIT interfaces when computing v6LL address")
+restricted this use to gretap and ip6gretap devices, and created
+add_v4_addrs() (borrowed from SIT) for non-Ethernet GRE ones.
+
+The original problem came when commit 9af28511be10 ("addrconf: refuse
+isatap eui64 for INADDR_ANY") made __ipv6_isatap_ifid() fail when its
+addr parameter was 0. The commit says that this would create an invalid
+address, however, I couldn't find any RFC saying that the generated
+interface identifier would be wrong. Anyway, since gre over IPv4
+devices pass their local tunnel address to __ipv6_isatap_ifid(), that
+commit broke their IPv6 link-local address generation when the local
+address was unspecified.
+
+Then commit e5dd729460ca ("ip/ip6_gre: use the same logic as SIT
+interfaces when computing v6LL address") tried to fix that case by
+defining add_v4_addrs() and calling it to generate the IPv6 link-local
+address instead of using addrconf_addr_gen() (apart for gretap and
+ip6gretap devices, which would still use the regular
+addrconf_addr_gen(), since they have a MAC address).
+
+That broke several use cases because add_v4_addrs() isn't properly
+integrated into the rest of IPv6 Neighbor Discovery code. Several of
+these shortcomings have been fixed over time, but add_v4_addrs()
+remains broken in several aspects. In particular, it doesn't send any
+Router Solicitations, so the SLAAC process doesn't start until the
+interface receives a Router Advertisement. Also, add_v4_addrs() mostly
+ignores the address generation mode of the interface
+(/proc/sys/net/ipv6/conf/*/addr_gen_mode), thus breaking the
+IN6_ADDR_GEN_MODE_RANDOM and IN6_ADDR_GEN_MODE_STABLE_PRIVACY cases.
+
+Fix the situation by using add_v4_addrs() only in the specific scenario
+where the normal method would fail. That is, for interfaces that have
+all of the following characteristics:
+
+  * run over IPv4,
+  * transport IP packets directly, not Ethernet (that is, not gretap
+    interfaces),
+  * tunnel endpoint is INADDR_ANY (that is, 0),
+  * device address generation mode is EUI64.
+
+In all other cases, revert back to the regular addrconf_addr_gen().
+
+Also, remove the special case for ip6gre interfaces in add_v4_addrs(),
+since ip6gre devices now always use addrconf_addr_gen() instead.
+
+Note:
+  This patch was originally applied as commit 183185a18ff9 ("gre: Fix
+  IPv6 link-local address generation."). However, it was then reverted
+  by commit fc486c2d060f ("Revert "gre: Fix IPv6 link-local address
+  generation."") because it uncovered another bug that ended up
+  breaking net/forwarding/ip6gre_custom_multipath_hash.sh. That other
+  bug has now been fixed by commit 4d0ab3a6885e ("ipv6: Start path
+  selection from the first nexthop"). Therefore we can now revive this
+  GRE patch (no changes since original commit 183185a18ff9 ("gre: Fix
+  IPv6 link-local address generation.")).
+
+Fixes: e5dd729460ca ("ip/ip6_gre: use the same logic as SIT interfaces when computing v6LL address")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/a88cc5c4811af36007645d610c95102dccb360a6.1746225214.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/addrconf.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index bb9add46e382a..231fa4dc6cde4 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3189,16 +3189,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
+       struct in6_addr addr;
+       struct net_device *dev;
+       struct net *net = dev_net(idev->dev);
+-      int scope, plen, offset = 0;
++      int scope, plen;
+       u32 pflags = 0;
+       ASSERT_RTNL();
+       memset(&addr, 0, sizeof(struct in6_addr));
+-      /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
+-      if (idev->dev->addr_len == sizeof(struct in6_addr))
+-              offset = sizeof(struct in6_addr) - 4;
+-      memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
++      memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
+       if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+               scope = IPV6_ADDR_COMPATv4;
+@@ -3508,7 +3505,13 @@ static void addrconf_gre_config(struct net_device *dev)
+               return;
+       }
+-      if (dev->type == ARPHRD_ETHER) {
++      /* Generate the IPv6 link-local address using addrconf_addr_gen(),
++       * unless we have an IPv4 GRE device not bound to an IP address and
++       * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
++       * case). Such devices fall back to add_v4_addrs() instead.
++       */
++      if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
++            idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
+               addrconf_addr_gen(idev, true);
+               return;
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.6/ipvs-fix-uninit-value-for-saddr-in-do_output_route4.patch b/queue-6.6/ipvs-fix-uninit-value-for-saddr-in-do_output_route4.patch
new file mode 100644 (file)
index 0000000..7b3755d
--- /dev/null
@@ -0,0 +1,167 @@
+From 0acd3f89049db7dbf9e4f1f3d36069f8ffc01ab8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 May 2025 01:01:18 +0300
+Subject: ipvs: fix uninit-value for saddr in do_output_route4
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit e34090d7214e0516eb8722aee295cb2507317c07 ]
+
+syzbot reports for uninit-value for the saddr argument [1].
+commit 4754957f04f5 ("ipvs: do not use random local source address for
+tunnels") already implies that the input value of saddr
+should be ignored but the code is still reading it, which can prevent
+the route from being connected. Fix it by changing the argument to ret_saddr.
+
+[1]
+BUG: KMSAN: uninit-value in do_output_route4+0x42c/0x4d0 net/netfilter/ipvs/ip_vs_xmit.c:147
+ do_output_route4+0x42c/0x4d0 net/netfilter/ipvs/ip_vs_xmit.c:147
+ __ip_vs_get_out_rt+0x403/0x21d0 net/netfilter/ipvs/ip_vs_xmit.c:330
+ ip_vs_tunnel_xmit+0x205/0x2380 net/netfilter/ipvs/ip_vs_xmit.c:1136
+ ip_vs_in_hook+0x1aa5/0x35b0 net/netfilter/ipvs/ip_vs_core.c:2063
+ nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline]
+ nf_hook_slow+0xf7/0x400 net/netfilter/core.c:626
+ nf_hook include/linux/netfilter.h:269 [inline]
+ __ip_local_out+0x758/0x7e0 net/ipv4/ip_output.c:118
+ ip_local_out net/ipv4/ip_output.c:127 [inline]
+ ip_send_skb+0x6a/0x3c0 net/ipv4/ip_output.c:1501
+ udp_send_skb+0xfda/0x1b70 net/ipv4/udp.c:1195
+ udp_sendmsg+0x2fe3/0x33c0 net/ipv4/udp.c:1483
+ inet_sendmsg+0x1fc/0x280 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+ __sock_sendmsg+0x267/0x380 net/socket.c:727
+ ____sys_sendmsg+0x91b/0xda0 net/socket.c:2566
+ ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2620
+ __sys_sendmmsg+0x41d/0x880 net/socket.c:2702
+ __compat_sys_sendmmsg net/compat.c:360 [inline]
+ __do_compat_sys_sendmmsg net/compat.c:367 [inline]
+ __se_compat_sys_sendmmsg net/compat.c:364 [inline]
+ __ia32_compat_sys_sendmmsg+0xc8/0x140 net/compat.c:364
+ ia32_sys_call+0x3ffa/0x41f0 arch/x86/include/generated/asm/syscalls_32.h:346
+ do_syscall_32_irqs_on arch/x86/entry/syscall_32.c:83 [inline]
+ __do_fast_syscall_32+0xb0/0x110 arch/x86/entry/syscall_32.c:306
+ do_fast_syscall_32+0x38/0x80 arch/x86/entry/syscall_32.c:331
+ do_SYSENTER_32+0x1f/0x30 arch/x86/entry/syscall_32.c:369
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+
+Uninit was created at:
+ slab_post_alloc_hook mm/slub.c:4167 [inline]
+ slab_alloc_node mm/slub.c:4210 [inline]
+ __kmalloc_cache_noprof+0x8fa/0xe00 mm/slub.c:4367
+ kmalloc_noprof include/linux/slab.h:905 [inline]
+ ip_vs_dest_dst_alloc net/netfilter/ipvs/ip_vs_xmit.c:61 [inline]
+ __ip_vs_get_out_rt+0x35d/0x21d0 net/netfilter/ipvs/ip_vs_xmit.c:323
+ ip_vs_tunnel_xmit+0x205/0x2380 net/netfilter/ipvs/ip_vs_xmit.c:1136
+ ip_vs_in_hook+0x1aa5/0x35b0 net/netfilter/ipvs/ip_vs_core.c:2063
+ nf_hook_entry_hookfn include/linux/netfilter.h:154 [inline]
+ nf_hook_slow+0xf7/0x400 net/netfilter/core.c:626
+ nf_hook include/linux/netfilter.h:269 [inline]
+ __ip_local_out+0x758/0x7e0 net/ipv4/ip_output.c:118
+ ip_local_out net/ipv4/ip_output.c:127 [inline]
+ ip_send_skb+0x6a/0x3c0 net/ipv4/ip_output.c:1501
+ udp_send_skb+0xfda/0x1b70 net/ipv4/udp.c:1195
+ udp_sendmsg+0x2fe3/0x33c0 net/ipv4/udp.c:1483
+ inet_sendmsg+0x1fc/0x280 net/ipv4/af_inet.c:851
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+ __sock_sendmsg+0x267/0x380 net/socket.c:727
+ ____sys_sendmsg+0x91b/0xda0 net/socket.c:2566
+ ___sys_sendmsg+0x28d/0x3c0 net/socket.c:2620
+ __sys_sendmmsg+0x41d/0x880 net/socket.c:2702
+ __compat_sys_sendmmsg net/compat.c:360 [inline]
+ __do_compat_sys_sendmmsg net/compat.c:367 [inline]
+ __se_compat_sys_sendmmsg net/compat.c:364 [inline]
+ __ia32_compat_sys_sendmmsg+0xc8/0x140 net/compat.c:364
+ ia32_sys_call+0x3ffa/0x41f0 arch/x86/include/generated/asm/syscalls_32.h:346
+ do_syscall_32_irqs_on arch/x86/entry/syscall_32.c:83 [inline]
+ __do_fast_syscall_32+0xb0/0x110 arch/x86/entry/syscall_32.c:306
+ do_fast_syscall_32+0x38/0x80 arch/x86/entry/syscall_32.c:331
+ do_SYSENTER_32+0x1f/0x30 arch/x86/entry/syscall_32.c:369
+ entry_SYSENTER_compat_after_hwframe+0x84/0x8e
+
+CPU: 0 UID: 0 PID: 22408 Comm: syz.4.5165 Not tainted 6.15.0-rc3-syzkaller-00019-gbc3372351d0c #0 PREEMPT(undef)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
+
+Reported-by: syzbot+04b9a82855c8aed20860@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/68138dfa.050a0220.14dd7d.0017.GAE@google.com/
+Fixes: 4754957f04f5 ("ipvs: do not use random local source address for tunnels")
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipvs/ip_vs_xmit.c | 27 ++++++++-------------------
+ 1 file changed, 8 insertions(+), 19 deletions(-)
+
+diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
+index 5cd511162bc03..0103c4a4d10a5 100644
+--- a/net/netfilter/ipvs/ip_vs_xmit.c
++++ b/net/netfilter/ipvs/ip_vs_xmit.c
+@@ -119,13 +119,12 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
+       return false;
+ }
+-/* Get route to daddr, update *saddr, optionally bind route to saddr */
++/* Get route to daddr, optionally bind route to saddr */
+ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+-                                     int rt_mode, __be32 *saddr)
++                                     int rt_mode, __be32 *ret_saddr)
+ {
+       struct flowi4 fl4;
+       struct rtable *rt;
+-      bool loop = false;
+       memset(&fl4, 0, sizeof(fl4));
+       fl4.daddr = daddr;
+@@ -135,23 +134,17 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
+ retry:
+       rt = ip_route_output_key(net, &fl4);
+       if (IS_ERR(rt)) {
+-              /* Invalid saddr ? */
+-              if (PTR_ERR(rt) == -EINVAL && *saddr &&
+-                  rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
+-                      *saddr = 0;
+-                      flowi4_update_output(&fl4, 0, daddr, 0);
+-                      goto retry;
+-              }
+               IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
+               return NULL;
+-      } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
++      }
++      if (rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
+               ip_rt_put(rt);
+-              *saddr = fl4.saddr;
+               flowi4_update_output(&fl4, 0, daddr, fl4.saddr);
+-              loop = true;
++              rt_mode = 0;
+               goto retry;
+       }
+-      *saddr = fl4.saddr;
++      if (ret_saddr)
++              *ret_saddr = fl4.saddr;
+       return rt;
+ }
+@@ -344,19 +337,15 @@ __ip_vs_get_out_rt(struct netns_ipvs *ipvs, int skb_af, struct sk_buff *skb,
+               if (ret_saddr)
+                       *ret_saddr = dest_dst->dst_saddr.ip;
+       } else {
+-              __be32 saddr = htonl(INADDR_ANY);
+-
+               noref = 0;
+               /* For such unconfigured boxes avoid many route lookups
+                * for performance reasons because we do not remember saddr
+                */
+               rt_mode &= ~IP_VS_RT_MODE_CONNECT;
+-              rt = do_output_route4(net, daddr, rt_mode, &saddr);
++              rt = do_output_route4(net, daddr, rt_mode, ret_saddr);
+               if (!rt)
+                       goto err_unreach;
+-              if (ret_saddr)
+-                      *ret_saddr = saddr;
+       }
+       local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
+-- 
+2.39.5
+
diff --git a/queue-6.6/ksmbd-fix-memory-leak-in-parse_lease_state.patch b/queue-6.6/ksmbd-fix-memory-leak-in-parse_lease_state.patch
new file mode 100644 (file)
index 0000000..4835daa
--- /dev/null
@@ -0,0 +1,61 @@
+From 6c57d42dc39293732a32f9f35affd99f98864488 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Apr 2025 11:16:23 +0800
+Subject: ksmbd: fix memory leak in parse_lease_state()
+
+From: Wang Zhaolong <wangzhaolong1@huawei.com>
+
+[ Upstream commit eb4447bcce915b43b691123118893fca4f372a8f ]
+
+The previous patch that added bounds check for create lease context
+introduced a memory leak. When the bounds check fails, the function
+returns NULL without freeing the previously allocated lease_ctx_info
+structure.
+
+This patch fixes the issue by adding kfree(lreq) before returning NULL
+in both boundary check cases.
+
+Fixes: bab703ed8472 ("ksmbd: add bounds check for create lease context")
+Signed-off-by: Wang Zhaolong <wangzhaolong1@huawei.com>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/oplock.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
+index 5a5277b4b53b1..72294764d4c20 100644
+--- a/fs/smb/server/oplock.c
++++ b/fs/smb/server/oplock.c
+@@ -1496,7 +1496,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+               if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+                   sizeof(struct create_lease_v2) - 4)
+-                      return NULL;
++                      goto err_out;
+               memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+               lreq->req_state = lc->lcontext.LeaseState;
+@@ -1512,7 +1512,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+               if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
+                   sizeof(struct create_lease))
+-                      return NULL;
++                      goto err_out;
+               memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
+               lreq->req_state = lc->lcontext.LeaseState;
+@@ -1521,6 +1521,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
+               lreq->version = 1;
+       }
+       return lreq;
++err_out:
++      kfree(lreq);
++      return NULL;
+ }
+ /**
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-allow-leaky-reserved-multicast.patch b/queue-6.6/net-dsa-b53-allow-leaky-reserved-multicast.patch
new file mode 100644 (file)
index 0000000..1f4871d
--- /dev/null
@@ -0,0 +1,53 @@
+From e4b73debba0246ec1e6db066aa6cd62e404a4108 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:00 +0200
+Subject: net: dsa: b53: allow leaky reserved multicast
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 5f93185a757ff38b36f849c659aeef368db15a68 ]
+
+Allow reserved multicast to ignore VLAN membership so STP and other
+management protocols work without a PVID VLAN configured when using a
+vlan aware bridge.
+
+Fixes: 967dd82ffc52 ("net: dsa: b53: Add support for Broadcom RoboSwitch")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-2-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index cfcda893f1a16..c438a5d9f6d8c 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -373,9 +373,11 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+               b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+       }
++      vc1 &= ~VC1_RX_MCST_FWD_EN;
++
+       if (enable) {
+               vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+-              vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
++              vc1 |= VC1_RX_MCST_UNTAG_EN;
+               vc4 &= ~VC4_ING_VID_CHECK_MASK;
+               if (enable_filtering) {
+                       vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+       } else {
+               vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+-              vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
++              vc1 &= ~VC1_RX_MCST_UNTAG_EN;
+               vc4 &= ~VC4_ING_VID_CHECK_MASK;
+               vc5 &= ~VC5_DROP_VTABLE_MISS;
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch b/queue-6.6/net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch
new file mode 100644 (file)
index 0000000..b3c68db
--- /dev/null
@@ -0,0 +1,50 @@
+From b9bffaf0d9324866d78d591c61e4abbed2f8e0a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:05 +0200
+Subject: net: dsa: b53: always rejoin default untagged VLAN on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 13b152ae40495966501697693f048f47430c50fd ]
+
+While JOIN_ALL_VLAN allows to join all VLANs, we still need to keep the
+default VLAN enabled so that untagged traffic stays untagged.
+
+So rejoin the default VLAN even for switches with JOIN_ALL_VLAN support.
+
+Fixes: 48aea33a77ab ("net: dsa: b53: Add JOIN_ALL_VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-7-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index b257757f0b9dc..aa449fa182683 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -2002,12 +2002,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+               if (!(reg & BIT(cpu_port)))
+                       reg |= BIT(cpu_port);
+               b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+-      } else {
+-              b53_get_vlan_entry(dev, pvid, vl);
+-              vl->members |= BIT(port) | BIT(cpu_port);
+-              vl->untag |= BIT(port) | BIT(cpu_port);
+-              b53_set_vlan_entry(dev, pvid, vl);
+       }
++
++      b53_get_vlan_entry(dev, pvid, vl);
++      vl->members |= BIT(port) | BIT(cpu_port);
++      vl->untag |= BIT(port) | BIT(cpu_port);
++      b53_set_vlan_entry(dev, pvid, vl);
+ }
+ EXPORT_SYMBOL(b53_br_leave);
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-fix-clearing-pvid-of-a-port.patch b/queue-6.6/net-dsa-b53-fix-clearing-pvid-of-a-port.patch
new file mode 100644 (file)
index 0000000..59ad96a
--- /dev/null
@@ -0,0 +1,75 @@
+From 8a51fa0cdde4df483e982419fbd4c1f296992b55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:02 +0200
+Subject: net: dsa: b53: fix clearing PVID of a port
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit f480851981043d9bb6447ca9883ade9247b9a0ad ]
+
+Currently the PVID of ports are only set when adding/updating VLANs with
+PVID set or removing VLANs, but not when clearing the PVID flag of a
+VLAN.
+
+E.g. the following flow
+
+$ ip link add br0 type bridge vlan_filtering 1
+$ ip link set sw1p1 master bridge
+$ bridge vlan add dev sw1p1 vid 10 pvid untagged
+$ bridge vlan add dev sw1p1 vid 10 untagged
+
+Would keep the PVID set as 10, despite the flag being cleared. Fix this
+by checking if we need to unset the PVID on vlan updates.
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-4-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index c438a5d9f6d8c..584de37a61c76 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1521,12 +1521,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+       bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+       bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+       struct b53_vlan *vl;
++      u16 old_pvid, new_pvid;
+       int err;
+       err = b53_vlan_prepare(ds, port, vlan);
+       if (err)
+               return err;
++      b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
++      if (pvid)
++              new_pvid = vlan->vid;
++      else if (!pvid && vlan->vid == old_pvid)
++              new_pvid = b53_default_pvid(dev);
++      else
++              new_pvid = old_pvid;
++
+       vl = &dev->vlans[vlan->vid];
+       b53_get_vlan_entry(dev, vlan->vid, vl);
+@@ -1543,9 +1552,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+       b53_set_vlan_entry(dev, vlan->vid, vl);
+       b53_fast_age_vlan(dev, vlan->vid);
+-      if (pvid && !dsa_is_cpu_port(ds, port)) {
++      if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+               b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+-                          vlan->vid);
++                          new_pvid);
+               b53_fast_age_vlan(dev, vlan->vid);
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch b/queue-6.6/net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch
new file mode 100644 (file)
index 0000000..b7decca
--- /dev/null
@@ -0,0 +1,39 @@
+From 60a2b6baf5caa96a4a7d3eeb504517f88f96b072 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:03 +0200
+Subject: net: dsa: b53: fix flushing old pvid VLAN on pvid change
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 083c6b28c0cbcd83b6af1a10f2c82937129b3438 ]
+
+Presumably the intention here was to flush the VLAN of the old pvid, not
+the added VLAN again, which we already flushed before.
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-5-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 584de37a61c76..55893d4e405e8 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1555,7 +1555,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+       if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+               b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+                           new_pvid);
+-              b53_fast_age_vlan(dev, vlan->vid);
++              b53_fast_age_vlan(dev, old_pvid);
+       }
+       return 0;
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch b/queue-6.6/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch
new file mode 100644 (file)
index 0000000..9f9468e
--- /dev/null
@@ -0,0 +1,43 @@
+From e8c6c29e17b511308dfbce29d36a56cd5385e05f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:09 +0200
+Subject: net: dsa: b53: fix learning on VLAN unaware bridges
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 9f34ad89bcf0e6df6f8b01f1bdab211493fc66d1 ]
+
+When VLAN filtering is off, we configure the switch to forward, but not
+learn on VLAN table misses. This effectively disables learning while not
+filtering.
+
+Fix this by switching to forward and learn. Setting the learning disable
+register will still control whether learning actually happens.
+
+Fixes: dad8d7c6452b ("net: dsa: b53: Properly account for VLAN filtering")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-11-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index aa449fa182683..d2ff2c2fcbbfc 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -383,7 +383,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+                       vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+                       vc5 |= VC5_DROP_VTABLE_MISS;
+               } else {
+-                      vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
++                      vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
+                       vc5 &= ~VC5_DROP_VTABLE_MISS;
+               }
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch b/queue-6.6/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch
new file mode 100644 (file)
index 0000000..0474ffa
--- /dev/null
@@ -0,0 +1,49 @@
+From bb256a7ec4300fce2f12382ad8f683e43f3d01d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:04 +0200
+Subject: net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit a1c1901c5cc881425cc45992ab6c5418174e9e5a ]
+
+The untagged default VLAN is added to the default vlan, which may be
+one, but we modify the VLAN 0 entry on bridge leave.
+
+Fix this to use the correct VLAN entry for the default pvid.
+
+Fixes: fea83353177a ("net: dsa: b53: Fix default VLAN ID")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-6-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 55893d4e405e8..b257757f0b9dc 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1967,7 +1967,7 @@ EXPORT_SYMBOL(b53_br_join);
+ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+ {
+       struct b53_device *dev = ds->priv;
+-      struct b53_vlan *vl = &dev->vlans[0];
++      struct b53_vlan *vl;
+       s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+       unsigned int i;
+       u16 pvlan, reg, pvid;
+@@ -1993,6 +1993,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
+       dev->ports[port].vlan_ctl_mask = pvlan;
+       pvid = b53_default_pvid(dev);
++      vl = &dev->vlans[pvid];
+       /* Make this port join all VLANs without VLAN entries */
+       if (is58xx(dev)) {
+-- 
+2.39.5
+
diff --git a/queue-6.6/net-ethernet-mtk_eth_soc-reset-all-tx-queues-on-dma-.patch b/queue-6.6/net-ethernet-mtk_eth_soc-reset-all-tx-queues-on-dma-.patch
new file mode 100644 (file)
index 0000000..31e4c94
--- /dev/null
@@ -0,0 +1,63 @@
+From 16eda8fca5cfcf2379b5f227da31b6417d80d3e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 May 2025 02:07:32 +0100
+Subject: net: ethernet: mtk_eth_soc: reset all TX queues on DMA free
+
+From: Daniel Golle <daniel@makrotopia.org>
+
+[ Upstream commit 4db6c75124d871fbabf8243f947d34cc7e0697fc ]
+
+The purpose of resetting the TX queue is to reset the byte and packet
+count as well as to clear the software flow control XOFF bit.
+
+MediaTek developers pointed out that netdev_reset_queue would only
+resets queue 0 of the network device.
+
+Queues that are not reset may cause unexpected issues.
+
+Packets may stop being sent after reset and "transmit timeout" log may
+be displayed.
+
+Import fix from MediaTek's SDK to resolve this issue.
+
+Link: https://git01.mediatek.com/plugins/gitiles/openwrt/feeds/mtk-openwrt-feeds/+/319c0d9905579a46dc448579f892f364f1f84818
+Fixes: f63959c7eec31 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues")
+Signed-off-by: Daniel Golle <daniel@makrotopia.org>
+Link: https://patch.msgid.link/c9ff9adceac4f152239a0f65c397f13547639175.1746406763.git.daniel@makrotopia.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index d2ec8f642c2fa..c6ccfbd422657 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3117,11 +3117,19 @@ static int mtk_dma_init(struct mtk_eth *eth)
+ static void mtk_dma_free(struct mtk_eth *eth)
+ {
+       const struct mtk_soc_data *soc = eth->soc;
+-      int i;
++      int i, j, txqs = 1;
++
++      if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
++              txqs = MTK_QDMA_NUM_QUEUES;
++
++      for (i = 0; i < MTK_MAX_DEVS; i++) {
++              if (!eth->netdev[i])
++                      continue;
++
++              for (j = 0; j < txqs; j++)
++                      netdev_tx_reset_subqueue(eth->netdev[i], j);
++      }
+-      for (i = 0; i < MTK_MAX_DEVS; i++)
+-              if (eth->netdev[i])
+-                      netdev_reset_queue(eth->netdev[i]);
+       if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+               dma_free_coherent(eth->dma_dev,
+                                 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+-- 
+2.39.5
+
diff --git a/queue-6.6/netdevice-add-netdev_tx_reset_subqueue-shorthand.patch b/queue-6.6/netdevice-add-netdev_tx_reset_subqueue-shorthand.patch
new file mode 100644 (file)
index 0000000..34614c9
--- /dev/null
@@ -0,0 +1,56 @@
+From fc58e0912bfb9d2beca3c825863d034ce43f2b68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Sep 2024 17:47:45 +0200
+Subject: netdevice: add netdev_tx_reset_subqueue() shorthand
+
+From: Alexander Lobakin <aleksander.lobakin@intel.com>
+
+[ Upstream commit 3dc95a3edd0a86b4a59670b3fafcc64c7d83e2e7 ]
+
+Add a shorthand similar to other net*_subqueue() helpers for resetting
+the queue by its index w/o obtaining &netdev_tx_queue beforehand
+manually.
+
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 4db6c75124d8 ("net: ethernet: mtk_eth_soc: reset all TX queues on DMA free")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 337a9d1c558f3..0b0a172337dba 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3614,6 +3614,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+ #endif
+ }
++/**
++ * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue
++ * @dev: network device
++ * @qid: stack index of the queue to reset
++ */
++static inline void netdev_tx_reset_subqueue(const struct net_device *dev,
++                                          u32 qid)
++{
++      netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid));
++}
++
+ /**
+  *    netdev_reset_queue - reset the packets and bytes count of a network device
+  *    @dev_queue: network device
+@@ -3623,7 +3634,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+  */
+ static inline void netdev_reset_queue(struct net_device *dev_queue)
+ {
+-      netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0));
++      netdev_tx_reset_subqueue(dev_queue, 0);
+ }
+ /**
+-- 
+2.39.5
+
diff --git a/queue-6.6/netfilter-ipset-fix-region-locking-in-hash-types.patch b/queue-6.6/netfilter-ipset-fix-region-locking-in-hash-types.patch
new file mode 100644 (file)
index 0000000..ce5c518
--- /dev/null
@@ -0,0 +1,42 @@
+From 2f1c639273118b0389a23438e1781565d34909f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 17:01:59 +0200
+Subject: netfilter: ipset: fix region locking in hash types
+
+From: Jozsef Kadlecsik <kadlec@netfilter.org>
+
+[ Upstream commit 8478a729c0462273188263136880480729e9efca ]
+
+Region locking introduced in v5.6-rc4 contained three macros to handle
+the region locks: ahash_bucket_start(), ahash_bucket_end() which gave
+back the start and end hash bucket values belonging to a given region
+lock and ahash_region() which should give back the region lock belonging
+to a given hash bucket. The latter was incorrect which can lead to a
+race condition between the garbage collector and adding new elements
+when a hash type of set is defined with timeouts.
+
+Fixes: f66ee0410b1c ("netfilter: ipset: Fix "INFO: rcu detected stall in hash_xxx" reports")
+Reported-by: Kota Toda <kota.toda@gmo-cybersecurity.com>
+Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index 20aad81fcad7e..c2d88b1b06b87 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -63,7 +63,7 @@ struct hbucket {
+ #define ahash_sizeof_regions(htable_bits)             \
+       (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+ #define ahash_region(n, htable_bits)          \
+-      ((n) % ahash_numof_locks(htable_bits))
++      ((n) / jhash_size(HTABLE_REGION_BITS))
+ #define ahash_bucket_start(h,  htable_bits)   \
+       ((htable_bits) < HTABLE_REGION_BITS ? 0 \
+               : (h) * jhash_size(HTABLE_REGION_BITS))
+-- 
+2.39.5
+
diff --git a/queue-6.6/s390-entry-fix-last-breaking-event-handling-in-case-.patch b/queue-6.6/s390-entry-fix-last-breaking-event-handling-in-case-.patch
new file mode 100644 (file)
index 0000000..073f7df
--- /dev/null
@@ -0,0 +1,44 @@
+From 3f2bc51d19c545a52d4a0b9e77fa0440d75f8148 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 17:07:01 +0200
+Subject: s390/entry: Fix last breaking event handling in case of stack
+ corruption
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit ae952eea6f4a7e2193f8721a5366049946e012e7 ]
+
+In case of stack corruption stack_invalid() is called and the expectation
+is that register r10 contains the last breaking event address. This
+dependency is quite subtle and broke a couple of years ago without that
+anybody noticed.
+
+Fix this by getting rid of the dependency and read the last breaking event
+address from lowcore.
+
+Fixes: 56e62a737028 ("s390: convert to generic entry")
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index ebad8c8b8c57d..3cacc36088eb4 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -639,7 +639,8 @@ SYM_CODE_START(stack_overflow)
+       stmg    %r0,%r7,__PT_R0(%r11)
+       stmg    %r8,%r9,__PT_PSW(%r11)
+       mvc     __PT_R8(64,%r11),0(%r14)
+-      stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
++      GET_LC  %r2
++      mvc     __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
+       xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+       lgr     %r2,%r11                # pass pointer to pt_regs
+       jg      kernel_stack_overflow
+-- 
+2.39.5
+
diff --git a/queue-6.6/sch_htb-make-htb_deactivate-idempotent.patch b/queue-6.6/sch_htb-make-htb_deactivate-idempotent.patch
new file mode 100644 (file)
index 0000000..4c56a7f
--- /dev/null
@@ -0,0 +1,105 @@
+From dc6d407778b9b742c55d23ca4ab676929a5d7698 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Apr 2025 16:29:54 -0700
+Subject: sch_htb: make htb_deactivate() idempotent
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 3769478610135e82b262640252d90f6efb05be71 ]
+
+Alan reported a NULL pointer dereference in htb_next_rb_node()
+after we made htb_qlen_notify() idempotent.
+
+It turns out in the following case it introduced some regression:
+
+htb_dequeue_tree():
+  |-> fq_codel_dequeue()
+    |-> qdisc_tree_reduce_backlog()
+      |-> htb_qlen_notify()
+        |-> htb_deactivate()
+  |-> htb_next_rb_node()
+  |-> htb_deactivate()
+
+For htb_next_rb_node(), after calling the 1st htb_deactivate(), the
+clprio[prio]->ptr could be already set to  NULL, which means
+htb_next_rb_node() is vulnerable here.
+
+For htb_deactivate(), although we checked qlen before calling it, in
+case of qlen==0 after qdisc_tree_reduce_backlog(), we may call it again
+which triggers the warning inside.
+
+To fix the issues here, we need to:
+
+1) Make htb_deactivate() idempotent, that is, simply return if we
+   already call it before.
+2) Make htb_next_rb_node() safe against ptr==NULL.
+
+Many thanks to Alan for testing and for the reproducer.
+
+Fixes: 5ba8b837b522 ("sch_htb: make htb_qlen_notify() idempotent")
+Reported-by: Alan J. Wylie <alan@wylie.me.uk>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Link: https://patch.msgid.link/20250428232955.1740419-2-xiyou.wangcong@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_htb.c | 15 ++++++---------
+ 1 file changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 9a3f7ea80b34b..716da8c6b3def 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
+  */
+ static inline void htb_next_rb_node(struct rb_node **n)
+ {
+-      *n = rb_next(*n);
++      if (*n)
++              *n = rb_next(*n);
+ }
+ /**
+@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
+  */
+ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
+ {
+-      WARN_ON(!cl->prio_activity);
+-
++      if (!cl->prio_activity)
++              return;
+       htb_deactivate_prios(q, cl);
+       cl->prio_activity = 0;
+ }
+@@ -1485,8 +1486,6 @@ static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
+ {
+       struct htb_class *cl = (struct htb_class *)arg;
+-      if (!cl->prio_activity)
+-              return;
+       htb_deactivate(qdisc_priv(sch), cl);
+ }
+@@ -1740,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
+       if (cl->parent)
+               cl->parent->children--;
+-      if (cl->prio_activity)
+-              htb_deactivate(q, cl);
++      htb_deactivate(q, cl);
+       if (cl->cmode != HTB_CAN_SEND)
+               htb_safe_rb_erase(&cl->pq_node,
+@@ -1949,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
+                       /* turn parent into inner node */
+                       qdisc_purge_queue(parent->leaf.q);
+                       parent_qdisc = parent->leaf.q;
+-                      if (parent->prio_activity)
+-                              htb_deactivate(q, parent);
++                      htb_deactivate(q, parent);
+                       /* remove from evt list because of level change */
+                       if (parent->cmode != HTB_CAN_SEND) {
+-- 
+2.39.5
+
index bf7e76935987dd787981d9b08adfba2bdf42c403..29a133941ef045fa7c0b1ed9b12dfe45223d39ec 100644 (file)
@@ -7,3 +7,20 @@ ksmbd-prevent-rename-with-empty-string.patch
 ksmbd-prevent-out-of-bounds-stream-writes-by-validating-pos.patch
 ksmbd-fix-uaf-in-__close_file_table_ids.patch
 openvswitch-fix-unsafe-attribute-parsing-in-output_userspace.patch
+ksmbd-fix-memory-leak-in-parse_lease_state.patch
+s390-entry-fix-last-breaking-event-handling-in-case-.patch
+sch_htb-make-htb_deactivate-idempotent.patch
+gre-fix-again-ipv6-link-local-address-generation.patch
+netdevice-add-netdev_tx_reset_subqueue-shorthand.patch
+net-ethernet-mtk_eth_soc-reset-all-tx-queues-on-dma-.patch
+can-mcp251xfd-fix-tdc-setting-for-low-data-bit-rates.patch
+can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch
+ipvs-fix-uninit-value-for-saddr-in-do_output_route4.patch
+netfilter-ipset-fix-region-locking-in-hash-types.patch
+bpf-scrub-packet-on-bpf_redirect_peer.patch
+net-dsa-b53-allow-leaky-reserved-multicast.patch
+net-dsa-b53-fix-clearing-pvid-of-a-port.patch
+net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch
+net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch
+net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch
+net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch