From: Sasha Levin Date: Sun, 11 May 2025 17:52:42 +0000 (-0400) Subject: Fixes for 5.10 X-Git-Tag: v5.15.183~67 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=75c78bd083ae63c2147f47a936b7997ecb9a3be1;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 5.10 Signed-off-by: Sasha Levin --- diff --git a/queue-5.10/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch b/queue-5.10/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch new file mode 100644 index 0000000000..96a009db1c --- /dev/null +++ b/queue-5.10/can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch @@ -0,0 +1,366 @@ +From 530364517c5e8a7d1463c8ed6518bc5d9958c48f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 29 Apr 2025 09:05:55 +0200 +Subject: can: gw: fix RCU/BH usage in cgw_create_job() + +From: Oliver Hartkopp + +[ Upstream commit 511e64e13d8cc72853275832e3f372607466c18c ] + +As reported by Sebastian Andrzej Siewior the use of local_bh_disable() +is only feasible in uni processor systems to update the modification rules. +The usual use-case to update the modification rules is to update the data +of the modifications but not the modification types (AND/OR/XOR/SET) or +the checksum functions itself. + +To omit additional memory allocations to maintain fast modification +switching times, the modification description space is doubled at gw-job +creation time so that only the reference to the active modification +description is changed under rcu protection. + +Rename cgw_job::mod to cf_mod and make it a RCU pointer. Allocate in +cgw_create_job() and free it together with cgw_job in +cgw_job_free_rcu(). Update all users to dereference cgw_job::cf_mod with +a RCU accessor and if possible once. + +[bigeasy: Replace mod1/mod2 from the Oliver's original patch with dynamic +allocation, use RCU annotation and accessor] + +Reported-by: Sebastian Andrzej Siewior +Closes: https://lore.kernel.org/linux-can/20231031112349.y0aLoBrz@linutronix.de/ +Fixes: dd895d7f21b2 ("can: cangw: introduce optional uid to reference created routing jobs") +Tested-by: Oliver Hartkopp +Signed-off-by: Oliver Hartkopp +Signed-off-by: Sebastian Andrzej Siewior +Link: https://patch.msgid.link/20250429070555.cs-7b_eZ@linutronix.de +Signed-off-by: Marc Kleine-Budde +Signed-off-by: Sasha Levin +--- + net/can/gw.c | 149 +++++++++++++++++++++++++++++++-------------------- + 1 file changed, 90 insertions(+), 59 deletions(-) + +diff --git a/net/can/gw.c b/net/can/gw.c +index 59ce23996c6e0..59b9f3e579f73 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -130,7 +130,7 @@ struct cgw_job { + u32 handled_frames; + u32 dropped_frames; + u32 deleted_frames; +- struct cf_mod mod; ++ struct cf_mod __rcu *cf_mod; + union { + /* CAN frame data source */ + struct net_device *dev; +@@ -397,6 +397,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + struct cgw_job *gwj = (struct cgw_job *)data; + struct canfd_frame *cf; + struct sk_buff *nskb; ++ struct cf_mod *mod; + int modidx = 0; + + /* process strictly Classic CAN or CAN FD frames */ +@@ -444,7 +445,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + * When there is at least one modification function activated, + * we need to copy the skb as we want to modify skb->data. 
+ */ +- if (gwj->mod.modfunc[0]) ++ mod = rcu_dereference(gwj->cf_mod); ++ if (mod->modfunc[0]) + nskb = skb_copy(skb, GFP_ATOMIC); + else + nskb = skb_clone(skb, GFP_ATOMIC); +@@ -467,8 +469,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + cf = (struct canfd_frame *)nskb->data; + + /* perform preprocessed modification functions if there are any */ +- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx]) +- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod); ++ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx]) ++ (*mod->modfunc[modidx++])(cf, mod); + + /* Has the CAN frame been modified? */ + if (modidx) { +@@ -484,11 +486,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data) + } + + /* check for checksum updates */ +- if (gwj->mod.csumfunc.crc8) +- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8); ++ if (mod->csumfunc.crc8) ++ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8); + +- if (gwj->mod.csumfunc.xor) +- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor); ++ if (mod->csumfunc.xor) ++ (*mod->csumfunc.xor)(cf, &mod->csum.xor); + } + + /* clear the skb timestamp if not configured the other way */ +@@ -519,9 +521,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head) + { + struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); + ++ /* cgw_job::cf_mod is always accessed from the same cgw_job object within ++ * the same RCU read section. Once cgw_job is scheduled for removal, ++ * cf_mod can also be removed without mandating an additional grace period. ++ */ ++ kfree(rcu_access_pointer(gwj->cf_mod)); + kmem_cache_free(cgw_cache, gwj); + } + ++/* Return cgw_job::cf_mod with RTNL protected section */ ++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj) ++{ ++ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked()); ++} ++ + static int cgw_notifier(struct notifier_block *nb, + unsigned long msg, void *ptr) + { +@@ -554,6 +567,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + { + struct rtcanmsg *rtcan; + struct nlmsghdr *nlh; ++ struct cf_mod *mod; + + nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags); + if (!nlh) +@@ -588,82 +602,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, + goto cancel; + } + ++ mod = cgw_job_cf_mod(gwj); + if (gwj->flags & CGW_FLAGS_CAN_FD) { + struct cgw_fdframe_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = 
mod->modtype.set; + if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } else { + struct cgw_frame_mod mb; + +- if (gwj->mod.modtype.and) { +- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.and; ++ if (mod->modtype.and) { ++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.and; + if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.or) { +- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.or; ++ if (mod->modtype.or) { ++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.or; + if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.xor) { +- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.xor; ++ if (mod->modtype.xor) { ++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.xor; + if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0) + goto cancel; + } + +- if (gwj->mod.modtype.set) { +- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf)); +- mb.modtype = gwj->mod.modtype.set; ++ if (mod->modtype.set) { ++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf)); ++ mb.modtype = mod->modtype.set; + if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0) + goto cancel; + } + } + +- if (gwj->mod.uid) { +- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) ++ if (mod->uid) { ++ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.crc8) { ++ if (mod->csumfunc.crc8) { + if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, +- &gwj->mod.csum.crc8) < 0) ++ &mod->csum.crc8) < 0) + goto cancel; + } + +- if (gwj->mod.csumfunc.xor) { ++ if (mod->csumfunc.xor) { + if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN, +- &gwj->mod.csum.xor) < 0) ++ &mod->csum.xor) < 0) + goto cancel; + } + +@@ -997,7 +1012,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + struct net *net = sock_net(skb->sk); + struct rtcanmsg *r; + struct cgw_job *gwj; +- struct cf_mod mod; ++ struct cf_mod *mod; + struct can_can_gw ccgw; + u8 limhops = 0; + int err = 0; +@@ -1016,37 +1031,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (r->gwtype != CGW_TYPE_CAN_CAN) + return -EINVAL; + +- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); ++ mod = kmalloc(sizeof(*mod), GFP_KERNEL); ++ if (!mod) ++ return -ENOMEM; ++ ++ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + if (err < 0) +- return err; ++ goto out_free_cf; + +- if (mod.uid) { ++ if (mod->uid) { + ASSERT_RTNL(); + + /* check for updating an existing job with identical uid */ + hlist_for_each_entry(gwj, &net->can.cgw_list, list) { +- if (gwj->mod.uid != mod.uid) ++ struct cf_mod *old_cf; ++ ++ old_cf = cgw_job_cf_mod(gwj); ++ if (old_cf->uid != mod->uid) + continue; + + /* interfaces & filters must be identical */ +- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) +- return -EINVAL; ++ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) { ++ err = -EINVAL; ++ goto out_free_cf; ++ } + +- /* update modifications with disabled softirq & quit */ +- local_bh_disable(); +- memcpy(&gwj->mod, &mod, sizeof(mod)); +- local_bh_enable(); ++ rcu_assign_pointer(gwj->cf_mod, mod); ++ kfree_rcu_mightsleep(old_cf); + return 0; + } + } + + /* ifindex == 0 is not allowed for job creation */ +- if (!ccgw.src_idx || !ccgw.dst_idx) +- return -ENODEV; ++ if (!ccgw.src_idx || !ccgw.dst_idx) { 
++ err = -ENODEV; ++ goto out_free_cf; ++ } + + gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); +- if (!gwj) +- return -ENOMEM; ++ if (!gwj) { ++ err = -ENOMEM; ++ goto out_free_cf; ++ } + + gwj->handled_frames = 0; + gwj->dropped_frames = 0; +@@ -1056,7 +1082,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + gwj->limit_hops = limhops; + + /* insert already parsed information */ +- memcpy(&gwj->mod, &mod, sizeof(mod)); ++ RCU_INIT_POINTER(gwj->cf_mod, mod); + memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); + + err = -ENODEV; +@@ -1083,9 +1109,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, + if (!err) + hlist_add_head_rcu(&gwj->list, &net->can.cgw_list); + out: +- if (err) ++ if (err) { + kmem_cache_free(cgw_cache, gwj); +- ++out_free_cf: ++ kfree(mod); ++ } + return err; + } + +@@ -1145,19 +1173,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, + + /* remove only the first matching entry */ + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { ++ struct cf_mod *cf_mod; ++ + if (gwj->flags != r->flags) + continue; + + if (gwj->limit_hops != limhops) + continue; + ++ cf_mod = cgw_job_cf_mod(gwj); + /* we have a match when uid is enabled and identical */ +- if (gwj->mod.uid || mod.uid) { +- if (gwj->mod.uid != mod.uid) ++ if (cf_mod->uid || mod.uid) { ++ if (cf_mod->uid != mod.uid) + continue; + } else { + /* no uid => check for identical modifications */ +- if (memcmp(&gwj->mod, &mod, sizeof(mod))) ++ if (memcmp(cf_mod, &mod, sizeof(mod))) + continue; + } + +-- +2.39.5 + diff --git a/queue-5.10/can-gw-use-call_rcu-instead-of-costly-synchronize_rc.patch b/queue-5.10/can-gw-use-call_rcu-instead-of-costly-synchronize_rc.patch new file mode 100644 index 0000000000..0fe9890ef0 --- /dev/null +++ b/queue-5.10/can-gw-use-call_rcu-instead-of-costly-synchronize_rc.patch @@ -0,0 +1,82 @@ +From 634f00dde02422f50492154d3a7fe1a88dc9da55 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 7 Feb 2022 11:07:06 -0800 +Subject: can: gw: use call_rcu() instead of costly synchronize_rcu() + +From: Eric Dumazet + +[ Upstream commit 181d4447905d551cc664f1e7e796b482c1eec992 ] + +Commit fb8696ab14ad ("can: gw: synchronize rcu operations +before removing gw job entry") added three synchronize_rcu() calls +to make sure one rcu grace period was observed before freeing +a "struct cgw_job" (which are tiny objects). + +This should be converted to call_rcu() to avoid adding delays +in device / network dismantles. + +Use the rcu_head that was already in struct cgw_job, +not yet used. 
+ +Link: https://lore.kernel.org/all/20220207190706.1499190-1-eric.dumazet@gmail.com +Signed-off-by: Eric Dumazet +Cc: Oliver Hartkopp +Tested-by: Oliver Hartkopp +Signed-off-by: Marc Kleine-Budde +Stable-dep-of: 511e64e13d8c ("can: gw: fix RCU/BH usage in cgw_create_job()") +Signed-off-by: Sasha Levin +--- + net/can/gw.c | 16 ++++++++++------ + 1 file changed, 10 insertions(+), 6 deletions(-) + +diff --git a/net/can/gw.c b/net/can/gw.c +index cbb46d3aa9634..59ce23996c6e0 100644 +--- a/net/can/gw.c ++++ b/net/can/gw.c +@@ -515,6 +515,13 @@ static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj) + gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj); + } + ++static void cgw_job_free_rcu(struct rcu_head *rcu_head) ++{ ++ struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu); ++ ++ kmem_cache_free(cgw_cache, gwj); ++} ++ + static int cgw_notifier(struct notifier_block *nb, + unsigned long msg, void *ptr) + { +@@ -534,8 +541,7 @@ static int cgw_notifier(struct notifier_block *nb, + if (gwj->src.dev == dev || gwj->dst.dev == dev) { + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + } + } + } +@@ -1093,8 +1099,7 @@ static void cgw_remove_all_jobs(struct net *net) + hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) { + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + } + } + +@@ -1162,8 +1167,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, + + hlist_del(&gwj->list); + cgw_unregister_filter(net, gwj); +- synchronize_rcu(); +- kmem_cache_free(cgw_cache, gwj); ++ call_rcu(&gwj->rcu, cgw_job_free_rcu); + err = 0; + break; + } +-- +2.39.5 + diff --git a/queue-5.10/net-dsa-b53-allow-leaky-reserved-multicast.patch b/queue-5.10/net-dsa-b53-allow-leaky-reserved-multicast.patch new file mode 100644 index 0000000000..87f0fecb27 --- /dev/null +++ b/queue-5.10/net-dsa-b53-allow-leaky-reserved-multicast.patch @@ -0,0 +1,53 @@ +From f22516198c6b6a53787db5f6555efe7bf1f513df Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 29 Apr 2025 22:17:00 +0200 +Subject: net: dsa: b53: allow leaky reserved multicast + +From: Jonas Gorski + +[ Upstream commit 5f93185a757ff38b36f849c659aeef368db15a68 ] + +Allow reserved multicast to ignore VLAN membership so STP and other +management protocols work without a PVID VLAN configured when using a +vlan aware bridge. 
+ +Fixes: 967dd82ffc52 ("net: dsa: b53: Add support for Broadcom RoboSwitch") +Signed-off-by: Jonas Gorski +Tested-by: Florian Fainelli +Reviewed-by: Florian Fainelli +Link: https://patch.msgid.link/20250429201710.330937-2-jonas.gorski@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/b53/b53_common.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index d3428e62bef24..e926dd47b1308 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -373,9 +373,11 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, + b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5); + } + ++ vc1 &= ~VC1_RX_MCST_FWD_EN; ++ + if (enable) { + vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; +- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; ++ vc1 |= VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + if (enable_filtering) { + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; +@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, + + } else { + vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID); +- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN); ++ vc1 &= ~VC1_RX_MCST_UNTAG_EN; + vc4 &= ~VC4_ING_VID_CHECK_MASK; + vc5 &= ~VC5_DROP_VTABLE_MISS; + +-- +2.39.5 + diff --git a/queue-5.10/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch b/queue-5.10/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch new file mode 100644 index 0000000000..1beb6ef3ba --- /dev/null +++ b/queue-5.10/net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch @@ -0,0 +1,43 @@ +From 88c8f208cb04687f75ff8c3ed3c45082b0b7245f Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 29 Apr 2025 22:17:09 +0200 +Subject: net: dsa: b53: fix learning on VLAN unaware bridges + +From: Jonas Gorski + +[ Upstream commit 9f34ad89bcf0e6df6f8b01f1bdab211493fc66d1 ] + +When VLAN filtering is off, we configure the switch to forward, but not +learn on VLAN table misses. This effectively disables learning while not +filtering. + +Fix this by switching to forward and learn. Setting the learning disable +register will still control whether learning actually happens. 
+ +Fixes: dad8d7c6452b ("net: dsa: b53: Properly account for VLAN filtering") +Signed-off-by: Jonas Gorski +Tested-by: Florian Fainelli +Reviewed-by: Florian Fainelli +Link: https://patch.msgid.link/20250429201710.330937-11-jonas.gorski@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/b53/b53_common.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index 0914982a80c11..39a56cedbc1f4 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -383,7 +383,7 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable, + vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; + vc5 |= VC5_DROP_VTABLE_MISS; + } else { +- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; ++ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S; + vc5 &= ~VC5_DROP_VTABLE_MISS; + } + +-- +2.39.5 + diff --git a/queue-5.10/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch b/queue-5.10/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch new file mode 100644 index 0000000000..07b7cc27a3 --- /dev/null +++ b/queue-5.10/net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch @@ -0,0 +1,49 @@ +From 53e61821ca84dd5050eff1062bba3c9df20006b4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 29 Apr 2025 22:17:04 +0200 +Subject: net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave + +From: Jonas Gorski + +[ Upstream commit a1c1901c5cc881425cc45992ab6c5418174e9e5a ] + +The untagged default VLAN is added to the default vlan, which may be +one, but we modify the VLAN 0 entry on bridge leave. + +Fix this to use the correct VLAN entry for the default pvid. + +Fixes: fea83353177a ("net: dsa: b53: Fix default VLAN ID") +Signed-off-by: Jonas Gorski +Tested-by: Florian Fainelli +Reviewed-by: Florian Fainelli +Link: https://patch.msgid.link/20250429201710.330937-6-jonas.gorski@gmail.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/dsa/b53/b53_common.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index e926dd47b1308..0914982a80c11 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -1872,7 +1872,7 @@ EXPORT_SYMBOL(b53_br_join); + void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + { + struct b53_device *dev = ds->priv; +- struct b53_vlan *vl = &dev->vlans[0]; ++ struct b53_vlan *vl; + s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + unsigned int i; + u16 pvlan, reg, pvid; +@@ -1898,6 +1898,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + dev->ports[port].vlan_ctl_mask = pvlan; + + pvid = b53_default_pvid(dev); ++ vl = &dev->vlans[pvid]; + + /* Make this port join all VLANs without VLAN entries */ + if (is58xx(dev)) { +-- +2.39.5 + diff --git a/queue-5.10/netfilter-ipset-fix-region-locking-in-hash-types.patch b/queue-5.10/netfilter-ipset-fix-region-locking-in-hash-types.patch new file mode 100644 index 0000000000..fdddf3393a --- /dev/null +++ b/queue-5.10/netfilter-ipset-fix-region-locking-in-hash-types.patch @@ -0,0 +1,42 @@ +From d555426990a3d0dcaec01ed60cd13675a6713a47 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 7 May 2025 17:01:59 +0200 +Subject: netfilter: ipset: fix region locking in hash types + +From: Jozsef Kadlecsik + +[ Upstream commit 8478a729c0462273188263136880480729e9efca ] + +Region locking 
introduced in v5.6-rc4 contained three macros to handle +the region locks: ahash_bucket_start(), ahash_bucket_end() which gave +back the start and end hash bucket values belonging to a given region +lock and ahash_region() which should give back the region lock belonging +to a given hash bucket. The latter was incorrect which can lead to a +race condition between the garbage collector and adding new elements +when a hash type of set is defined with timeouts. + +Fixes: f66ee0410b1c ("netfilter: ipset: Fix "INFO: rcu detected stall in hash_xxx" reports") +Reported-by: Kota Toda +Signed-off-by: Jozsef Kadlecsik +Signed-off-by: Pablo Neira Ayuso +Signed-off-by: Sasha Levin +--- + net/netfilter/ipset/ip_set_hash_gen.h | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index 093ec52140084..636118b0f2ef4 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -88,7 +88,7 @@ struct hbucket { + #define ahash_sizeof_regions(htable_bits) \ + (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region)) + #define ahash_region(n, htable_bits) \ +- ((n) % ahash_numof_locks(htable_bits)) ++ ((n) / jhash_size(HTABLE_REGION_BITS)) + #define ahash_bucket_start(h, htable_bits) \ + ((htable_bits) < HTABLE_REGION_BITS ? 0 \ + : (h) * jhash_size(HTABLE_REGION_BITS)) +-- +2.39.5 + diff --git a/queue-5.10/rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch b/queue-5.10/rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch new file mode 100644 index 0000000000..722d793ce1 --- /dev/null +++ b/queue-5.10/rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch @@ -0,0 +1,45 @@ +From ffb98ccc834a954f1c0945c6c8be2b9054d719f7 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 1 Feb 2023 16:08:07 +0100 +Subject: rcu/kvfree: Add kvfree_rcu_mightsleep() and kfree_rcu_mightsleep() + +From: Uladzislau Rezki (Sony) + +[ Upstream commit 608723c41cd951fb32ade2f8371e61c270816175 ] + +The kvfree_rcu() and kfree_rcu() APIs are hazardous in that if you forget +the second argument, it works, but might sleep. This sleeping can be a +correctness bug from atomic contexts, and even in non-atomic contexts +it might introduce unacceptable latencies. This commit therefore adds +kvfree_rcu_mightsleep() and kfree_rcu_mightsleep(), which will replace +the single-argument kvfree_rcu() and kfree_rcu(), respectively. + +This commit enables a series of commits that switch from single-argument +kvfree_rcu() and kfree_rcu() to their _mightsleep() counterparts. Once +all of these commits land, the single-argument versions will be removed. + +Signed-off-by: Uladzislau Rezki (Sony) +Signed-off-by: Paul E. McKenney +Stable-dep-of: 511e64e13d8c ("can: gw: fix RCU/BH usage in cgw_create_job()") +Signed-off-by: Sasha Levin +--- + include/linux/rcupdate.h | 3 +++ + 1 file changed, 3 insertions(+) + +diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h +index 9db6710e6ee7b..9b3e01db654e2 100644 +--- a/include/linux/rcupdate.h ++++ b/include/linux/rcupdate.h +@@ -987,6 +987,9 @@ do { \ + #define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \ + kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__) + ++#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr) ++#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr) ++ + #define KVFREE_GET_MACRO(_1, _2, NAME, ...) 
NAME + #define kvfree_rcu_arg_2(ptr, rhf) kfree_rcu(ptr, rhf) + #define kvfree_rcu_arg_1(ptr) \ +-- +2.39.5 + diff --git a/queue-5.10/series b/queue-5.10/series index c12ac4e943..73202bd81f 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -41,3 +41,10 @@ dm-fix-copying-after-src-array-boundaries.patch scsi-target-fix-write_same-no-data-buffer-crash.patch can-mcp251xfd-mcp251xfd_remove-fix-order-of-unregistration-calls.patch openvswitch-fix-unsafe-attribute-parsing-in-output_userspace.patch +can-gw-use-call_rcu-instead-of-costly-synchronize_rc.patch +rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch +can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch +netfilter-ipset-fix-region-locking-in-hash-types.patch +net-dsa-b53-allow-leaky-reserved-multicast.patch +net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch +net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch
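
A note on the pattern behind the headline fix (can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch): the old code updated the shared struct cf_mod in place with memcpy() under local_bh_disable(), which only excludes readers on the local CPU. The fix switches to the standard RCU publish/retire pattern: allocate a fully initialized replacement, publish it with rcu_assign_pointer(), and retire the old copy with kfree_rcu_mightsleep() once all readers have left their read-side sections. Below is a minimal, self-contained sketch of that pattern, not part of any patch in this queue; struct demo_mod, struct demo_job, demo_read() and demo_update() are made-up names for illustration.

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct demo_mod {
	u32 uid;
	/* ... modification description ... */
};

struct demo_job {
	struct demo_mod __rcu *mod;
};

/* Reader: runs inside an RCU read-side section (e.g. the softirq RX
 * path). Dereference once and work on that snapshot; a concurrent
 * writer may swap job->mod at any time, but the snapshot stays valid
 * until the read-side section ends.
 */
static u32 demo_read(struct demo_job *job)
{
	struct demo_mod *mod;
	u32 uid;

	rcu_read_lock();
	mod = rcu_dereference(job->mod);
	uid = mod->uid;
	rcu_read_unlock();

	return uid;
}

/* Writer: serialized by RTNL, as in cgw_create_job(). Publish a fully
 * initialized replacement, then free the old copy after a grace
 * period. No local_bh_disable() is needed, and readers never observe
 * a half-updated struct.
 */
static int demo_update(struct demo_job *job, u32 new_uid)
{
	struct demo_mod *new_mod, *old_mod;

	ASSERT_RTNL();

	new_mod = kmalloc(sizeof(*new_mod), GFP_KERNEL);
	if (!new_mod)
		return -ENOMEM;
	new_mod->uid = new_uid;

	old_mod = rcu_dereference_protected(job->mod, rtnl_is_locked());
	rcu_assign_pointer(job->mod, new_mod);
	kfree_rcu_mightsleep(old_mod);	/* grace period, then kfree() */

	return 0;
}

The single-argument kfree_rcu_mightsleep() is why this queue also pulls in rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch as a dependency: it needs no rcu_head embedded in the freed struct, at the cost of possibly sleeping, which is acceptable here because the writer already runs in process context under RTNL.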