--- /dev/null
+From 7785879da16a5d43cea8419b44700cfbd78d510a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 09:05:55 +0200
+Subject: can: gw: fix RCU/BH usage in cgw_create_job()
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit 511e64e13d8cc72853275832e3f372607466c18c ]
+
+As reported by Sebastian Andrzej Siewior, the use of local_bh_disable()
+is only feasible on uniprocessor systems to update the modification rules.
+The usual use-case for updating the modification rules is to update the
+data of the modifications, but not the modification types (AND/OR/XOR/SET)
+or the checksum functions themselves.
+
+To avoid additional memory allocations while maintaining fast modification
+switching times, the modification description space is doubled at gw-job
+creation time so that only the reference to the active modification
+description is changed under RCU protection.
+
+Rename cgw_job::mod to cf_mod and make it an RCU pointer. Allocate it in
+cgw_create_job() and free it together with cgw_job in
+cgw_job_free_rcu(). Update all users to dereference cgw_job::cf_mod with
+an RCU accessor and, where possible, only once.
+
+[bigeasy: Replace mod1/mod2 from Oliver's original patch with dynamic
+allocation, use RCU annotation and accessor]
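+
+A minimal sketch of the resulting update path, assuming the names
+introduced by this patch (not a verbatim copy of the final code):
+
+	/* replace the modification set of an existing job under RTNL */
+	old_cf = cgw_job_cf_mod(gwj);          /* RTNL-protected access */
+	rcu_assign_pointer(gwj->cf_mod, mod);  /* readers switch to 'mod' */
+	kfree_rcu_mightsleep(old_cf);          /* freed after a grace period */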
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Closes: https://lore.kernel.org/linux-can/20231031112349.y0aLoBrz@linutronix.de/
+Fixes: dd895d7f21b2 ("can: cangw: introduce optional uid to reference created routing jobs")
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://patch.msgid.link/20250429070555.cs-7b_eZ@linutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/gw.c | 149 +++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 90 insertions(+), 59 deletions(-)
+
+diff --git a/net/can/gw.c b/net/can/gw.c
+index 20e74fe7d0906..c48e8cf5e6506 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -130,7 +130,7 @@ struct cgw_job {
+ u32 handled_frames;
+ u32 dropped_frames;
+ u32 deleted_frames;
+- struct cf_mod mod;
++ struct cf_mod __rcu *cf_mod;
+ union {
+ /* CAN frame data source */
+ struct net_device *dev;
+@@ -459,6 +459,7 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ struct cgw_job *gwj = (struct cgw_job *)data;
+ struct canfd_frame *cf;
+ struct sk_buff *nskb;
++ struct cf_mod *mod;
+ int modidx = 0;
+
+ /* process strictly Classic CAN or CAN FD frames */
+@@ -506,7 +507,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ * When there is at least one modification function activated,
+ * we need to copy the skb as we want to modify skb->data.
+ */
+- if (gwj->mod.modfunc[0])
++ mod = rcu_dereference(gwj->cf_mod);
++ if (mod->modfunc[0])
+ nskb = skb_copy(skb, GFP_ATOMIC);
+ else
+ nskb = skb_clone(skb, GFP_ATOMIC);
+@@ -529,8 +531,8 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ cf = (struct canfd_frame *)nskb->data;
+
+ /* perform preprocessed modification functions if there are any */
+- while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
+- (*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);
++ while (modidx < MAX_MODFUNCTIONS && mod->modfunc[modidx])
++ (*mod->modfunc[modidx++])(cf, mod);
+
+ /* Has the CAN frame been modified? */
+ if (modidx) {
+@@ -546,11 +548,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
+ }
+
+ /* check for checksum updates */
+- if (gwj->mod.csumfunc.crc8)
+- (*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);
++ if (mod->csumfunc.crc8)
++ (*mod->csumfunc.crc8)(cf, &mod->csum.crc8);
+
+- if (gwj->mod.csumfunc.xor)
+- (*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
++ if (mod->csumfunc.xor)
++ (*mod->csumfunc.xor)(cf, &mod->csum.xor);
+ }
+
+ /* clear the skb timestamp if not configured the other way */
+@@ -581,9 +583,20 @@ static void cgw_job_free_rcu(struct rcu_head *rcu_head)
+ {
+ struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
+
++ /* cgw_job::cf_mod is always accessed from the same cgw_job object within
++ * the same RCU read section. Once cgw_job is scheduled for removal,
++ * cf_mod can also be removed without mandating an additional grace period.
++ */
++ kfree(rcu_access_pointer(gwj->cf_mod));
+ kmem_cache_free(cgw_cache, gwj);
+ }
+
++/* Return cgw_job::cf_mod with RTNL protected section */
++static struct cf_mod *cgw_job_cf_mod(struct cgw_job *gwj)
++{
++ return rcu_dereference_protected(gwj->cf_mod, rtnl_is_locked());
++}
++
+ static int cgw_notifier(struct notifier_block *nb,
+ unsigned long msg, void *ptr)
+ {
+@@ -616,6 +629,7 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ {
+ struct rtcanmsg *rtcan;
+ struct nlmsghdr *nlh;
++ struct cf_mod *mod;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
+ if (!nlh)
+@@ -650,82 +664,83 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
+ goto cancel;
+ }
+
++ mod = cgw_job_cf_mod(gwj);
+ if (gwj->flags & CGW_FLAGS_CAN_FD) {
+ struct cgw_fdframe_mod mb;
+
+- if (gwj->mod.modtype.and) {
+- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.and;
++ if (mod->modtype.and) {
++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++ mb.modtype = mod->modtype.and;
+ if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.or) {
+- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.or;
++ if (mod->modtype.or) {
++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++ mb.modtype = mod->modtype.or;
+ if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.xor) {
+- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.xor;
++ if (mod->modtype.xor) {
++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++ mb.modtype = mod->modtype.xor;
+ if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.set) {
+- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.set;
++ if (mod->modtype.set) {
++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++ mb.modtype = mod->modtype.set;
+ if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+ } else {
+ struct cgw_frame_mod mb;
+
+- if (gwj->mod.modtype.and) {
+- memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.and;
++ if (mod->modtype.and) {
++ memcpy(&mb.cf, &mod->modframe.and, sizeof(mb.cf));
++ mb.modtype = mod->modtype.and;
+ if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.or) {
+- memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.or;
++ if (mod->modtype.or) {
++ memcpy(&mb.cf, &mod->modframe.or, sizeof(mb.cf));
++ mb.modtype = mod->modtype.or;
+ if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.xor) {
+- memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.xor;
++ if (mod->modtype.xor) {
++ memcpy(&mb.cf, &mod->modframe.xor, sizeof(mb.cf));
++ mb.modtype = mod->modtype.xor;
+ if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.modtype.set) {
+- memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
+- mb.modtype = gwj->mod.modtype.set;
++ if (mod->modtype.set) {
++ memcpy(&mb.cf, &mod->modframe.set, sizeof(mb.cf));
++ mb.modtype = mod->modtype.set;
+ if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
+ goto cancel;
+ }
+ }
+
+- if (gwj->mod.uid) {
+- if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
++ if (mod->uid) {
++ if (nla_put_u32(skb, CGW_MOD_UID, mod->uid) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.csumfunc.crc8) {
++ if (mod->csumfunc.crc8) {
+ if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
+- &gwj->mod.csum.crc8) < 0)
++ &mod->csum.crc8) < 0)
+ goto cancel;
+ }
+
+- if (gwj->mod.csumfunc.xor) {
++ if (mod->csumfunc.xor) {
+ if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
+- &gwj->mod.csum.xor) < 0)
++ &mod->csum.xor) < 0)
+ goto cancel;
+ }
+
+@@ -1059,7 +1074,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct net *net = sock_net(skb->sk);
+ struct rtcanmsg *r;
+ struct cgw_job *gwj;
+- struct cf_mod mod;
++ struct cf_mod *mod;
+ struct can_can_gw ccgw;
+ u8 limhops = 0;
+ int err = 0;
+@@ -1078,37 +1093,48 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (r->gwtype != CGW_TYPE_CAN_CAN)
+ return -EINVAL;
+
+- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
++ mod = kmalloc(sizeof(*mod), GFP_KERNEL);
++ if (!mod)
++ return -ENOMEM;
++
++ err = cgw_parse_attr(nlh, mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
+ if (err < 0)
+- return err;
++ goto out_free_cf;
+
+- if (mod.uid) {
++ if (mod->uid) {
+ ASSERT_RTNL();
+
+ /* check for updating an existing job with identical uid */
+ hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
+- if (gwj->mod.uid != mod.uid)
++ struct cf_mod *old_cf;
++
++ old_cf = cgw_job_cf_mod(gwj);
++ if (old_cf->uid != mod->uid)
+ continue;
+
+ /* interfaces & filters must be identical */
+- if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
+- return -EINVAL;
++ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) {
++ err = -EINVAL;
++ goto out_free_cf;
++ }
+
+- /* update modifications with disabled softirq & quit */
+- local_bh_disable();
+- memcpy(&gwj->mod, &mod, sizeof(mod));
+- local_bh_enable();
++ rcu_assign_pointer(gwj->cf_mod, mod);
++ kfree_rcu_mightsleep(old_cf);
+ return 0;
+ }
+ }
+
+ /* ifindex == 0 is not allowed for job creation */
+- if (!ccgw.src_idx || !ccgw.dst_idx)
+- return -ENODEV;
++ if (!ccgw.src_idx || !ccgw.dst_idx) {
++ err = -ENODEV;
++ goto out_free_cf;
++ }
+
+ gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
+- if (!gwj)
+- return -ENOMEM;
++ if (!gwj) {
++ err = -ENOMEM;
++ goto out_free_cf;
++ }
+
+ gwj->handled_frames = 0;
+ gwj->dropped_frames = 0;
+@@ -1118,7 +1144,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ gwj->limit_hops = limhops;
+
+ /* insert already parsed information */
+- memcpy(&gwj->mod, &mod, sizeof(mod));
++ RCU_INIT_POINTER(gwj->cf_mod, mod);
+ memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));
+
+ err = -ENODEV;
+@@ -1145,9 +1171,11 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+ if (!err)
+ hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
+ out:
+- if (err)
++ if (err) {
+ kmem_cache_free(cgw_cache, gwj);
+-
++out_free_cf:
++ kfree(mod);
++ }
+ return err;
+ }
+
+@@ -1207,19 +1235,22 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+
+ /* remove only the first matching entry */
+ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
++ struct cf_mod *cf_mod;
++
+ if (gwj->flags != r->flags)
+ continue;
+
+ if (gwj->limit_hops != limhops)
+ continue;
+
++ cf_mod = cgw_job_cf_mod(gwj);
+ /* we have a match when uid is enabled and identical */
+- if (gwj->mod.uid || mod.uid) {
+- if (gwj->mod.uid != mod.uid)
++ if (cf_mod->uid || mod.uid) {
++ if (cf_mod->uid != mod.uid)
+ continue;
+ } else {
+ /* no uid => check for identical modifications */
+- if (memcmp(&gwj->mod, &mod, sizeof(mod)))
++ if (memcmp(cf_mod, &mod, sizeof(mod)))
+ continue;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 29cd3b2aa02d6b45bba3dd7e3c575f1ef75477e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Feb 2022 11:07:06 -0800
+Subject: can: gw: use call_rcu() instead of costly synchronize_rcu()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 181d4447905d551cc664f1e7e796b482c1eec992 ]
+
+Commit fb8696ab14ad ("can: gw: synchronize rcu operations
+before removing gw job entry") added three synchronize_rcu() calls
+to make sure one RCU grace period was observed before freeing
+a "struct cgw_job" (a tiny object).
+
+These calls should be converted to call_rcu() to avoid adding delays
+to device/network dismantles.
+
+Use the rcu_head that was already present in struct cgw_job but not
+yet used.
+
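+The pattern, in short (the hunks below apply it in three places):
+
+	/* before: each removal blocks for a full RCU grace period */
+	synchronize_rcu();
+	kmem_cache_free(cgw_cache, gwj);
+
+	/* after: returns immediately; the object is freed from the
+	 * RCU callback once a grace period has elapsed
+	 */
+	call_rcu(&gwj->rcu, cgw_job_free_rcu);
+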
+Link: https://lore.kernel.org/all/20220207190706.1499190-1-eric.dumazet@gmail.com
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Stable-dep-of: 511e64e13d8c ("can: gw: fix RCU/BH usage in cgw_create_job()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/gw.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/net/can/gw.c b/net/can/gw.c
+index d8861e862f157..20e74fe7d0906 100644
+--- a/net/can/gw.c
++++ b/net/can/gw.c
+@@ -577,6 +577,13 @@ static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
+ gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
+ }
+
++static void cgw_job_free_rcu(struct rcu_head *rcu_head)
++{
++ struct cgw_job *gwj = container_of(rcu_head, struct cgw_job, rcu);
++
++ kmem_cache_free(cgw_cache, gwj);
++}
++
+ static int cgw_notifier(struct notifier_block *nb,
+ unsigned long msg, void *ptr)
+ {
+@@ -596,8 +603,7 @@ static int cgw_notifier(struct notifier_block *nb,
+ if (gwj->src.dev == dev || gwj->dst.dev == dev) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(net, gwj);
+- synchronize_rcu();
+- kmem_cache_free(cgw_cache, gwj);
++ call_rcu(&gwj->rcu, cgw_job_free_rcu);
+ }
+ }
+ }
+@@ -1155,8 +1161,7 @@ static void cgw_remove_all_jobs(struct net *net)
+ hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(net, gwj);
+- synchronize_rcu();
+- kmem_cache_free(cgw_cache, gwj);
++ call_rcu(&gwj->rcu, cgw_job_free_rcu);
+ }
+ }
+
+@@ -1224,8 +1229,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
+
+ hlist_del(&gwj->list);
+ cgw_unregister_filter(net, gwj);
+- synchronize_rcu();
+- kmem_cache_free(cgw_cache, gwj);
++ call_rcu(&gwj->rcu, cgw_job_free_rcu);
+ err = 0;
+ break;
+ }
+--
+2.39.5
+
--- /dev/null
+From 165e6bfcd5024f1f9cc300a158ff43a5b2e19da9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 3 May 2025 00:57:52 +0200
+Subject: gre: Fix again IPv6 link-local address generation.
+
+From: Guillaume Nault <gnault@redhat.com>
+
+[ Upstream commit 3e6a0243ff002ddbd7ee18a8974ae61d2e6ed00d ]
+
+Use addrconf_addr_gen() to generate IPv6 link-local addresses on GRE
+devices in most cases, and fall back to using add_v4_addrs() only when
+the GRE configuration is incompatible with addrconf_addr_gen().
+
+GRE used to use addrconf_addr_gen() until commit e5dd729460ca ("ip/ip6_gre:
+use the same logic as SIT interfaces when computing v6LL address")
+restricted this use to gretap and ip6gretap devices, and created
+add_v4_addrs() (borrowed from SIT) for non-Ethernet GRE ones.
+
+The original problem came when commit 9af28511be10 ("addrconf: refuse
+isatap eui64 for INADDR_ANY") made __ipv6_isatap_ifid() fail when its
+addr parameter was 0. The commit says that this would create an invalid
+address, however, I couldn't find any RFC saying that the generated
+interface identifier would be wrong. Anyway, since gre over IPv4
+devices pass their local tunnel address to __ipv6_isatap_ifid(), that
+commit broke their IPv6 link-local address generation when the local
+address was unspecified.
+
+Then commit e5dd729460ca ("ip/ip6_gre: use the same logic as SIT
+interfaces when computing v6LL address") tried to fix that case by
+defining add_v4_addrs() and calling it to generate the IPv6 link-local
+address instead of using addrconf_addr_gen() (apart for gretap and
+ip6gretap devices, which would still use the regular
+addrconf_addr_gen(), since they have a MAC address).
+
+That broke several use cases because add_v4_addrs() isn't properly
+integrated into the rest of IPv6 Neighbor Discovery code. Several of
+these shortcomings have been fixed over time, but add_v4_addrs()
+remains broken in several respects. In particular, it doesn't send any
+Router Solicitations, so the SLAAC process doesn't start until the
+interface receives a Router Advertisement. Also, add_v4_addrs() mostly
+ignores the address generation mode of the interface
+(/proc/sys/net/ipv6/conf/*/addr_gen_mode), thus breaking the
+IN6_ADDR_GEN_MODE_RANDOM and IN6_ADDR_GEN_MODE_STABLE_PRIVACY cases.
+
+Fix the situation by using add_v4_addrs() only in the specific scenario
+where the normal method would fail. That is, for interfaces that have
+all of the following characteristics:
+
+ * run over IPv4,
+ * transport IP packets directly, not Ethernet (that is, not gretap
+ interfaces),
+ * tunnel endpoint is INADDR_ANY (that is, 0),
+ * device address generation mode is EUI64.
+
+In all other cases, revert to the regular addrconf_addr_gen().
+
+Also, remove the special case for ip6gre interfaces in add_v4_addrs(),
+since ip6gre devices now always use addrconf_addr_gen() instead.
+
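+For illustration (hypothetical interface names), a device created with
+"ip link add gre1 type gre local 192.0.2.1 remote 203.0.113.1" now gets
+its link-local address via addrconf_addr_gen(), while one created with
+"ip link add gre0 type gre remote 203.0.113.1" (local address left
+unspecified, i.e. INADDR_ANY) keeps falling back to add_v4_addrs() when
+its addr_gen_mode is IN6_ADDR_GEN_MODE_EUI64.
+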
+Note:
+ This patch was originally applied as commit 183185a18ff9 ("gre: Fix
+ IPv6 link-local address generation."). However, it was then reverted
+ by commit fc486c2d060f ("Revert "gre: Fix IPv6 link-local address
+ generation."") because it uncovered another bug that ended up
+ breaking net/forwarding/ip6gre_custom_multipath_hash.sh. That other
+ bug has now been fixed by commit 4d0ab3a6885e ("ipv6: Start path
+ selection from the first nexthop"). Therefore we can now revive this
+ GRE patch (no changes since original commit 183185a18ff9 ("gre: Fix
+ IPv6 link-local address generation.").
+
+Fixes: e5dd729460ca ("ip/ip6_gre: use the same logic as SIT interfaces when computing v6LL address")
+Signed-off-by: Guillaume Nault <gnault@redhat.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/a88cc5c4811af36007645d610c95102dccb360a6.1746225214.git.gnault@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/addrconf.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 07b3487e3ae97..47c4a3e72bcd9 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3145,16 +3145,13 @@ static void add_v4_addrs(struct inet6_dev *idev)
+ struct in6_addr addr;
+ struct net_device *dev;
+ struct net *net = dev_net(idev->dev);
+- int scope, plen, offset = 0;
++ int scope, plen;
+ u32 pflags = 0;
+
+ ASSERT_RTNL();
+
+ memset(&addr, 0, sizeof(struct in6_addr));
+- /* in case of IP6GRE the dev_addr is an IPv6 and therefore we use only the last 4 bytes */
+- if (idev->dev->addr_len == sizeof(struct in6_addr))
+- offset = sizeof(struct in6_addr) - 4;
+- memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
++ memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
+
+ if (!(idev->dev->flags & IFF_POINTOPOINT) && idev->dev->type == ARPHRD_SIT) {
+ scope = IPV6_ADDR_COMPATv4;
+@@ -3462,7 +3459,13 @@ static void addrconf_gre_config(struct net_device *dev)
+ return;
+ }
+
+- if (dev->type == ARPHRD_ETHER) {
++ /* Generate the IPv6 link-local address using addrconf_addr_gen(),
++ * unless we have an IPv4 GRE device not bound to an IP address and
++ * which is in EUI64 mode (as __ipv6_isatap_ifid() would fail in this
++ * case). Such devices fall back to add_v4_addrs() instead.
++ */
++ if (!(dev->type == ARPHRD_IPGRE && *(__be32 *)dev->dev_addr == 0 &&
++ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)) {
+ addrconf_addr_gen(idev, true);
+ return;
+ }
+--
+2.39.5
+
--- /dev/null
+From 1a2fbed05689433efa485481bdb532ce3af0134f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:00 +0200
+Subject: net: dsa: b53: allow leaky reserved multicast
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 5f93185a757ff38b36f849c659aeef368db15a68 ]
+
+Allow reserved multicast to ignore VLAN membership so STP and other
+management protocols work without a PVID VLAN configured when using a
+VLAN-aware bridge.
+
+Fixes: 967dd82ffc52 ("net: dsa: b53: Add support for Broadcom RoboSwitch")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-2-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 27025ca5a7598..08f9929132e47 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -373,9 +373,11 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ b53_read8(dev, B53_VLAN_PAGE, B53_VLAN_CTRL5, &vc5);
+ }
+
++ vc1 &= ~VC1_RX_MCST_FWD_EN;
++
+ if (enable) {
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+- vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
++ vc1 |= VC1_RX_MCST_UNTAG_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ if (enable_filtering) {
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+@@ -393,7 +395,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+
+ } else {
+ vc0 &= ~(VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID);
+- vc1 &= ~(VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN);
++ vc1 &= ~VC1_RX_MCST_UNTAG_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+
+--
+2.39.5
+
--- /dev/null
+From 064e8c8aa087756c03139abdbd3a3436dbc3b000 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:05 +0200
+Subject: net: dsa: b53: always rejoin default untagged VLAN on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 13b152ae40495966501697693f048f47430c50fd ]
+
+While JOIN_ALL_VLAN allows joining all VLANs, we still need to keep the
+default VLAN enabled so that untagged traffic stays untagged.
+
+So rejoin the default VLAN even for switches with JOIN_ALL_VLAN support.
+
+Fixes: 48aea33a77ab ("net: dsa: b53: Add JOIN_ALL_VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-7-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 37b3a46b060f5..429ea5056235f 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1961,12 +1961,12 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+ if (!(reg & BIT(cpu_port)))
+ reg |= BIT(cpu_port);
+ b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
+- } else {
+- b53_get_vlan_entry(dev, pvid, vl);
+- vl->members |= BIT(port) | BIT(cpu_port);
+- vl->untag |= BIT(port) | BIT(cpu_port);
+- b53_set_vlan_entry(dev, pvid, vl);
+ }
++
++ b53_get_vlan_entry(dev, pvid, vl);
++ vl->members |= BIT(port) | BIT(cpu_port);
++ vl->untag |= BIT(port) | BIT(cpu_port);
++ b53_set_vlan_entry(dev, pvid, vl);
+ }
+ EXPORT_SYMBOL(b53_br_leave);
+
+--
+2.39.5
+
--- /dev/null
+From 221d8f630b0a3d4f8c9fb7271509e8f14d9036e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:02 +0200
+Subject: net: dsa: b53: fix clearing PVID of a port
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit f480851981043d9bb6447ca9883ade9247b9a0ad ]
+
+Currently the PVID of a port is only set when adding/updating VLANs with
+the PVID flag set or when removing VLANs, but not when clearing the PVID
+flag of a VLAN.
+
+E.g. the following flow
+
+$ ip link add br0 type bridge vlan_filtering 1
+$ ip link set sw1p1 master bridge
+$ bridge vlan add dev sw1p1 vid 10 pvid untagged
+$ bridge vlan add dev sw1p1 vid 10 untagged
+
+would keep the PVID set to 10, despite the flag being cleared. Fix this
+by checking whether we need to unset the PVID on VLAN updates.
+
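+A sketch of the pvid selection this change introduces (mirroring the
+hunk below):
+
+	if (pvid)                        /* PVID flag set: vid becomes pvid */
+		new_pvid = vlan->vid;
+	else if (vlan->vid == old_pvid)  /* flag cleared on current pvid */
+		new_pvid = b53_default_pvid(dev);
+	else                             /* unrelated VLAN: keep the pvid */
+		new_pvid = old_pvid;
+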
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-4-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 08f9929132e47..83296ca02098c 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1502,12 +1502,21 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct b53_vlan *vl;
++ u16 old_pvid, new_pvid;
+ int err;
+
+ err = b53_vlan_prepare(ds, port, vlan);
+ if (err)
+ return err;
+
++ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &old_pvid);
++ if (pvid)
++ new_pvid = vlan->vid;
++ else if (!pvid && vlan->vid == old_pvid)
++ new_pvid = b53_default_pvid(dev);
++ else
++ new_pvid = old_pvid;
++
+ vl = &dev->vlans[vlan->vid];
+
+ b53_get_vlan_entry(dev, vlan->vid, vl);
+@@ -1524,9 +1533,9 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ b53_set_vlan_entry(dev, vlan->vid, vl);
+ b53_fast_age_vlan(dev, vlan->vid);
+
+- if (pvid && !dsa_is_cpu_port(ds, port)) {
++ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+- vlan->vid);
++ new_pvid);
+ b53_fast_age_vlan(dev, vlan->vid);
+ }
+
+--
+2.39.5
+
--- /dev/null
+From ceea31adaa57de81b585990ffd38dea0d3364654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:03 +0200
+Subject: net: dsa: b53: fix flushing old pvid VLAN on pvid change
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 083c6b28c0cbcd83b6af1a10f2c82937129b3438 ]
+
+Presumably the intention here was to flush the VLAN of the old pvid, not
+the added VLAN again, which we already flushed before.
+
+Fixes: a2482d2ce349 ("net: dsa: b53: Plug in VLAN support")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-5-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 83296ca02098c..29bf2fb5e532f 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1536,7 +1536,7 @@ int b53_vlan_add(struct dsa_switch *ds, int port,
+ if (!dsa_is_cpu_port(ds, port) && new_pvid != old_pvid) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ new_pvid);
+- b53_fast_age_vlan(dev, vlan->vid);
++ b53_fast_age_vlan(dev, old_pvid);
+ }
+
+ return 0;
+--
+2.39.5
+
--- /dev/null
+From 265ab021aa63b8b76d41801468e312e878999f7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:09 +0200
+Subject: net: dsa: b53: fix learning on VLAN unaware bridges
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit 9f34ad89bcf0e6df6f8b01f1bdab211493fc66d1 ]
+
+When VLAN filtering is off, we configure the switch to forward, but not
+learn on VLAN table misses. This effectively disables learning while not
+filtering.
+
+Fix this by switching to forward and learn. Setting the learning disable
+register will still control whether learning actually happens.
+
+Fixes: dad8d7c6452b ("net: dsa: b53: Properly account for VLAN filtering")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-11-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 429ea5056235f..3bd0d1632b657 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -383,7 +383,7 @@ static void b53_enable_vlan(struct b53_device *dev, int port, bool enable,
+ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+ vc5 |= VC5_DROP_VTABLE_MISS;
+ } else {
+- vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
++ vc4 |= VC4_NO_ING_VID_CHK << VC4_ING_VID_CHECK_S;
+ vc5 &= ~VC5_DROP_VTABLE_MISS;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 58aaac54f4dafe068114725170d76d784ef0de52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Apr 2025 22:17:04 +0200
+Subject: net: dsa: b53: fix VLAN ID for untagged vlan on bridge leave
+
+From: Jonas Gorski <jonas.gorski@gmail.com>
+
+[ Upstream commit a1c1901c5cc881425cc45992ab6c5418174e9e5a ]
+
+The untagged default VLAN is added to the default VLAN, which may be
+VLAN 1, but we modify the VLAN 0 entry on bridge leave.
+
+Fix this to use the correct VLAN entry for the default pvid.
+
+Fixes: fea83353177a ("net: dsa: b53: Fix default VLAN ID")
+Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com>
+Tested-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Link: https://patch.msgid.link/20250429201710.330937-6-jonas.gorski@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/b53/b53_common.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 29bf2fb5e532f..37b3a46b060f5 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -1926,7 +1926,7 @@ EXPORT_SYMBOL(b53_br_join);
+ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+ {
+ struct b53_device *dev = ds->priv;
+- struct b53_vlan *vl = &dev->vlans[0];
++ struct b53_vlan *vl;
+ s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ unsigned int i;
+ u16 pvlan, reg, pvid;
+@@ -1952,6 +1952,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+ pvid = b53_default_pvid(dev);
++ vl = &dev->vlans[pvid];
+
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+--
+2.39.5
+
--- /dev/null
+From 9523bc2d5d0e66bc9f23c3fc7b25c3e1bfa3df47 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 May 2025 17:01:59 +0200
+Subject: netfilter: ipset: fix region locking in hash types
+
+From: Jozsef Kadlecsik <kadlec@netfilter.org>
+
+[ Upstream commit 8478a729c0462273188263136880480729e9efca ]
+
+Region locking introduced in v5.6-rc4 contained three macros to handle
+the region locks: ahash_bucket_start(), ahash_bucket_end() which gave
+back the start and end hash bucket values belonging to a given region
+lock and ahash_region() which should give back the region lock belonging
+to a given hash bucket. The latter was incorrect which can lead to a
+race condition between the garbage collector and adding new elements
+when a hash type of set is defined with timeouts.
+
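+A worked example, assuming HTABLE_REGION_BITS is 10 as defined in this
+header (so one region lock covers jhash_size(HTABLE_REGION_BITS) = 1024
+buckets): with htable_bits = 12 there are 4 regions and buckets 0..4095;
+bucket n = 2500 belongs to region 2 (buckets 2048..3071, per
+ahash_bucket_start()/ahash_bucket_end()), but the old macro computed
+2500 % 4 = 0 and took the wrong lock, while the fixed macro computes
+2500 / 1024 = 2.
+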
+Fixes: f66ee0410b1c ("netfilter: ipset: Fix "INFO: rcu detected stall in hash_xxx" reports")
+Reported-by: Kota Toda <kota.toda@gmo-cybersecurity.com>
+Signed-off-by: Jozsef Kadlecsik <kadlec@netfilter.org>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/ipset/ip_set_hash_gen.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
+index ef04e556aadb4..0bd6bf46f05f3 100644
+--- a/net/netfilter/ipset/ip_set_hash_gen.h
++++ b/net/netfilter/ipset/ip_set_hash_gen.h
+@@ -63,7 +63,7 @@ struct hbucket {
+ #define ahash_sizeof_regions(htable_bits) \
+ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+ #define ahash_region(n, htable_bits) \
+- ((n) % ahash_numof_locks(htable_bits))
++ ((n) / jhash_size(HTABLE_REGION_BITS))
+ #define ahash_bucket_start(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 0 \
+ : (h) * jhash_size(HTABLE_REGION_BITS))
+--
+2.39.5
+
--- /dev/null
+From d5d8cff0a849e00376870aa180c396209358080e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Feb 2023 16:08:07 +0100
+Subject: rcu/kvfree: Add kvfree_rcu_mightsleep() and kfree_rcu_mightsleep()
+
+From: Uladzislau Rezki (Sony) <urezki@gmail.com>
+
+[ Upstream commit 608723c41cd951fb32ade2f8371e61c270816175 ]
+
+The kvfree_rcu() and kfree_rcu() APIs are hazardous in that if you forget
+the second argument, it works, but might sleep. This sleeping can be a
+correctness bug from atomic contexts, and even in non-atomic contexts
+it might introduce unacceptable latencies. This commit therefore adds
+kvfree_rcu_mightsleep() and kfree_rcu_mightsleep(), which will replace
+the single-argument kvfree_rcu() and kfree_rcu(), respectively.
+
+This commit enables a series of commits that switch from single-argument
+kvfree_rcu() and kfree_rcu() to their _mightsleep() counterparts. Once
+all of these commits land, the single-argument versions will be removed.
+
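+A short usage sketch (assuming a structure with an embedded rcu_head
+member named "rcu"):
+
+	kfree_rcu(p, rcu);        /* two-argument form: queues p via its
+	                           * rcu_head, never sleeps */
+	kfree_rcu_mightsleep(p);  /* single-argument form: no rcu_head
+	                           * needed, but may block waiting for a
+	                           * grace period, so sleepable context only */
+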
+Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Stable-dep-of: 511e64e13d8c ("can: gw: fix RCU/BH usage in cgw_create_job()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/rcupdate.h | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index d908af5917339..978769e545b5f 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -979,6 +979,9 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
+ #define kvfree_rcu(...) KVFREE_GET_MACRO(__VA_ARGS__, \
+ kvfree_rcu_arg_2, kvfree_rcu_arg_1)(__VA_ARGS__)
+
++#define kvfree_rcu_mightsleep(ptr) kvfree_rcu_arg_1(ptr)
++#define kfree_rcu_mightsleep(ptr) kvfree_rcu_mightsleep(ptr)
++
+ #define KVFREE_GET_MACRO(_1, _2, NAME, ...) NAME
+ #define kvfree_rcu_arg_2(ptr, rhf) \
+ do { \
+--
+2.39.5
+
--- /dev/null
+From 28d00ff8fdae79c554c5b181d4edc4c0af0b942e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Apr 2025 17:07:01 +0200
+Subject: s390/entry: Fix last breaking event handling in case of stack
+ corruption
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit ae952eea6f4a7e2193f8721a5366049946e012e7 ]
+
+In case of stack corruption, stack_invalid() is called and the expectation
+is that register r10 contains the last breaking event address. This
+dependency is quite subtle and broke a couple of years ago without
+anybody noticing.
+
+Fix this by getting rid of the dependency and reading the last breaking event
+address from lowcore.
+
+Fixes: 56e62a737028 ("s390: convert to generic entry")
+Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 160290049e8cd..f85b99427080f 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -676,7 +676,8 @@ ENTRY(stack_overflow)
+ stmg %r0,%r7,__PT_R0(%r11)
+ stmg %r8,%r9,__PT_PSW(%r11)
+ mvc __PT_R8(64,%r11),0(%r14)
+- stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
++ GET_LC %r2
++ mvc __PT_ORIG_GPR2(8,%r11),__LC_PGM_LAST_BREAK(%r2)
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ lgr %r2,%r11 # pass pointer to pt_regs
+ jg kernel_stack_overflow
+--
+2.39.5
+
can-mcan-m_can_class_unregister-fix-order-of-unregistration-calls.patch
can-mcp251xfd-mcp251xfd_remove-fix-order-of-unregistration-calls.patch
openvswitch-fix-unsafe-attribute-parsing-in-output_userspace.patch
+s390-entry-fix-last-breaking-event-handling-in-case-.patch
+gre-fix-again-ipv6-link-local-address-generation.patch
+can-gw-use-call_rcu-instead-of-costly-synchronize_rc.patch
+rcu-kvfree-add-kvfree_rcu_mightsleep-and-kfree_rcu_m.patch
+can-gw-fix-rcu-bh-usage-in-cgw_create_job.patch
+netfilter-ipset-fix-region-locking-in-hash-types.patch
+net-dsa-b53-allow-leaky-reserved-multicast.patch
+net-dsa-b53-fix-clearing-pvid-of-a-port.patch
+net-dsa-b53-fix-flushing-old-pvid-vlan-on-pvid-chang.patch
+net-dsa-b53-fix-vlan-id-for-untagged-vlan-on-bridge-.patch
+net-dsa-b53-always-rejoin-default-untagged-vlan-on-b.patch
+net-dsa-b53-fix-learning-on-vlan-unaware-bridges.patch