3.17-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 12 Oct 2014 15:18:49 +0000 (17:18 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 12 Oct 2014 15:18:49 +0000 (17:18 +0200)
added patches:
3c59x-fix-bad-split-of-cpu_to_le32-pci_map_single.patch
hyperv-fix-a-bug-in-netvsc_send.patch
ip6_gre-fix-flowi6_proto-value-in-xmit-path.patch
net-systemport-fix-bcm_sysport_insert_tsb.patch
net_sched-copy-exts-type-in-tcf_exts_change.patch
sctp-handle-association-restarts-when-the-socket-is-closed.patch
team-avoid-race-condition-in-scheduling-delayed-work.patch

queue-3.17/3c59x-fix-bad-split-of-cpu_to_le32-pci_map_single.patch [new file with mode: 0644]
queue-3.17/hyperv-fix-a-bug-in-netvsc_send.patch [new file with mode: 0644]
queue-3.17/ip6_gre-fix-flowi6_proto-value-in-xmit-path.patch [new file with mode: 0644]
queue-3.17/net-systemport-fix-bcm_sysport_insert_tsb.patch [new file with mode: 0644]
queue-3.17/net_sched-copy-exts-type-in-tcf_exts_change.patch [new file with mode: 0644]
queue-3.17/sctp-handle-association-restarts-when-the-socket-is-closed.patch [new file with mode: 0644]
queue-3.17/series [new file with mode: 0644]
queue-3.17/team-avoid-race-condition-in-scheduling-delayed-work.patch [new file with mode: 0644]

diff --git a/queue-3.17/3c59x-fix-bad-split-of-cpu_to_le32-pci_map_single.patch b/queue-3.17/3c59x-fix-bad-split-of-cpu_to_le32-pci_map_single.patch
new file mode 100644 (file)
index 0000000..4a7f64a
--- /dev/null
@@ -0,0 +1,64 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: "Sylvain \\\"ythier\\\" Hitier" <sylvain.hitier@gmail.com>
+Date: Tue, 7 Oct 2014 13:40:34 +0000
+Subject: 3c59x: fix bad split of cpu_to_le32(pci_map_single())
+
+From: "Sylvain \\\"ythier\\\" Hitier" <sylvain.hitier@gmail.com>
+
+[ Upstream commit 88b09a6d958af6c458acf055ee2eb5bc9564efda ]
+
+In commit 6f2b6a3005b2c34c39f207a87667564f64f2f91a,
+  # 3c59x: Add dma error checking and recovery
+the intent is to split out the mapping from the byte-swapping in order to
+insert a dma_mapping_error() check.
+
+Kinda this semantic patch:
+
+    // See http://coccinelle.lip6.fr/
+    //
+    // Beware, grouik-and-dirty!
+    @@
+    expression DEV, X, Y, Z;
+    @@
+    -   cpu_to_le32(pci_map_single(DEV, X, Y, Z))
+    +   dma_addr_t addr = pci_map_single(DEV, X, Y, Z);
+    +   if (dma_mapping_error(&DEV->dev, addr))
+    +       /* snip */;
+    +   cpu_to_le32(addr)
+
+However, the #else part (of the #if DO_ZEROCOPY test) is changed this way:
+
+    -   cpu_to_le32(pci_map_single(DEV, X, Y, Z))
+    +   dma_addr_t addr = cpu_to_le32(pci_map_single(DEV, X, Y, Z));
+    //                    ^^^^^^^^^^^
+    //                    That mismatches the 3 other changes!
+    +   if (dma_mapping_error(&DEV->dev, addr))
+    +       /* snip */;
+    +   cpu_to_le32(addr)
+
+Let's remove the leftover cpu_to_le32() for coherency.
+
+v2: Better changelog.
+v3: Add Acked-by
+
+Fixes: 6f2b6a3005b2c34c39f207a87667564f64f2f91a
+  # 3c59x: Add dma error checking and recovery
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: Sylvain "ythier" Hitier <sylvain.hitier@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/3com/3c59x.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/3com/3c59x.c
++++ b/drivers/net/ethernet/3com/3c59x.c
+@@ -2214,7 +2214,7 @@ boomerang_start_xmit(struct sk_buff *skb
+               }
+       }
+ #else
+-      dma_addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE));
++      dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len, PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
+               goto out_dma_err;
+       vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
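On big-endian systems the leftover conversion would both hand dma_mapping_error()
a byte-swapped handle and store a doubly-converted (effectively unconverted)
address into the descriptor; on little-endian it is a no-op, which is how it
slipped through. The fixed pattern, condensed from the hunk above (kernel
context assumed, error path abbreviated):

    /* map first, keeping the handle in CPU byte order ... */
    dma_addr = pci_map_single(VORTEX_PCI(vp), skb->data, skb->len,
                              PCI_DMA_TODEVICE);

    /* ... so the error check sees the raw handle, not a swapped value */
    if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma_addr))
            goto out_dma_err;

    /* byte-swap only when writing into the little-endian descriptor */
    vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
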
diff --git a/queue-3.17/hyperv-fix-a-bug-in-netvsc_send.patch b/queue-3.17/hyperv-fix-a-bug-in-netvsc_send.patch
new file mode 100644 (file)
index 0000000..001893f
--- /dev/null
@@ -0,0 +1,65 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: KY Srinivasan <kys@microsoft.com>
+Date: Sun, 5 Oct 2014 10:42:51 -0700
+Subject: hyperv: Fix a bug in netvsc_send()
+
+From: KY Srinivasan <kys@microsoft.com>
+
+[ Upstream commit 3a67c9ccad926a168d8b7891537a452018368a5b ]
+
+After the packet is successfully sent, we should not touch the packet
+as it may have been freed. This patch is based on the work done by
+Long Li <longli@microsoft.com>.
+
+David, please queue this up for stable.
+
+Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
+Reported-by: Sitsofe Wheeler <sitsofe@yahoo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/hyperv/netvsc.c |   15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -705,6 +705,7 @@ int netvsc_send(struct hv_device *device
+       unsigned int section_index = NETVSC_INVALID_INDEX;
+       u32 msg_size = 0;
+       struct sk_buff *skb;
++      u16 q_idx = packet->q_idx;
+       net_device = get_outbound_net_device(device);
+@@ -769,24 +770,24 @@ int netvsc_send(struct hv_device *device
+       if (ret == 0) {
+               atomic_inc(&net_device->num_outstanding_sends);
+-              atomic_inc(&net_device->queue_sends[packet->q_idx]);
++              atomic_inc(&net_device->queue_sends[q_idx]);
+               if (hv_ringbuf_avail_percent(&out_channel->outbound) <
+                       RING_AVAIL_PERCENT_LOWATER) {
+                       netif_tx_stop_queue(netdev_get_tx_queue(
+-                                          ndev, packet->q_idx));
++                                          ndev, q_idx));
+                       if (atomic_read(&net_device->
+-                              queue_sends[packet->q_idx]) < 1)
++                              queue_sends[q_idx]) < 1)
+                               netif_tx_wake_queue(netdev_get_tx_queue(
+-                                                  ndev, packet->q_idx));
++                                                  ndev, q_idx));
+               }
+       } else if (ret == -EAGAIN) {
+               netif_tx_stop_queue(netdev_get_tx_queue(
+-                                  ndev, packet->q_idx));
+-              if (atomic_read(&net_device->queue_sends[packet->q_idx]) < 1) {
++                                  ndev, q_idx));
++              if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
+                       netif_tx_wake_queue(netdev_get_tx_queue(
+-                                          ndev, packet->q_idx));
++                                          ndev, q_idx));
+                       ret = -ENOSPC;
+               }
+       } else {
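The shape of the fix, condensed from the hunk above: copy whatever is still
needed out of the packet before handing it to the host, because a successful
send may complete, and free, the packet before its return value is even
examined. A sketch (kernel context assumed, the actual VMBus send elided):

    u16 q_idx = packet->q_idx;    /* cached up front, while it is safe */

    /* ... hand the packet to the VMBus channel here; on success the
     * completion path may free it before the code below runs ... */

    if (ret == 0) {
            atomic_inc(&net_device->num_outstanding_sends);
            /* only the cached q_idx is used from here on */
            atomic_inc(&net_device->queue_sends[q_idx]);
            if (hv_ringbuf_avail_percent(&out_channel->outbound) <
                RING_AVAIL_PERCENT_LOWATER)
                    netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
    }
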
diff --git a/queue-3.17/ip6_gre-fix-flowi6_proto-value-in-xmit-path.patch b/queue-3.17/ip6_gre-fix-flowi6_proto-value-in-xmit-path.patch
new file mode 100644 (file)
index 0000000..f349174
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Date: Thu, 2 Oct 2014 18:26:49 +0200
+Subject: ip6_gre: fix flowi6_proto value in xmit path
+
+From: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+
+[ Upstream commit 3be07244b7337760a3269d56b2f4a63e72218648 ]
+
+In the xmit path, we build a flowi6 which will be used for the output route lookup.
+We are sending a GRE-encapsulated packet, not a plain IPv4- or IPv6-encapsulated
+packet, so the protocol should be IPPROTO_GRE.
+
+Fixes: c12b395a4664 ("gre: Support GRE over IPv6")
+Reported-by: Matthieu Ternisien d'Ouville <matthieu.tdo@6wind.com>
+Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_gre.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -782,7 +782,7 @@ static inline int ip6gre_xmit_ipv4(struc
+               encap_limit = t->parms.encap_limit;
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-      fl6.flowi6_proto = IPPROTO_IPIP;
++      fl6.flowi6_proto = IPPROTO_GRE;
+       dsfield = ipv4_get_dsfield(iph);
+@@ -832,7 +832,7 @@ static inline int ip6gre_xmit_ipv6(struc
+               encap_limit = t->parms.encap_limit;
+       memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
+-      fl6.flowi6_proto = IPPROTO_IPV6;
++      fl6.flowi6_proto = IPPROTO_GRE;
+       dsfield = ipv6_get_dsfield(ipv6h);
+       if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
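Put differently, the flow key has to describe the outer packet the tunnel
emits (GRE carried directly in IPv6), not the payload. Both xmit helpers now
build it the same way, condensed from the hunks above (kernel context assumed):

    struct flowi6 fl6;

    memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
    /* the next header after the outer IPv6 header is GRE, whatever the
     * payload is, so the route lookup must be keyed on IPPROTO_GRE
     * rather than IPPROTO_IPIP or IPPROTO_IPV6 */
    fl6.flowi6_proto = IPPROTO_GRE;
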
diff --git a/queue-3.17/net-systemport-fix-bcm_sysport_insert_tsb.patch b/queue-3.17/net-systemport-fix-bcm_sysport_insert_tsb.patch
new file mode 100644 (file)
index 0000000..b165a48
--- /dev/null
@@ -0,0 +1,71 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Thu, 2 Oct 2014 09:43:16 -0700
+Subject: net: systemport: fix bcm_sysport_insert_tsb()
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit e87474a6e697857df21cff0707a2472abceca8b3 ]
+
+Similar to commit bc23333ba11fb7f959b7e87e121122f5a0fbbca8 ("net:
+bcmgenet: fix bcmgenet_put_tx_csum()"), we need to return the skb
+pointer in case we had to reallocate the SKB headroom.
+
+Fixes: 80105befdb4b8 ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bcmsysport.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -848,7 +848,8 @@ static irqreturn_t bcm_sysport_wol_isr(i
+       return IRQ_HANDLED;
+ }
+-static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
++static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
++                                            struct net_device *dev)
+ {
+       struct sk_buff *nskb;
+       struct bcm_tsb *tsb;
+@@ -864,7 +865,7 @@ static int bcm_sysport_insert_tsb(struct
+               if (!nskb) {
+                       dev->stats.tx_errors++;
+                       dev->stats.tx_dropped++;
+-                      return -ENOMEM;
++                      return NULL;
+               }
+               skb = nskb;
+       }
+@@ -883,7 +884,7 @@ static int bcm_sysport_insert_tsb(struct
+                       ip_proto = ipv6_hdr(skb)->nexthdr;
+                       break;
+               default:
+-                      return 0;
++                      return skb;
+               }
+               /* Get the checksum offset and the L4 (transport) offset */
+@@ -902,7 +903,7 @@ static int bcm_sysport_insert_tsb(struct
+               tsb->l4_ptr_dest_map = csum_info;
+       }
+-      return 0;
++      return skb;
+ }
+ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+@@ -936,8 +937,8 @@ static netdev_tx_t bcm_sysport_xmit(stru
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+-              ret = bcm_sysport_insert_tsb(skb, dev);
+-              if (ret) {
++              skb = bcm_sysport_insert_tsb(skb, dev);
++              if (!skb) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
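The new calling convention, condensed: since inserting the transmit status
block (TSB) may have to reallocate headroom and therefore replace the skb, the
helper returns the skb to use (or NULL on failure) and the caller adopts it
instead of checking an integer code. A sketch of the caller side (kernel
context assumed):

    if (priv->tsb_en) {
            /* may return the original skb, a reallocated one, or NULL */
            skb = bcm_sysport_insert_tsb(skb, dev);
            if (!skb) {
                    /* the helper already bumped the drop counters */
                    ret = NETDEV_TX_OK;
                    goto out;
            }
    }
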
diff --git a/queue-3.17/net_sched-copy-exts-type-in-tcf_exts_change.patch b/queue-3.17/net_sched-copy-exts-type-in-tcf_exts_change.patch
new file mode 100644 (file)
index 0000000..8068913
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: WANG Cong <xiyou.wangcong@gmail.com>
+Date: Mon, 6 Oct 2014 17:21:54 -0700
+Subject: net_sched: copy exts->type in tcf_exts_change()
+
+From: WANG Cong <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 5301e3e117d88ef0967ce278912e54757f1a31a2 ]
+
+We need to copy exts->type when committing the change, otherwise
+it would always be 0. This is a quick fix for -net and -stable;
+for net-next, tcf_exts will be removed.
+
+Fixes: commit 33be627159913b094bb578e83 ("net_sched: act: use standard struct list_head")
+Reported-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: Jamal Hadi Salim <jhs@mojatatu.com>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/cls_api.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -549,6 +549,7 @@ void tcf_exts_change(struct tcf_proto *t
+       tcf_tree_lock(tp);
+       list_splice_init(&dst->actions, &tmp);
+       list_splice(&src->actions, &dst->actions);
++      dst->type = src->type;
+       tcf_tree_unlock(tp);
+       tcf_action_destroy(&tmp, TCA_ACT_UNBIND);
+ #endif
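The whole fix is the one assignment inside the locked section: the list
splices move the actions across, but the scalar exts->type field was never
copied, so it stayed at 0 after a change. Condensed, with comments (kernel
context assumed):

    tcf_tree_lock(tp);
    list_splice_init(&dst->actions, &tmp);     /* park the old actions    */
    list_splice(&src->actions, &dst->actions); /* adopt the new actions   */
    dst->type = src->type;                     /* and carry the type too  */
    tcf_tree_unlock(tp);
    tcf_action_destroy(&tmp, TCA_ACT_UNBIND);  /* free the parked actions */
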
diff --git a/queue-3.17/sctp-handle-association-restarts-when-the-socket-is-closed.patch b/queue-3.17/sctp-handle-association-restarts-when-the-socket-is-closed.patch
new file mode 100644 (file)
index 0000000..07dca6e
--- /dev/null
@@ -0,0 +1,79 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: Vlad Yasevich <vyasevich@gmail.com>
+Date: Fri, 3 Oct 2014 18:16:20 -0400
+Subject: sctp: handle association restarts when the socket is closed.
+
+From: Vlad Yasevich <vyasevich@gmail.com>
+
+[ Upstream commit bdf6fa52f01b941d4a80372d56de465bdbbd1d23 ]
+
+Currently association restarts do not take into consideration the
+state of the socket.  When a restart happens, the current association
+simply transitions into established state.  This creates a condition
+where a remote system, through the restart procedure, may create a
+local association that is in no way reachable by the user.  The conditions
+to trigger this are as follows:
+  1) Remote does not acknowledge some data, causing data to remain
+     outstanding.
+  2) Local application calls close() on the socket.  Since data
+     is still outstanding, the association is placed in SHUTDOWN_PENDING
+     state.  However, the socket is closed.
+  3) The remote tries to create a new association, triggering a restart
+     on the local system.  The association moves from SHUTDOWN_PENDING
+     to ESTABLISHED.  At this point, it is no longer reachable by
+     any socket on the local system.
+
+This patch addresses the above situation by moving the newly ESTABLISHED
+association into SHUTDOWN-SENT state and bundling a SHUTDOWN after
+the COOKIE-ACK chunk.  This way, the restarted association immediately
+enters the shutdown procedure and forces the termination of the
+unreachable association.
+
+Reported-by: David Laight <David.Laight@aculab.com>
+Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sctp/command.h |    2 +-
+ net/sctp/sm_statefuns.c    |   19 ++++++++++++++++---
+ 2 files changed, 17 insertions(+), 4 deletions(-)
+
+--- a/include/net/sctp/command.h
++++ b/include/net/sctp/command.h
+@@ -115,7 +115,7 @@ typedef enum {
+  * analysis of the state functions, but in reality just taken from
+  * thin air in the hopes othat we don't trigger a kernel panic.
+  */
+-#define SCTP_MAX_NUM_COMMANDS 14
++#define SCTP_MAX_NUM_COMMANDS 20
+ typedef union {
+       void *zero_all; /* Set to NULL to clear the entire union */
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -1775,9 +1775,22 @@ static sctp_disposition_t sctp_sf_do_dup
+       /* Update the content of current association. */
+       sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
+       sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
+-      sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
+-                      SCTP_STATE(SCTP_STATE_ESTABLISHED));
+-      sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++      if (sctp_state(asoc, SHUTDOWN_PENDING) &&
++          (sctp_sstate(asoc->base.sk, CLOSING) ||
++           sock_flag(asoc->base.sk, SOCK_DEAD))) {
++              /* if were currently in SHUTDOWN_PENDING, but the socket
++               * has been closed by user, don't transition to ESTABLISHED.
++               * Instead trigger SHUTDOWN bundled with COOKIE_ACK.
++               */
++              sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++              return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
++                                                   SCTP_ST_CHUNK(0), NULL,
++                                                   commands);
++      } else {
++              sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
++                              SCTP_STATE(SCTP_STATE_ESTABLISHED));
++              sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
++      }
+       return SCTP_DISPOSITION_CONSUME;
+ nomem_ev:
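Condensed, the new branch in the duplicate-cookie handler reads: if the
existing association was already in SHUTDOWN_PENDING and its socket has been
closed by the user, still reply with the COOKIE-ACK, but jump straight into
the shutdown procedure instead of re-entering ESTABLISHED (kernel context
assumed):

    if (sctp_state(asoc, SHUTDOWN_PENDING) &&
        (sctp_sstate(asoc->base.sk, CLOSING) ||
         sock_flag(asoc->base.sk, SOCK_DEAD))) {
            /* socket already closed: bundle a SHUTDOWN behind the
             * COOKIE-ACK so the restarted association tears itself
             * down instead of lingering unreachable */
            sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
            return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
                                                 SCTP_ST_CHUNK(0), NULL,
                                                 commands);
    }

    /* otherwise behave as before: back to ESTABLISHED, then reply */
    sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
                    SCTP_STATE(SCTP_STATE_ESTABLISHED));
    sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
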
diff --git a/queue-3.17/series b/queue-3.17/series
new file mode 100644 (file)
index 0000000..32f446e
--- /dev/null
@@ -0,0 +1,7 @@
+ip6_gre-fix-flowi6_proto-value-in-xmit-path.patch
+net-systemport-fix-bcm_sysport_insert_tsb.patch
+team-avoid-race-condition-in-scheduling-delayed-work.patch
+hyperv-fix-a-bug-in-netvsc_send.patch
+sctp-handle-association-restarts-when-the-socket-is-closed.patch
+3c59x-fix-bad-split-of-cpu_to_le32-pci_map_single.patch
+net_sched-copy-exts-type-in-tcf_exts_change.patch
diff --git a/queue-3.17/team-avoid-race-condition-in-scheduling-delayed-work.patch b/queue-3.17/team-avoid-race-condition-in-scheduling-delayed-work.patch
new file mode 100644 (file)
index 0000000..f75bd7e
--- /dev/null
@@ -0,0 +1,68 @@
+From foo@baz Sun Oct 12 17:18:11 CEST 2014
+From: Joe Lawrence <Joe.Lawrence@stratus.com>
+Date: Fri, 3 Oct 2014 09:58:34 -0400
+Subject: team: avoid race condition in scheduling delayed work
+
+From: Joe Lawrence <Joe.Lawrence@stratus.com>
+
+[ Upstream commit 47549650abd13d873fd2e5fc218db19e21031074 ]
+
+When team_notify_peers and team_mcast_rejoin are called, they both reset
+their respective .count_pending atomic variable. Then when the actual
+worker function is executed, the variable is atomically decremented.
+This pattern introduces a potential race condition where the
+.count_pending rolls over and the worker function keeps rescheduling
+until .count_pending decrements to zero again:
+
+THREAD 1                           THREAD 2
+
+========                           ========
+team_notify_peers(teamX)
+  atomic_set count_pending = 1
+  schedule_delayed_work
+                                   team_notify_peers(teamX)
+                                   atomic_set count_pending = 1
+team_notify_peers_work
+  atomic_dec_and_test
+    count_pending = 0
+  (return)
+                                   schedule_delayed_work
+                                   team_notify_peers_work
+                                   atomic_dec_and_test
+                                     count_pending = -1
+                                   schedule_delayed_work
+                                   (repeat until count_pending = 0)
+
+Instead of assigning a new value to .count_pending, use atomic_add to
+tack on the additional desired worker function invocations.
+
+Signed-off-by: Joe Lawrence <joe.lawrence@stratus.com>
+Acked-by: Jiri Pirko <jiri@resnulli.us>
+Fixes: fc423ff00df3a19554414ee ("team: add peer notification")
+Fixes: 492b200efdd20b8fcfdac87 ("team: add support for sending multicast rejoins")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -647,7 +647,7 @@ static void team_notify_peers(struct tea
+ {
+       if (!team->notify_peers.count || !netif_running(team->dev))
+               return;
+-      atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
++      atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
+       schedule_delayed_work(&team->notify_peers.dw, 0);
+ }
+@@ -687,7 +687,7 @@ static void team_mcast_rejoin(struct tea
+ {
+       if (!team->mcast_rejoin.count || !netif_running(team->dev))
+               return;
+-      atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
++      atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
+       schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ }
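In short: the request side accumulates work with atomic_add() and the worker
drains it one decrement per run, so a request arriving while the worker is
mid-run can no longer drive count_pending below zero. A sketch of both halves;
the worker side is paraphrased from the changelog above rather than taken from
this diff (kernel context assumed):

    /* request side: add to the pending count instead of overwriting it */
    static void team_notify_peers(struct team *team)
    {
            if (!team->notify_peers.count || !netif_running(team->dev))
                    return;
            atomic_add(team->notify_peers.count,
                       &team->notify_peers.count_pending);
            schedule_delayed_work(&team->notify_peers.dw, 0);
    }

    /* worker side (paraphrased sketch): consume one unit per run and
     * reschedule while any remain -- the counter can now reach zero
     * but never wrap negative */
    static void team_notify_peers_work_sketch(struct team *team)
    {
            /* ... send the actual peer notification here ... */
            if (!atomic_dec_and_test(&team->notify_peers.count_pending))
                    schedule_delayed_work(&team->notify_peers.dw, 0);
    }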