3.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Jan 2015 02:18:46 +0000 (18:18 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sun, 18 Jan 2015 02:18:46 +0000 (18:18 -0800)
added patches:
alx-fix-alx_poll.patch
batman-adv-avoid-null-dereferences-and-fix-if-check.patch
batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch
batman-adv-unify-fragment-size-calculation.patch
enic-fix-rx-skb-checksum.patch
gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch
in6-fix-conflict-with-glibc.patch
net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch
net-fix-stacked-vlan-offload-features-computation.patch
net-reset-secmark-when-scrubbing-packet.patch
netlink-always-copy-on-mmap-tx.patch
netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch
tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch
team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch
tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch

15 files changed:
queue-3.14/alx-fix-alx_poll.patch [new file with mode: 0644]
queue-3.14/batman-adv-avoid-null-dereferences-and-fix-if-check.patch [new file with mode: 0644]
queue-3.14/batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch [new file with mode: 0644]
queue-3.14/batman-adv-unify-fragment-size-calculation.patch [new file with mode: 0644]
queue-3.14/enic-fix-rx-skb-checksum.patch [new file with mode: 0644]
queue-3.14/gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch [new file with mode: 0644]
queue-3.14/in6-fix-conflict-with-glibc.patch [new file with mode: 0644]
queue-3.14/net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch [new file with mode: 0644]
queue-3.14/net-fix-stacked-vlan-offload-features-computation.patch [new file with mode: 0644]
queue-3.14/net-reset-secmark-when-scrubbing-packet.patch [new file with mode: 0644]
queue-3.14/netlink-always-copy-on-mmap-tx.patch [new file with mode: 0644]
queue-3.14/netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch [new file with mode: 0644]
queue-3.14/tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch [new file with mode: 0644]
queue-3.14/team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch [new file with mode: 0644]
queue-3.14/tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch [new file with mode: 0644]

diff --git a/queue-3.14/alx-fix-alx_poll.patch b/queue-3.14/alx-fix-alx_poll.patch
new file mode 100644 (file)
index 0000000..5b373a7
--- /dev/null
@@ -0,0 +1,110 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 11 Jan 2015 10:32:18 -0800
+Subject: alx: fix alx_poll()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 7a05dc64e2e4c611d89007b125b20c0d2a4d31a5 ]
+
+Commit d75b1ade567f ("net: less interrupt masking in NAPI") uncovered
+wrong alx_poll() behavior.
+
+A NAPI poll() handler is supposed to return exactly the budget when/if
+napi_complete() has not been called.
+
+It is also supposed to return the number of frames that were received,
+so that netdev_budget can have a meaning.
+
+Also, in case of TX pressure, we still have to dequeue received
+packets: alx_clean_rx_irq() has to be called even if
+alx_clean_tx_irq(alx) returns false, otherwise the device is
+effectively half duplex.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: d75b1ade567f ("net: less interrupt masking in NAPI")
+Reported-by: Oded Gabbay <oded.gabbay@amd.com>
+Bisected-by: Oded Gabbay <oded.gabbay@amd.com>
+Tested-by: Oded Gabbay <oded.gabbay@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/atheros/alx/main.c |   24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/atheros/alx/main.c
++++ b/drivers/net/ethernet/atheros/alx/main.c
+@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct al
+       schedule_work(&alx->reset_wk);
+ }
+-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
++static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
+ {
+       struct alx_rx_queue *rxq = &alx->rxq;
+       struct alx_rrd *rrd;
+       struct alx_buffer *rxb;
+       struct sk_buff *skb;
+       u16 length, rfd_cleaned = 0;
++      int work = 0;
+-      while (budget > 0) {
++      while (work < budget) {
+               rrd = &rxq->rrd[rxq->rrd_read_idx];
+               if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+                       break;
+@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_
+                   ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+                                 RRD_NOR) != 1) {
+                       alx_schedule_reset(alx);
+-                      return 0;
++                      return work;
+               }
+               rxb = &rxq->bufs[rxq->read_idx];
+@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_
+               }
+               napi_gro_receive(&alx->napi, skb);
+-              budget--;
++              work++;
+ next_pkt:
+               if (++rxq->read_idx == alx->rx_ringsz)
+@@ -258,21 +259,22 @@ next_pkt:
+       if (rfd_cleaned)
+               alx_refill_rx_ring(alx, GFP_ATOMIC);
+-      return budget > 0;
++      return work;
+ }
+ static int alx_poll(struct napi_struct *napi, int budget)
+ {
+       struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+       struct alx_hw *hw = &alx->hw;
+-      bool complete = true;
+       unsigned long flags;
++      bool tx_complete;
++      int work;
+-      complete = alx_clean_tx_irq(alx) &&
+-                 alx_clean_rx_irq(alx, budget);
++      tx_complete = alx_clean_tx_irq(alx);
++      work = alx_clean_rx_irq(alx, budget);
+-      if (!complete)
+-              return 1;
++      if (!tx_complete || work == budget)
++              return budget;
+       napi_complete(&alx->napi);
+@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *
+       alx_post_write(hw);
+-      return 0;
++      return work;
+ }
+ static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
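
The contract the fix restores applies to any NAPI driver, so it is worth spelling out. Below is a minimal poll handler following the same rules, as a sketch only; the example_* type and helpers are hypothetical stand-ins for a driver's own state and cleanup routines:

  #include <linux/netdevice.h>

  /* Hypothetical driver state; real drivers embed the napi struct in priv. */
  struct example_priv {
          struct napi_struct napi;
  };

  static bool example_clean_tx(struct example_priv *priv) { return true; }
  static int  example_clean_rx(struct example_priv *priv, int budget) { return 0; }
  static void example_enable_irqs(struct example_priv *priv) { }

  static int example_poll(struct napi_struct *napi, int budget)
  {
          struct example_priv *priv = container_of(napi, struct example_priv, napi);
          bool tx_complete;
          int work;

          /* Always drain RX, even under TX pressure; otherwise the device
           * is effectively half duplex, as the fix above notes. */
          tx_complete = example_clean_tx(priv);
          work = example_clean_rx(priv, budget);

          /* Return exactly `budget` whenever napi_complete() is not called;
           * otherwise complete, re-arm interrupts, and return the real RX
           * count (< budget) so netdev_budget accounting stays meaningful. */
          if (!tx_complete || work == budget)
                  return budget;

          napi_complete(napi);
          example_enable_irqs(priv);
          return work;
  }
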
diff --git a/queue-3.14/batman-adv-avoid-null-dereferences-and-fix-if-check.patch b/queue-3.14/batman-adv-avoid-null-dereferences-and-fix-if-check.patch
new file mode 100644 (file)
index 0000000..501558e
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Antonio Quartulli <antonio@meshcoding.com>
+Date: Sat, 20 Dec 2014 13:48:57 +0100
+Subject: batman-adv: avoid NULL dereferences and fix if check
+
+From: Antonio Quartulli <antonio@meshcoding.com>
+
+[ Upstream commit 0d1644919578db525b9a7b6c8197ce02adbfce26 ]
+
+Gateways having bandwidth_down equal to zero are not accepted
+at all and so are never added to the gateway list.
+For this reason, checking the bandwidth_down member in
+batadv_gw_out_of_range() is useless.
+
+This is probably a copy/paste error and this check was supposed
+to be "!gw_node" only. Moreover, the way the check is written
+now may also lead to a NULL dereference.
+
+Fix this by rewriting the if-condition properly.
+
+Introduced by 414254e342a0d58144de40c3da777521ebaeeb07
+("batman-adv: tvlv - gateway download/upload bandwidth container")
+
+Signed-off-by: Antonio Quartulli <antonio@meshcoding.com>
+Reported-by: David Binderman <dcb314@hotmail.com>
+Signed-off-by: Marek Lindner <mareklindner@neomailbox.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/gateway_client.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/batman-adv/gateway_client.c
++++ b/net/batman-adv/gateway_client.c
+@@ -812,7 +812,7 @@ bool batadv_gw_out_of_range(struct batad
+               goto out;
+       gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
+-      if (!gw_node->bandwidth_down == 0)
++      if (!gw_node)
+               goto out;
+       switch (atomic_read(&bat_priv->gw_mode)) {
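
The broken check is an instance of a classic C precedence pitfall: "!" binds tighter than "==". A tiny stand-alone illustration (hypothetical values, not from the kernel tree):

  #include <stdio.h>

  struct gw_node { unsigned int bandwidth_down; };

  int main(void)
  {
          struct gw_node node = { .bandwidth_down = 100 };
          struct gw_node *gw_node = &node;

          /* Intended: "bail out if gw_node is NULL".
           * Written:  !gw_node->bandwidth_down == 0
           * Parsed:   (!gw_node->bandwidth_down) == 0, i.e. bandwidth_down != 0,
           * and it dereferences gw_node before any NULL check is made. */
          printf("%d\n", !gw_node->bandwidth_down == 0); /* prints 1: wrongly bails out */
          return 0;
  }
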
diff --git a/queue-3.14/batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch b/queue-3.14/batman-adv-calculate-extra-tail-size-based-on-queued-fragments.patch
new file mode 100644 (file)
index 0000000..7bf3927
--- /dev/null
@@ -0,0 +1,61 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Sven Eckelmann <sven@narfation.org>
+Date: Sat, 20 Dec 2014 13:48:55 +0100
+Subject: batman-adv: Calculate extra tail size based on queued fragments
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sven Eckelmann <sven@narfation.org>
+
+[ Upstream commit 5b6698b0e4a37053de35cc24ee695b98a7eb712b ]
+
+The fragmentation code was replaced in 610bfc6bc99bc83680d190ebc69359a05fc7f605
+("batman-adv: Receive fragmented packets and merge"). The new code provided a
+mostly unused parameter skb for the merging function. It is used inside the
+function to calculate the additionally needed skb tailroom. But instead of
+increasing its own tailroom, it is only increasing the tailroom of the first
+queued skb. This is not correct in some situations because the first queued
+entry can be different from the parameter.
+
+An observed problem was:
+
+1. packet with size 104, total_size 1464, fragno 1 was received
+   - packet is queued
+2. packet with size 1400, total_size 1464, fragno 0 was received
+   - packet is queued at the end of the list
+3. enough data was received and can be given to the merge function
+   (1464 == (1400 - 20) + (104 - 20))
+   - the merge function gets the 1400-byte packet as its skb argument
+4. merge function gets first entry in queue (104 byte)
+   - stored as skb_out
+5. merge function calculates the required extra tail as total_size - skb->len
+   - pskb_expand_head grows the tail of skb_out by 64 bytes
+6. merge function tries to squeeze the extra 1380 bytes from the second queued
+   skb (the 1400-byte skb parameter) into the 64 extra tail bytes of skb_out
+
+Instead, calculate the extra required tail bytes for skb_out using skb_out
+itself rather than the parameter skb. The skb parameter is only used to get the
+total_size from the last received packet. This is also the total_size used to
+decide that all fragments were received.
+
+Reported-by: Philipp Psurek <philipp.psurek@gmail.com>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Acked-by: Martin Hundebøll <martin@hundeboll.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/fragmentation.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_h
+       kfree(entry);
+       /* Make room for the rest of the fragments. */
+-      if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
++      if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
+               kfree_skb(skb_out);
+               skb_out = NULL;
+               goto free;
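
The numbers from the observed problem make the error concrete; a small stand-alone check, using the values from the scenario above:

  #include <stdio.h>

  int main(void)
  {
          unsigned int total_size  = 1464; /* from the last received fragment */
          unsigned int skb_out_len = 104;  /* first queued entry, becomes skb_out */
          unsigned int skb_len     = 1400; /* the skb parameter (last fragment) */

          /* Buggy: extra tail computed from the parameter, not from skb_out. */
          printf("buggy extra tail:   %u\n", total_size - skb_len);     /* 64 */
          /* Fixed: extra tail computed from skb_out, the buffer being grown. */
          printf("correct extra tail: %u\n", total_size - skb_out_len); /* 1360 */
          return 0;
  }
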
diff --git a/queue-3.14/batman-adv-unify-fragment-size-calculation.patch b/queue-3.14/batman-adv-unify-fragment-size-calculation.patch
new file mode 100644 (file)
index 0000000..3ef6ad2
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Sven Eckelmann <sven@narfation.org>
+Date: Sat, 20 Dec 2014 13:48:56 +0100
+Subject: batman-adv: Unify fragment size calculation
+
+From: Sven Eckelmann <sven@narfation.org>
+
+[ Upstream commit 0402e444cd199389b7fe47be68a67b817e09e097 ]
+
+The fragmentation code was replaced in 610bfc6bc99bc83680d190ebc69359a05fc7f605
+("batman-adv: Receive fragmented packets and merge") by an implementation which
+can handle up to 16 fragments of a packet. The packet is prepared for the split
+in fragments by the function batadv_frag_send_packet and the actual split is
+done by batadv_frag_create.
+
+Both functions calculate the size of a fragment themselves. But their
+calculations differ because batadv_frag_send_packet also subtracts ETH_HLEN.
+Therefore, the check in batadv_frag_send_packet "can a full fragment be
+created?" may
+return true even when batadv_frag_create cannot create a full fragment.
+
+The function batadv_frag_create doesn't check the size of the skb before
+splitting it and therefore might try to create a larger fragment than the
+remaining buffer. This creates an integer underflow and an invalid len is given
+to skb_split.
+
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/fragmentation.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/batman-adv/fragmentation.c
++++ b/net/batman-adv/fragmentation.c
+@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_b
+        * fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
+        */
+       mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
+-      max_fragment_size = (mtu - header_size - ETH_HLEN);
++      max_fragment_size = mtu - header_size;
+       max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
+       /* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/queue-3.14/enic-fix-rx-skb-checksum.patch b/queue-3.14/enic-fix-rx-skb-checksum.patch
new file mode 100644 (file)
index 0000000..d3a62c9
--- /dev/null
@@ -0,0 +1,73 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+Date: Thu, 18 Dec 2014 15:58:42 +0530
+Subject: enic: fix rx skb checksum
+
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+
+[ Upstream commit 17e96834fd35997ca7cdfbf15413bcd5a36ad448 ]
+
+The hardware always provides the complement of the IP pseudo checksum. The
+stack expects the whole-packet checksum, without the pseudo checksum, when
+CHECKSUM_COMPLETE is set.
+
+This causes checksum errors in nf (netfilter) and ovs (Open vSwitch).
+
+kernel: qg-19546f09-f2: hw csum failure
+kernel: CPU: 9 PID: 0 Comm: swapper/9 Tainted: GF          O--------------   3.10.0-123.8.1.el7.x86_64 #1
+kernel: Hardware name: Cisco Systems Inc UCSB-B200-M3/UCSB-B200-M3, BIOS B200M3.2.2.3.0.080820141339 08/08/2014
+kernel: ffff881218f40000 df68243feb35e3a8 ffff881237a43ab8 ffffffff815e237b
+kernel: ffff881237a43ad0 ffffffff814cd4ca ffff8829ec71eb00 ffff881237a43af0
+kernel: ffffffff814c6232 0000000000000286 ffff8829ec71eb00 ffff881237a43b00
+kernel: Call Trace:
+kernel: <IRQ>  [<ffffffff815e237b>] dump_stack+0x19/0x1b
+kernel: [<ffffffff814cd4ca>] netdev_rx_csum_fault+0x3a/0x40
+kernel: [<ffffffff814c6232>] __skb_checksum_complete_head+0x62/0x70
+kernel: [<ffffffff814c6251>] __skb_checksum_complete+0x11/0x20
+kernel: [<ffffffff8155a20c>] nf_ip_checksum+0xcc/0x100
+kernel: [<ffffffffa049edc7>] icmp_error+0x1f7/0x35c [nf_conntrack_ipv4]
+kernel: [<ffffffff814cf419>] ? netif_rx+0xb9/0x1d0
+kernel: [<ffffffffa040eb7b>] ? internal_dev_recv+0xdb/0x130 [openvswitch]
+kernel: [<ffffffffa04c8330>] nf_conntrack_in+0xf0/0xa80 [nf_conntrack]
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffffa049e302>] ipv4_conntrack_in+0x22/0x30 [nf_conntrack_ipv4]
+kernel: [<ffffffff815005ca>] nf_iterate+0xaa/0xc0
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffff81500664>] nf_hook_slow+0x84/0x140
+kernel: [<ffffffff81509380>] ? inet_del_offload+0x40/0x40
+kernel: [<ffffffff81509dd4>] ip_rcv+0x344/0x380
+
+The hardware verifies the IP and TCP/UDP header checksums but does not provide
+the payload checksum value, so use CHECKSUM_UNNECESSARY. Set it only if it is a
+valid IPv4 TCP/UDP packet.
+
+Cc: Jiri Benc <jbenc@redhat.com>
+Cc: Stefan Assmann <sassmann@redhat.com>
+Reported-by: Sunil Choudhary <schoudha@redhat.com>
+Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
+Reviewed-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cisco/enic/enic_main.c |   12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1044,10 +1044,14 @@ static void enic_rq_indicate_buf(struct
+                                    PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
+               }
+-              if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
+-                      skb->csum = htons(checksum);
+-                      skb->ip_summed = CHECKSUM_COMPLETE;
+-              }
++              /* Hardware does not provide whole packet checksum. It only
++               * provides pseudo checksum. Since hw validates the packet
++               * checksum but does not provide us the checksum value, use
++               * CHECKSUM_UNNECESSARY.
++               */
++              if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
++                  ipv4_csum_ok)
++                      skb->ip_summed = CHECKSUM_UNNECESSARY;
+               if (vlan_stripped)
+                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
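
Why the pseudo-checksum complement breaks CHECKSUM_COMPLETE: the stack folds skb->csum together with the pseudo header and expects the all-ones result whose complement is zero. A rough user-space model of that verification, with simplified one's-complement helpers and hypothetical word values (not the kernel's actual csum implementation):

  #include <stdint.h>
  #include <stdio.h>

  /* Fold a 32-bit accumulator into a 16-bit one's-complement sum. */
  static uint32_t fold(uint32_t sum)
  {
          while (sum >> 16)
                  sum = (sum & 0xffff) + (sum >> 16);
          return sum;
  }

  int main(void)
  {
          /* Hypothetical pseudo-header word and two payload words. */
          uint16_t pseudo = 0x1111, w0 = 0x2222, w1 = 0x3333;

          /* Sender: transport checksum = ~(pseudo + data). */
          uint16_t csum = (uint16_t)~fold((uint32_t)pseudo + w0 + w1);

          /* CHECKSUM_COMPLETE means skb->csum is the sum over the WHOLE
           * packet, checksum field included. */
          uint32_t skb_csum = (uint32_t)w0 + w1 + csum;

          /* Verification folds in the pseudo header: 0xffff means valid. */
          printf("verify: %#x\n", fold(skb_csum + pseudo)); /* 0xffff */

          /* The enic hardware reported only a pseudo-header complement, so
           * this check failed and "hw csum failure" was logged. */
          return 0;
  }
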
diff --git a/queue-3.14/gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch b/queue-3.14/gre-fix-the-inner-mac-header-in-nbma-tunnel-xmit-path.patch
new file mode 100644 (file)
index 0000000..2e25d92
--- /dev/null
@@ -0,0 +1,69 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+Date: Mon, 15 Dec 2014 09:24:13 +0200
+Subject: gre: fix the inner mac header in nbma tunnel xmit path
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+
+[ Upstream commit 8a0033a947403569caeca45fa5e6f7ba60d51974 ]
+
+NBMA GRE tunnels temporarily push a GRE header containing the
+per-packet NBMA destination onto the skb via header ops early in the
+xmit path. It is later pulled off before the real GRE header is
+constructed.
+
+The inner mac header was thus set differently in the NBMA case: the GRE
+header has been pushed by the neighbor layer, and the mac header points
+to the beginning of the temporary GRE header (set by dev_queue_xmit).
+
+Now that the offloads expect the mac header to point to the GRE payload,
+fix the xmit path to:
+ - first pull the temporary GRE header away
+ - and reset the mac header to point to the GRE payload
+
+This fixes TSO with NBMA tunnels.
+
+Fixes: 14051f0452a2 ("gre: Use inner mac length when computing tunnel length")
+Signed-off-by: Timo Teräs <timo.teras@iki.fi>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Alexander Duyck <alexander.h.duyck@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_gre.c |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -250,10 +250,6 @@ static netdev_tx_t ipgre_xmit(struct sk_
+       struct ip_tunnel *tunnel = netdev_priv(dev);
+       const struct iphdr *tnl_params;
+-      skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
+-      if (IS_ERR(skb))
+-              goto out;
+-
+       if (dev->header_ops) {
+               /* Need space for new headers */
+               if (skb_cow_head(skb, dev->needed_headroom -
+@@ -266,6 +262,7 @@ static netdev_tx_t ipgre_xmit(struct sk_
+                * to gre header.
+                */
+               skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
++              skb_reset_mac_header(skb);
+       } else {
+               if (skb_cow_head(skb, dev->needed_headroom))
+                       goto free_skb;
+@@ -273,6 +270,10 @@ static netdev_tx_t ipgre_xmit(struct sk_
+               tnl_params = &tunnel->parms.iph;
+       }
++      skb = gre_handle_offloads(skb, !!(tunnel->parms.o_flags&TUNNEL_CSUM));
++      if (IS_ERR(skb))
++              goto out;
++
+       __gre_xmit(skb, dev, tnl_params, skb->protocol);
+       return NETDEV_TX_OK;
diff --git a/queue-3.14/in6-fix-conflict-with-glibc.patch b/queue-3.14/in6-fix-conflict-with-glibc.patch
new file mode 100644 (file)
index 0000000..cbd5834
--- /dev/null
@@ -0,0 +1,70 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: stephen hemminger <stephen@networkplumber.org>
+Date: Sat, 20 Dec 2014 12:15:49 -0800
+Subject: in6: fix conflict with glibc
+
+From: stephen hemminger <stephen@networkplumber.org>
+
+[ Upstream commit 6d08acd2d32e3e877579315dc3202d7a5f336d98 ]
+
+Resolve conflicts between the glibc definitions of IPv6 socket options
+and those defined in the Linux headers. Earlier efforts to solve this
+apparently did not cover all the definitions.
+
+This resolves warnings during the iproute2 build.
+Please consider for stable as well.
+
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/uapi/linux/in6.h         |    3 ++-
+ include/uapi/linux/libc-compat.h |    3 +++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/include/uapi/linux/in6.h
++++ b/include/uapi/linux/in6.h
+@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
+ /*
+  *    IPV6 socket options
+  */
+-
++#if __UAPI_DEF_IPV6_OPTIONS
+ #define IPV6_ADDRFORM         1
+ #define IPV6_2292PKTINFO      2
+ #define IPV6_2292HOPOPTS      3
+@@ -192,6 +192,7 @@ struct in6_flowlabel_req {
+ #define IPV6_IPSEC_POLICY     34
+ #define IPV6_XFRM_POLICY      35
++#endif
+ /*
+  * Multicast:
+--- a/include/uapi/linux/libc-compat.h
++++ b/include/uapi/linux/libc-compat.h
+@@ -69,6 +69,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               0
+ #define __UAPI_DEF_IPV6_MREQ          0
+ #define __UAPI_DEF_IPPROTO_V6         0
++#define __UAPI_DEF_IPV6_OPTIONS               0
+ #else
+@@ -82,6 +83,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               1
+ #define __UAPI_DEF_IPV6_MREQ          1
+ #define __UAPI_DEF_IPPROTO_V6         1
++#define __UAPI_DEF_IPV6_OPTIONS               1
+ #endif /* _NETINET_IN_H */
+@@ -103,6 +105,7 @@
+ #define __UAPI_DEF_SOCKADDR_IN6               1
+ #define __UAPI_DEF_IPV6_MREQ          1
+ #define __UAPI_DEF_IPPROTO_V6         1
++#define __UAPI_DEF_IPV6_OPTIONS               1
+ /* Definitions for xattr.h */
+ #define __UAPI_DEF_XATTR              1
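
The libc-compat mechanism behind these hunks generalizes: each potentially conflicting block in a kernel UAPI header is wrapped in a __UAPI_DEF_* macro that is forced to 0 when glibc's netinet/in.h has already been included. A compressed, single-file model of the dance, with hypothetical macro names:

  /* --- what glibc's <netinet/in.h> effectively does --- */
  #define _NETINET_IN_H
  #define EXAMPLE_OPT_A 1                /* libc's own copy of the constant */

  /* --- the libc-compat.h equivalent --- */
  #ifdef _NETINET_IN_H
  #define __UAPI_DEF_EXAMPLE_OPTIONS 0   /* libc came first: kernel yields */
  #else
  #define __UAPI_DEF_EXAMPLE_OPTIONS 1   /* kernel header owns the macros */
  #endif

  /* --- the in6.h equivalent --- */
  #if __UAPI_DEF_EXAMPLE_OPTIONS
  #define EXAMPLE_OPT_A 1                /* guarded out here, no redefinition */
  #define EXAMPLE_OPT_B 2
  #endif

  #include <stdio.h>

  int main(void)
  {
          /* Exactly one definition of EXAMPLE_OPT_A survived, which is what
           * the in6.h fix achieves for the IPV6_* socket options. */
          printf("EXAMPLE_OPT_A = %d\n", EXAMPLE_OPT_A);
          return 0;
  }
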
diff --git a/queue-3.14/net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch b/queue-3.14/net-core-handle-csum-for-checksum_complete-vxlan-forwarding.patch
new file mode 100644 (file)
index 0000000..f5e0b3d
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+Date: Fri, 19 Dec 2014 15:32:00 -0800
+Subject: net/core: Handle csum for CHECKSUM_COMPLETE VXLAN forwarding
+
+From: Jay Vosburgh <jay.vosburgh@canonical.com>
+
+[ Upstream commit 2c26d34bbcc0b3f30385d5587aa232289e2eed8e ]
+
+When using VXLAN tunnels and a sky2 device, I have experienced
+checksum failures of the following type:
+
+[ 4297.761899] eth0: hw csum failure
+[...]
+[ 4297.765223] Call Trace:
+[ 4297.765224]  <IRQ>  [<ffffffff8172f026>] dump_stack+0x46/0x58
+[ 4297.765235]  [<ffffffff8162ba52>] netdev_rx_csum_fault+0x42/0x50
+[ 4297.765238]  [<ffffffff8161c1a0>] ? skb_push+0x40/0x40
+[ 4297.765240]  [<ffffffff8162325c>] __skb_checksum_complete+0xbc/0xd0
+[ 4297.765243]  [<ffffffff8168c602>] tcp_v4_rcv+0x2e2/0x950
+[ 4297.765246]  [<ffffffff81666ca0>] ? ip_rcv_finish+0x360/0x360
+
+       These are reliably reproduced in a network topology of:
+
+container:eth0 == host(OVS VXLAN on VLAN) == bond0 == eth0 (sky2) -> switch
+
+       When VXLAN encapsulated traffic is received from a similarly
+configured peer, the above warning is generated in the receive
+processing of the encapsulated packet.  Note that the warning is
+associated with the container eth0.
+
+        The skbs from sky2 have ip_summed set to CHECKSUM_COMPLETE, and
+because the packet is an encapsulated Ethernet frame, the checksum
+generated by the hardware includes the inner protocol and Ethernet
+headers.
+
+       The receive code is careful to update the skb->csum, except in
+__dev_forward_skb, as called by dev_forward_skb.  __dev_forward_skb
+calls eth_type_trans, which in turn calls skb_pull_inline(skb, ETH_HLEN)
+to skip over the Ethernet header, but does not update skb->csum when
+doing so.
+
+       This patch resolves the problem by adding a call to
+skb_postpull_rcsum to update the skb->csum after the call to
+eth_type_trans.
+
+Signed-off-by: Jay Vosburgh <jay.vosburgh@canonical.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1709,6 +1709,7 @@ int dev_forward_skb(struct net_device *d
+       skb_scrub_packet(skb, true);
+       skb->protocol = eth_type_trans(skb, dev);
++      skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
+       return netif_rx_internal(skb);
+ }
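
The rule the fix applies is general: whenever bytes are pulled from the front of a CHECKSUM_COMPLETE skb, their contribution must be subtracted from skb->csum. A user-space sketch of the same bookkeeping; the helpers are simplified stand-ins for the kernel's csum_partial() and csum_sub():

  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  static uint32_t fold(uint32_t s)
  {
          while (s >> 16)
                  s = (s & 0xffff) + (s >> 16);
          return s;
  }

  /* One's-complement sum over 16-bit words (simplified csum_partial). */
  static uint32_t csum_partial(const uint16_t *p, size_t words)
  {
          uint32_t s = 0;
          while (words--)
                  s += *p++;
          return fold(s);
  }

  /* One's-complement subtraction (simplified csum_sub). */
  static uint32_t csum_sub(uint32_t csum, uint32_t addend)
  {
          return fold(csum + (~addend & 0xffff));
  }

  int main(void)
  {
          uint16_t pkt[4] = { 0xaaaa, 0xbbbb, 0x1234, 0x5678 }; /* hdr + payload */
          uint32_t csum = csum_partial(pkt, 4);   /* CHECKSUM_COMPLETE value */

          /* "Pull" the 2-word header: the checksum must shrink with it,
           * which is what skb_postpull_rcsum() does after eth_type_trans(). */
          csum = csum_sub(csum, csum_partial(pkt, 2));
          printf("%#x == %#x\n", csum, csum_partial(pkt + 2, 2)); /* match */
          return 0;
  }
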
diff --git a/queue-3.14/net-fix-stacked-vlan-offload-features-computation.patch b/queue-3.14/net-fix-stacked-vlan-offload-features-computation.patch
new file mode 100644 (file)
index 0000000..c407558
--- /dev/null
@@ -0,0 +1,47 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
+Date: Mon, 22 Dec 2014 19:04:14 +0900
+Subject: net: Fix stacked vlan offload features computation
+
+From: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
+
+[ Upstream commit 796f2da81bead71ffc91ef70912cd8d1827bf756 ]
+
+When vlan tags are stacked, it is very likely that the outer tag is stored
+in skb->vlan_tci and skb->protocol shows the inner tag's vlan_proto.
+Currently netif_skb_features() first looks at skb->protocol even if the
+outer tag is in vlan_tci, so it incorrectly retrieves the protocol
+encapsulated by the inner vlan instead of the inner vlan protocol itself.
+This allows GSO packets to be passed to hardware, where they end up
+corrupted.
+
+Fixes: 58e998c6d239 ("offloading: Force software GSO for multiple vlan tags.")
+Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2529,11 +2529,14 @@ netdev_features_t netif_skb_dev_features
+       if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
+               features &= ~NETIF_F_GSO_MASK;
+-      if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
+-              struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+-              protocol = veh->h_vlan_encapsulated_proto;
+-      } else if (!vlan_tx_tag_present(skb)) {
+-              return harmonize_features(skb, dev, features);
++      if (!vlan_tx_tag_present(skb)) {
++              if (unlikely(protocol == htons(ETH_P_8021Q) ||
++                           protocol == htons(ETH_P_8021AD))) {
++                      struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
++                      protocol = veh->h_vlan_encapsulated_proto;
++              } else {
++                      return harmonize_features(skb, dev, features);
++              }
+       }
+       features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
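
The data-structure point behind the fix: one tag can live out-of-band in skb->vlan_tci (hardware-accelerated) while further tags stay in-band in the frame, and skb->protocol then names the outermost in-band protocol. A small sketch of that layout and of the corrected check order; the struct mirrors the kernel's vlan_ethhdr, and the frame bytes are hypothetical:

  #include <arpa/inet.h>
  #include <stdint.h>
  #include <stdio.h>

  struct vlan_ethhdr {
          uint8_t  h_dest[6];
          uint8_t  h_source[6];
          uint16_t h_vlan_proto;              /* e.g. 0x8100, big-endian */
          uint16_t h_vlan_TCI;
          uint16_t h_vlan_encapsulated_proto; /* protocol inside the tag */
  };

  int main(void)
  {
          unsigned char frame[] = {
                  0,0,0,0,0,1,  0,0,0,0,0,2,  /* dst / src MAC */
                  0x81, 0x00,                 /* inner tag: 802.1Q */
                  0x00, 0x05,                 /* TCI */
                  0x08, 0x00,                 /* encapsulated proto: IPv4 */
          };
          const struct vlan_ethhdr *veh = (const struct vlan_ethhdr *)frame;
          int vlan_tx_tag_present = 1; /* outer tag stripped into vlan_tci */

          /* Fixed order: an out-of-band tag means the packet is vlan-tagged
           * no matter what skb->protocol says, so check that first. */
          if (vlan_tx_tag_present)
                  printf("vlan-tagged: restrict to vlan_features\n");
          else if (veh->h_vlan_proto == htons(0x8100))
                  printf("inner proto: 0x%04x\n",
                         ntohs(veh->h_vlan_encapsulated_proto));
          return 0;
  }
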
diff --git a/queue-3.14/net-reset-secmark-when-scrubbing-packet.patch b/queue-3.14/net-reset-secmark-when-scrubbing-packet.patch
new file mode 100644 (file)
index 0000000..a86074e
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Thomas Graf <tgraf@suug.ch>
+Date: Tue, 23 Dec 2014 01:13:18 +0100
+Subject: net: Reset secmark when scrubbing packet
+
+From: Thomas Graf <tgraf@suug.ch>
+
+[ Upstream commit b8fb4e0648a2ab3734140342002f68fb0c7d1602 ]
+
+skb_scrub_packet() is called when a packet switches between a context
+such as between underlay and overlay, between namespaces, or between
+L3 subnets.
+
+While we already scrub the packet mark, connection tracking entry,
+and cached destination, the security mark/context is left intact.
+
+It seems wrong to inherit the security context of a packet when going
+from overlay to underlay or across forwarding paths.
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Acked-by: Flavio Leitner <fbl@sysclose.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3937,6 +3937,7 @@ void skb_scrub_packet(struct sk_buff *sk
+       skb->local_df = 0;
+       skb_dst_drop(skb);
+       skb->mark = 0;
++      skb_init_secmark(skb);
+       secpath_reset(skb);
+       nf_reset(skb);
+       nf_reset_trace(skb);
diff --git a/queue-3.14/netlink-always-copy-on-mmap-tx.patch b/queue-3.14/netlink-always-copy-on-mmap-tx.patch
new file mode 100644 (file)
index 0000000..453593f
--- /dev/null
@@ -0,0 +1,127 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: David Miller <davem@davemloft.net>
+Date: Tue, 16 Dec 2014 17:58:17 -0500
+Subject: netlink: Always copy on mmap TX.
+
+From: David Miller <davem@davemloft.net>
+
+[ Upstream commit 4682a0358639b29cf69437ed909c6221f8c89847 ]
+
+Checking the file f_count and the nlk->mapped count is not completely
+sufficient to prevent the mmap'd area contents from changing from
+under us during netlink mmap sendmsg() operations.
+
+Be careful to sample the header's length field only once, because this
+could change from under us as well.
+
+Fixes: 5fd96123ee19 ("netlink: implement memory mapped sendmsg()")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Acked-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c |   52 ++++++++++++++---------------------------------
+ 1 file changed, 16 insertions(+), 36 deletions(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -510,14 +510,14 @@ out:
+       return err;
+ }
+-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
++static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
+ {
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
+       struct page *p_start, *p_end;
+       /* First page is flushed through netlink_{get,set}_status */
+       p_start = pgvec_to_page(hdr + PAGE_SIZE);
+-      p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
++      p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
+       while (p_start <= p_end) {
+               flush_dcache_page(p_start);
+               p_start++;
+@@ -699,24 +699,16 @@ static int netlink_mmap_sendmsg(struct s
+       struct nl_mmap_hdr *hdr;
+       struct sk_buff *skb;
+       unsigned int maxlen;
+-      bool excl = true;
+       int err = 0, len = 0;
+-      /* Netlink messages are validated by the receiver before processing.
+-       * In order to avoid userspace changing the contents of the message
+-       * after validation, the socket and the ring may only be used by a
+-       * single process, otherwise we fall back to copying.
+-       */
+-      if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
+-          atomic_read(&nlk->mapped) > 1)
+-              excl = false;
+-
+       mutex_lock(&nlk->pg_vec_lock);
+       ring   = &nlk->tx_ring;
+       maxlen = ring->frame_size - NL_MMAP_HDRLEN;
+       do {
++              unsigned int nm_len;
++
+               hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
+               if (hdr == NULL) {
+                       if (!(msg->msg_flags & MSG_DONTWAIT) &&
+@@ -724,35 +716,23 @@ static int netlink_mmap_sendmsg(struct s
+                               schedule();
+                       continue;
+               }
+-              if (hdr->nm_len > maxlen) {
++
++              nm_len = ACCESS_ONCE(hdr->nm_len);
++              if (nm_len > maxlen) {
+                       err = -EINVAL;
+                       goto out;
+               }
+-              netlink_frame_flush_dcache(hdr);
++              netlink_frame_flush_dcache(hdr, nm_len);
+-              if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
+-                      skb = alloc_skb_head(GFP_KERNEL);
+-                      if (skb == NULL) {
+-                              err = -ENOBUFS;
+-                              goto out;
+-                      }
+-                      sock_hold(sk);
+-                      netlink_ring_setup_skb(skb, sk, ring, hdr);
+-                      NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
+-                      __skb_put(skb, hdr->nm_len);
+-                      netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
+-                      atomic_inc(&ring->pending);
+-              } else {
+-                      skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
+-                      if (skb == NULL) {
+-                              err = -ENOBUFS;
+-                              goto out;
+-                      }
+-                      __skb_put(skb, hdr->nm_len);
+-                      memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+-                      netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
++              skb = alloc_skb(nm_len, GFP_KERNEL);
++              if (skb == NULL) {
++                      err = -ENOBUFS;
++                      goto out;
+               }
++              __skb_put(skb, nm_len);
++              memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
++              netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+               netlink_increment_head(ring);
+@@ -798,7 +778,7 @@ static void netlink_queue_mmaped_skb(str
+       hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
+       hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+       hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+-      netlink_frame_flush_dcache(hdr);
++      netlink_frame_flush_dcache(hdr, hdr->nm_len);
+       netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+       NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
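
The underlying pattern is a time-of-check/time-of-use race on shared memory: a length field in a user-mapped ring must be read exactly once into a local before being validated and used. A user-space sketch of the idiom; READ_ONCE is modeled with a volatile cast, as the kernel's ACCESS_ONCE was:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

  struct frame_hdr { uint32_t nm_len; /* writable by userspace */ };

  static int copy_frame(struct frame_hdr *hdr, const char *payload,
                        char *dst, uint32_t maxlen)
  {
          /* Sample the shared length ONCE; validation and the copy both use
           * the local snapshot, so a concurrent writer can no longer slip a
           * small value past the check and a large one into the memcpy. */
          uint32_t nm_len = READ_ONCE(hdr->nm_len);

          if (nm_len > maxlen)
                  return -1;
          memcpy(dst, payload, nm_len);
          return (int)nm_len;
  }

  int main(void)
  {
          struct frame_hdr hdr = { .nm_len = 5 };
          char payload[16] = "hello", dst[16];

          printf("copied %d bytes\n",
                 copy_frame(&hdr, payload, dst, sizeof(dst)));
          return 0;
  }
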
diff --git a/queue-3.14/netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch b/queue-3.14/netlink-don-t-reorder-loads-stores-before-marking-mmap-netlink-frame-as-available.patch
new file mode 100644 (file)
index 0000000..227b5dd
--- /dev/null
@@ -0,0 +1,50 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Thomas Graf <tgraf@suug.ch>
+Date: Thu, 18 Dec 2014 10:30:26 +0000
+Subject: netlink: Don't reorder loads/stores before marking mmap netlink frame as available
+
+From: Thomas Graf <tgraf@suug.ch>
+
+[ Upstream commit a18e6a186f53af06937a2c268c72443336f4ab56 ]
+
+Each mmap Netlink frame contains a status field which indicates
+whether the frame is unused, reserved, contains data or needs to
+be skipped. Neither loads nor stores may be reordered across the status
+change: both must complete before the status field is updated and another
+CPU might pick up the frame for use. Use smp_mb() to cover the needs of
+both types of callers to netlink_set_status(): callers which have been
+reading data from the frame, and callers which have been filling or
+releasing, and thus writing to, the frame.
+
+- Example code path requiring a smp_rmb():
+  memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
+  netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+
+- Example code path requiring a smp_wmb():
+  hdr->nm_uid  = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
+  hdr->nm_gid  = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
+  netlink_frame_flush_dcache(hdr);
+  netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
+
+Fixes: f9c228 ("netlink: implement memory mapped recvmsg()")
+Reported-by: Eric Dumazet <eric.dumazet@gmail.com>
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -535,9 +535,9 @@ static enum nl_mmap_status netlink_get_s
+ static void netlink_set_status(struct nl_mmap_hdr *hdr,
+                              enum nl_mmap_status status)
+ {
++      smp_mb();
+       hdr->nm_status = status;
+       flush_dcache_page(pgvec_to_page(hdr));
+-      smp_wmb();
+ }
+ static struct nl_mmap_hdr *
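
The generic shape of the fix: order every prior access to the frame before the status store that publishes it. A stand-alone C11 sketch of the same producer/consumer handshake; a release store models smp_mb() followed by the plain status write, and the frame contents are hypothetical:

  #include <stdatomic.h>
  #include <stdio.h>
  #include <string.h>

  enum status { STATUS_UNUSED, STATUS_VALID };

  struct frame {
          char data[32];
          _Atomic int nm_status;
  };

  /* Producer: fill the frame, THEN publish. The release store keeps the
   * copy from being reordered past the status update. */
  static void publish(struct frame *f, const char *msg)
  {
          strncpy(f->data, msg, sizeof(f->data) - 1);
          atomic_store_explicit(&f->nm_status, STATUS_VALID,
                                memory_order_release);
  }

  /* Consumer: observe the status first; only then is reading data safe. */
  static int consume(struct frame *f)
  {
          if (atomic_load_explicit(&f->nm_status,
                                   memory_order_acquire) != STATUS_VALID)
                  return 0;
          printf("got: %s\n", f->data);
          return 1;
  }

  int main(void)
  {
          struct frame f = { .nm_status = STATUS_UNUSED };

          publish(&f, "frame payload");
          return !consume(&f);
  }
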
diff --git a/queue-3.14/tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch b/queue-3.14/tcp-do-not-apply-tso-segment-limit-to-non-tso-packets.patch
new file mode 100644 (file)
index 0000000..9b24f19
--- /dev/null
@@ -0,0 +1,54 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Thu, 1 Jan 2015 00:39:23 +1100
+Subject: tcp: Do not apply TSO segment limit to non-TSO packets
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 843925f33fcc293d80acf2c5c8a78adf3344d49b ]
+
+Thomas Jarosch reported IPsec TCP stalls when a PMTU event occurs.
+
+In fact the problem was completely unrelated to IPsec.  The bug is
+also reproducible if you just disable TSO/GSO.
+
+The problem is that when the MSS goes down, existing queued packets
+on the TX queue that have not yet been transmitted all look like
+TSO packets and get treated as such.
+
+This then triggers a bug where tcp_mss_split_point tells us to
+generate a zero-sized packet on the TX queue.  Once that happens
+we're screwed because the zero-sized packet can never be removed
+by ACKs.
+
+Fixes: 1485348d242 ("tcp: Apply device TSO segment limit earlier")
+Reported-by: Thomas Jarosch <thomas.jarosch@intra2net.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1894,7 +1894,7 @@ static bool tcp_write_xmit(struct sock *
+               if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                       break;
+-              if (tso_segs == 1) {
++              if (tso_segs == 1 || !sk->sk_gso_max_segs) {
+                       if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+                                                    (tcp_skb_is_last(sk, skb) ?
+                                                     nonagle : TCP_NAGLE_PUSH))))
+@@ -1931,7 +1931,7 @@ static bool tcp_write_xmit(struct sock *
+               }
+               limit = mss_now;
+-              if (tso_segs > 1 && !tcp_urg_mode(tp))
++              if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
+                       limit = tcp_mss_split_point(sk, skb, mss_now,
+                                                   min_t(unsigned int,
+                                                         cwnd_quota,
diff --git a/queue-3.14/team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch b/queue-3.14/team-avoid-possible-underflow-of-count_pending-value-for-notify_peers-and-mcast_rejoin.patch
new file mode 100644 (file)
index 0000000..c2302a0
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Wed, 14 Jan 2015 18:15:30 +0100
+Subject: team: avoid possible underflow of count_pending value for notify_peers and mcast_rejoin
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit b0d11b42785b70e19bc6a3122eead3f7969a7589 ]
+
+This patch fixes a race condition that may leave count_pending set
+to -1, which results in an unwanted large burst of ARP messages
+(in the "notify peers" case).
+
+Consider following scenario:
+
+count_pending == 2
+   CPU0                                           CPU1
+                                       team_notify_peers_work
+                                         atomic_dec_and_test (dec count_pending to 1)
+                                         schedule_delayed_work
+ team_notify_peers
+   atomic_add (adding 1 to count_pending)
+                                       team_notify_peers_work
+                                         atomic_dec_and_test (dec count_pending to 1)
+                                         schedule_delayed_work
+                                       team_notify_peers_work
+                                         atomic_dec_and_test (dec count_pending to 0)
+   schedule_delayed_work
+                                       team_notify_peers_work
+                                         atomic_dec_and_test (dec count_pending to -1)
+
+Fix this race by using atomic_dec_if_positive, which prevents
+count_pending from going below 0.
+
+Fixes: fc423ff00df3a1955441 ("team: add peer notification")
+Fixes: 492b200efdd20b8fcfd  ("team: add support for sending multicast rejoins")
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Jiri Benc <jbenc@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c |   16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -629,6 +629,7 @@ static int team_change_mode(struct team
+ static void team_notify_peers_work(struct work_struct *work)
+ {
+       struct team *team;
++      int val;
+       team = container_of(work, struct team, notify_peers.dw.work);
+@@ -636,9 +637,14 @@ static void team_notify_peers_work(struc
+               schedule_delayed_work(&team->notify_peers.dw, 0);
+               return;
+       }
++      val = atomic_dec_if_positive(&team->notify_peers.count_pending);
++      if (val < 0) {
++              rtnl_unlock();
++              return;
++      }
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+       rtnl_unlock();
+-      if (!atomic_dec_and_test(&team->notify_peers.count_pending))
++      if (val)
+               schedule_delayed_work(&team->notify_peers.dw,
+                                     msecs_to_jiffies(team->notify_peers.interval));
+ }
+@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struc
+ static void team_mcast_rejoin_work(struct work_struct *work)
+ {
+       struct team *team;
++      int val;
+       team = container_of(work, struct team, mcast_rejoin.dw.work);
+@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struc
+               schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+               return;
+       }
++      val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
++      if (val < 0) {
++              rtnl_unlock();
++              return;
++      }
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+       rtnl_unlock();
+-      if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
++      if (val)
+               schedule_delayed_work(&team->mcast_rejoin.dw,
+                                     msecs_to_jiffies(team->mcast_rejoin.interval));
+ }
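
atomic_dec_if_positive decrements only when the result stays non-negative and reports what happened via its return value (the old value minus one, whether or not the store happened). A user-space model of those semantics built on a C11 compare-exchange loop; this is a simplified sketch, not the kernel implementation:

  #include <stdatomic.h>
  #include <stdio.h>

  /* Returns old - 1; the decrement is only stored when the result is >= 0,
   * so the counter can never underflow. */
  static int atomic_dec_if_positive(_Atomic int *v)
  {
          int old = atomic_load(v);

          do {
                  if (old <= 0)
                          return old - 1; /* no store performed */
          } while (!atomic_compare_exchange_weak(v, &old, old - 1));
          return old - 1;
  }

  int main(void)
  {
          _Atomic int count_pending = 2;

          printf("%d\n", atomic_dec_if_positive(&count_pending)); /* 1 */
          printf("%d\n", atomic_dec_if_positive(&count_pending)); /* 0 */
          printf("%d\n", atomic_dec_if_positive(&count_pending)); /* -1 */
          printf("final: %d\n", atomic_load(&count_pending));     /* 0 */
          return 0;
  }

In the work functions above, a negative return means another path already consumed the count, zero means this was the last pending notification, and a positive value means more work should be rescheduled.
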
diff --git a/queue-3.14/tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch b/queue-3.14/tg3-tg3_disable_ints-using-uninitialized-mailbox-value-to-disable-interrupts.patch
new file mode 100644 (file)
index 0000000..a6ab0de
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Sat Jan 17 18:13:02 PST 2015
+From: Prashant Sreedharan <prashant@broadcom.com>
+Date: Sat, 20 Dec 2014 12:16:17 -0800
+Subject: tg3: tg3_disable_ints using uninitialized mailbox value to disable interrupts
+
+From: Prashant Sreedharan <prashant@broadcom.com>
+
+[ Upstream commit 05b0aa579397b734f127af58e401a30784a1e315 ]
+
+During driver load in tg3_init_one, if the driver detects DMA activity before
+initializing the chip, tg3_halt is called. As part of tg3_halt, interrupts are
+disabled using the routine tg3_disable_ints. This routine was using a mailbox
+value which was not initialized (default value is 0). As a result, the driver
+was writing 0x00000001 to PCI config space register 0, which is the vendor ID /
+device ID.
+
+This driver bug was exposed by commit a7877b17a667 ("PCI: Check only
+the Vendor ID to identify Configuration Request Retry"). The issue is only
+seen in older-generation chipsets like the 5722, because a config space write
+to offset 0 from the driver is possible there; newer-generation chips ignore
+writes to offset 0. Also, without commit a7877b17a667, when a GRC reset is
+issued on these older chips the bootcode would reprogram the vendor ID/device
+ID, which is why this bug was masked earlier.
+
+Fixed by initializing the interrupt mailbox registers before calling tg3_halt.
+
+Please queue for -stable.
+
+Reported-by: Nils Holland <nholland@tisys.org>
+Reported-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: Prashant Sreedharan <prashant@broadcom.com>
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c |   34 +++++++++++++++++-----------------
+ 1 file changed, 17 insertions(+), 17 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -17731,23 +17731,6 @@ static int tg3_init_one(struct pci_dev *
+               goto err_out_apeunmap;
+       }
+-      /*
+-       * Reset chip in case UNDI or EFI driver did not shutdown
+-       * DMA self test will enable WDMAC and we'll see (spurious)
+-       * pending DMA on the PCI bus at that point.
+-       */
+-      if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+-          (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+-              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+-              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+-      }
+-
+-      err = tg3_test_dma(tp);
+-      if (err) {
+-              dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+-              goto err_out_apeunmap;
+-      }
+-
+       intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+       rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+       sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+@@ -17792,6 +17775,23 @@ static int tg3_init_one(struct pci_dev *
+                       sndmbx += 0xc;
+       }
++      /*
++       * Reset chip in case UNDI or EFI driver did not shutdown
++       * DMA self test will enable WDMAC and we'll see (spurious)
++       * pending DMA on the PCI bus at that point.
++       */
++      if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
++          (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
++              tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
++              tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
++      }
++
++      err = tg3_test_dma(tp);
++      if (err) {
++              dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
++              goto err_out_apeunmap;
++      }
++
+       tg3_init_coal(tp);
+       pci_set_drvdata(pdev, dev);