--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Li RongQing <roy.qing.li@gmail.com>
+Date: Wed, 18 Jun 2014 13:46:02 +0800
+Subject: 8021q: fix a potential memory leak
+
+From: Li RongQing <roy.qing.li@gmail.com>
+
+[ Upstream commit 916c1689a09bc1ca81f2d7a34876f8d35aadd11b ]
+
+skb_cow called in vlan_reorder_header does not free the skb when it fails,
+and vlan_reorder_header returns NULL to reset the original skb when it is called
+in vlan_untag, leading to a memory leak.
+
+Signed-off-by: Li RongQing <roy.qing.li@gmail.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/8021q/vlan_core.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/8021q/vlan_core.c
++++ b/net/8021q/vlan_core.c
+@@ -108,8 +108,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_id);
+
+ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
+ {
+- if (skb_cow(skb, skb_headroom(skb)) < 0)
++ if (skb_cow(skb, skb_headroom(skb)) < 0) {
++ kfree_skb(skb);
+ return NULL;
++ }
++
+ memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+ skb->mac_header += VLAN_HLEN;
+ return skb;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Date: Mon, 7 Jul 2014 23:22:50 +0300
+Subject: appletalk: Fix socket referencing in skb
+
+From: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+
+[ Upstream commit 36beddc272c111689f3042bf3d10a64d8a805f93 ]
+
+Setting just skb->sk without taking its reference and setting a
+destructor is invalid. However, in the places where this was done, skb
+is used in a way not requiring skb->sk setting. So dropping the setting
+of skb->sk.
+Thanks to Eric Dumazet <eric.dumazet@gmail.com> for correct solution.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=79441
+Reported-by: Ed Martin <edman007@edman007.com>
+Signed-off-by: Andrey Utkin <andrey.krieger.utkin@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/appletalk/ddp.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/appletalk/ddp.c
++++ b/net/appletalk/ddp.c
+@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb
+ goto drop;
+
+ /* Queue packet (standard) */
+- skb->sk = sock;
+-
+ if (sock_queue_rcv_skb(sock, skb) < 0)
+ goto drop;
+
+@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *i
+ if (!skb)
+ goto out;
+
+- skb->sk = sk;
+ skb_reserve(skb, ddp_dl->header_length);
+ skb_reserve(skb, dev->hard_header_len);
+ skb->dev = dev;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Suresh Reddy <Suresh.Reddy@emulex.com>
+Date: Fri, 11 Jul 2014 14:03:01 +0530
+Subject: be2net: set EQ DB clear-intr bit in be_open()
+
+From: Suresh Reddy <Suresh.Reddy@emulex.com>
+
+[ Upstream commit 4cad9f3b61c7268fa89ab8096e23202300399b5d ]
+
+On BE3, if the clear-interrupt bit of the EQ doorbell is not set the first
+time it is armed, occasionally we have observed that the EQ doesn't raise
+anymore interrupts even if it is in armed state.
+This patch fixes this by setting the clear-interrupt bit when EQs are
+armed for the first time in be_open().
+
+Signed-off-by: Suresh Reddy <Suresh.Reddy@emulex.com>
+Signed-off-by: Sathya Perla <sathya.perla@emulex.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/emulex/benet/be_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -2797,7 +2797,7 @@ static int be_open(struct net_device *ne
+ for_all_evt_queues(adapter, eqo, i) {
+ napi_enable(&eqo->napi);
+ be_enable_busy_poll(eqo);
+- be_eq_notify(adapter, eqo->q.id, true, false, 0);
++ be_eq_notify(adapter, eqo->q.id, true, true, 0);
+ }
+ adapter->flags |= BE_FLAGS_NAPI_ENABLED;
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 26 Jun 2014 00:44:02 -0700
+Subject: bnx2x: fix possible panic under memory stress
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 07b0f00964def8af9321cfd6c4a7e84f6362f728 ]
+
+While it is legal to kfree(NULL), it is not wise to use :
+put_page(virt_to_head_page(NULL))
+
+ BUG: unable to handle kernel paging request at ffffeba400000000
+ IP: [<ffffffffc01f5928>] virt_to_head_page+0x36/0x44 [bnx2x]
+
+Reported-by: Michel Lespinasse <walken@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Ariel Elior <ariel.elior@qlogic.com>
+Fixes: d46d132cc021 ("bnx2x: use netdev_alloc_frag()")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -793,7 +793,8 @@ static void bnx2x_tpa_stop(struct bnx2x
+
+ return;
+ }
+- bnx2x_frag_free(fp, new_data);
++ if (new_data)
++ bnx2x_frag_free(fp, new_data);
+ drop:
+ /* drop the packet and keep the buffer in the bin */
+ DP(NETIF_MSG_RX_STATUS,
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+Date: Sun, 13 Jul 2014 09:47:47 +0200
+Subject: bonding: fix ad_select module param check
+
+From: Nikolay Aleksandrov <nikolay@redhat.com>
+
+[ Upstream commit 548d28bd0eac840d122b691279ce9f4ce6ecbfb6 ]
+
+Obvious copy/paste error when I converted the ad_select to the new
+option API. "lacp_rate" there should be "ad_select" so we can get the
+proper value.
+
+CC: Jay Vosburgh <j.vosburgh@gmail.com>
+CC: Veaceslav Falico <vfalico@gmail.com>
+CC: Andy Gospodarek <andy@greyhouse.net>
+CC: David S. Miller <davem@davemloft.net>
+
+Fixes: 9e5f5eebe765 ("bonding: convert ad_select to use the new option
+API")
+Reported-by: Karim Scheik <karim.scheik@prisma-solutions.at>
+Signed-off-by: Nikolay Aleksandrov <nikolay@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -4068,7 +4068,7 @@ static int bond_check_params(struct bond
+ }
+
+ if (ad_select) {
+- bond_opt_initstr(&newval, lacp_rate);
++ bond_opt_initstr(&newval, ad_select);
+ valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
+ &newval);
+ if (!valptr) {
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: =?UTF-8?q?Manuel=20Sch=C3=B6lling?= <manuel.schoelling@gmx.de>
+Date: Sat, 7 Jun 2014 23:57:25 +0200
+Subject: dns_resolver: assure that dns_query() result is null-terminated
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Manuel=20Sch=C3=B6lling?= <manuel.schoelling@gmx.de>
+
+[ Upstream commit 84a7c0b1db1c17d5ded8d3800228a608e1070b40 ]
+
+dns_query() credulously assumes that keys are null-terminated and
+returns a copy of a memory block that is off by one.
+
+Signed-off-by: Manuel Schölling <manuel.schoelling@gmx.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dns_resolver/dns_query.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -149,7 +149,9 @@ int dns_query(const char *type, const ch
+ if (!*_result)
+ goto put;
+
+- memcpy(*_result, upayload->data, len + 1);
++ memcpy(*_result, upayload->data, len);
++ *_result[len] = '\0';
++
+ if (_expiry)
+ *_expiry = rkey->expiry;
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Ben Hutchings <ben@decadent.org.uk>
+Date: Mon, 21 Jul 2014 00:06:48 +0100
+Subject: dns_resolver: Null-terminate the right string
+
+From: Ben Hutchings <ben@decadent.org.uk>
+
+[ Upstream commit 640d7efe4c08f06c4ae5d31b79bd8740e7f6790a ]
+
+*_result[len] is parsed as *(_result[len]) which is not at all what we
+want to touch here.
+
+Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
+Fixes: 84a7c0b1db1c ("dns_resolver: assure that dns_query() result is null-terminated")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/dns_resolver/dns_query.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/dns_resolver/dns_query.c
++++ b/net/dns_resolver/dns_query.c
+@@ -150,7 +150,7 @@ int dns_query(const char *type, const ch
+ goto put;
+
+ memcpy(*_result, upayload->data, len);
+- *_result[len] = '\0';
++ (*_result)[len] = '\0';
+
+ if (_expiry)
+ *_expiry = rkey->expiry;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: dingtianhong <dingtianhong@huawei.com>
+Date: Wed, 2 Jul 2014 13:50:48 +0800
+Subject: igmp: fix the problem when mc leave group
+
+From: dingtianhong <dingtianhong@huawei.com>
+
+[ Upstream commit 52ad353a5344f1f700c5b777175bdfa41d3cd65a ]
+
+The problem was triggered by these steps:
+
+1) create socket, bind and then setsockopt for add mc group.
+ mreq.imr_multiaddr.s_addr = inet_addr("255.0.0.37");
+ mreq.imr_interface.s_addr = inet_addr("192.168.1.2");
+ setsockopt(sockfd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
+
+2) drop the mc group for this socket.
+ mreq.imr_multiaddr.s_addr = inet_addr("255.0.0.37");
+ mreq.imr_interface.s_addr = inet_addr("0.0.0.0");
+ setsockopt(sockfd, IPPROTO_IP, IP_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
+
+3) and then drop the socket, I found the mc group was still used by the dev:
+
+ netstat -g
+
+ Interface RefCnt Group
+ --------------- ------ ---------------------
+ eth2 1 255.0.0.37
+
+Normally even though the IP_DROP_MEMBERSHIP return error, the mc group still need
+to be released for the netdev when drop the socket, but this process was broken when
+route default is NULL, the reason is that:
+
+The ip_mc_leave_group() will choose the in_dev by the imr_interface.s_addr, if input addr
+is NULL, the default route dev will be chosen, then the ifindex is got from the dev,
+then polling the inet->mc_list and return -ENODEV, but if the default route dev is NULL,
+the in_dev and ifIndex is both NULL, when polling the inet->mc_list, the mc group will be
+released from the mc_list, but the dev didn't dec the refcnt for this mc group, so
+when dropping the socket, the mc_list is NULL and the dev still keep this group.
+
+v1->v2: According Hideaki's suggestion, we should align with IPv6 (RFC3493) and BSDs,
+ so I add the checking for the in_dev before polling the mc_list, make sure when
+ we remove the mc group, dec the refcnt to the real dev which was using the mc address.
+ The problem would never happened again.
+
+Signed-off-by: Ding Tianhong <dingtianhong@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/igmp.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/igmp.c
++++ b/net/ipv4/igmp.c
+@@ -1952,6 +1952,10 @@ int ip_mc_leave_group(struct sock *sk, s
+
+ rtnl_lock();
+ in_dev = ip_mc_find_dev(net, imr);
++ if (!in_dev) {
++ ret = -ENODEV;
++ goto out;
++ }
+ ifindex = imr->imr_ifindex;
+ for (imlp = &inet->mc_list;
+ (iml = rtnl_dereference(*imlp)) != NULL;
+@@ -1969,16 +1973,14 @@ int ip_mc_leave_group(struct sock *sk, s
+
+ *imlp = iml->next_rcu;
+
+- if (in_dev)
+- ip_mc_dec_group(in_dev, group);
++ ip_mc_dec_group(in_dev, group);
+ rtnl_unlock();
+ /* decrease mem now to avoid the memleak warning */
+ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
+ kfree_rcu(iml, rcu);
+ return 0;
+ }
+- if (!in_dev)
+- ret = -ENODEV;
++out:
+ rtnl_unlock();
+ return ret;
+ }
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Dmitry Popov <ixaphire@qrator.net>
+Date: Sat, 5 Jul 2014 02:26:37 +0400
+Subject: ip_tunnel: fix ip_tunnel_lookup
+
+From: Dmitry Popov <ixaphire@qrator.net>
+
+[ Upstream commit e0056593b61253f1a8a9941dacda22e73b963cdc ]
+
+This patch fixes 3 similar bugs where incoming packets might be routed into
+wrong non-wildcard tunnels:
+
+1) Consider the following setup:
+ ip address add 1.1.1.1/24 dev eth0
+ ip address add 1.1.1.2/24 dev eth0
+ ip tunnel add ipip1 remote 2.2.2.2 local 1.1.1.1 mode ipip dev eth0
+ ip link set ipip1 up
+
+Incoming ipip packets from 2.2.2.2 were routed into ipip1 even if it has dst =
+1.1.1.2. Moreover even if there was wildcard tunnel like
+ ip tunnel add ipip0 remote 2.2.2.2 local any mode ipip dev eth0
+but it was created before explicit one (with local 1.1.1.1), incoming ipip
+packets with src = 2.2.2.2 and dst = 1.1.1.2 were still routed into ipip1.
+
+Same issue existed with all tunnels that use ip_tunnel_lookup (gre, vti)
+
+2) ip address add 1.1.1.1/24 dev eth0
+ ip tunnel add ipip1 remote 2.2.146.85 local 1.1.1.1 mode ipip dev eth0
+ ip link set ipip1 up
+
+Incoming ipip packets with dst = 1.1.1.1 were routed into ipip1, no matter what
+src address is. Any remote ip address which has ip_tunnel_hash = 0 raised this
+issue, 2.2.146.85 is just an example, there are more than 4 million of them.
+And again, wildcard tunnel like
+ ip tunnel add ipip0 remote any local 1.1.1.1 mode ipip dev eth0
+wouldn't be ever matched if it was created before explicit tunnel like above.
+
+Gre & vti tunnels had the same issue.
+
+3) ip address add 1.1.1.1/24 dev eth0
+ ip tunnel add gre1 remote 2.2.146.84 local 1.1.1.1 key 1 mode gre dev eth0
+ ip link set gre1 up
+
+Any incoming gre packet with key = 1 were routed into gre1, no matter what
+src/dst addresses are. Any remote ip address which has ip_tunnel_hash = 0 raised
+the issue, 2.2.146.84 is just an example, there are more than 4 million of them.
+Wildcard tunnel like
+ ip tunnel add gre2 remote any local any key 1 mode gre dev eth0
+wouldn't be ever matched if it was created before explicit tunnel like above.
+
+All this stuff happened because while looking for a wildcard tunnel we didn't
+check that matched tunnel is a wildcard one. Fixed.
+
+Signed-off-by: Dmitry Popov <ixaphire@qrator.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_tunnel.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -173,6 +173,7 @@ struct ip_tunnel *ip_tunnel_lookup(struc
+
+ hlist_for_each_entry_rcu(t, head, hash_node) {
+ if (remote != t->parms.iph.daddr ||
++ t->parms.iph.saddr != 0 ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+@@ -189,10 +190,11 @@ struct ip_tunnel *ip_tunnel_lookup(struc
+ head = &itn->tunnels[hash];
+
+ hlist_for_each_entry_rcu(t, head, hash_node) {
+- if ((local != t->parms.iph.saddr &&
+- (local != t->parms.iph.daddr ||
+- !ipv4_is_multicast(local))) ||
+- !(t->dev->flags & IFF_UP))
++ if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
++ (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
++ continue;
++
++ if (!(t->dev->flags & IFF_UP))
+ continue;
+
+ if (!ip_tunnel_key_match(&t->parms, flags, key))
+@@ -209,6 +211,8 @@ struct ip_tunnel *ip_tunnel_lookup(struc
+
+ hlist_for_each_entry_rcu(t, head, hash_node) {
+ if (t->parms.i_key != key ||
++ t->parms.iph.saddr != 0 ||
++ t->parms.iph.daddr != 0 ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 21 Jul 2014 07:17:42 +0200
+Subject: ipv4: fix buffer overflow in ip_options_compile()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 10ec9472f05b45c94db3c854d22581a20b97db41 ]
+
+There is a benign buffer overflow in ip_options_compile spotted by
+AddressSanitizer[1] :
+
+Its benign because we always can access one extra byte in skb->head
+(because header is followed by struct skb_shared_info), and in this case
+this byte is not even used.
+
+[28504.910798] ==================================================================
+[28504.912046] AddressSanitizer: heap-buffer-overflow in ip_options_compile
+[28504.913170] Read of size 1 by thread T15843:
+[28504.914026] [<ffffffff81802f91>] ip_options_compile+0x121/0x9c0
+[28504.915394] [<ffffffff81804a0d>] ip_options_get_from_user+0xad/0x120
+[28504.916843] [<ffffffff8180dedf>] do_ip_setsockopt.isra.15+0x8df/0x1630
+[28504.918175] [<ffffffff8180ec60>] ip_setsockopt+0x30/0xa0
+[28504.919490] [<ffffffff8181e59b>] tcp_setsockopt+0x5b/0x90
+[28504.920835] [<ffffffff8177462f>] sock_common_setsockopt+0x5f/0x70
+[28504.922208] [<ffffffff817729c2>] SyS_setsockopt+0xa2/0x140
+[28504.923459] [<ffffffff818cfb69>] system_call_fastpath+0x16/0x1b
+[28504.924722]
+[28504.925106] Allocated by thread T15843:
+[28504.925815] [<ffffffff81804995>] ip_options_get_from_user+0x35/0x120
+[28504.926884] [<ffffffff8180dedf>] do_ip_setsockopt.isra.15+0x8df/0x1630
+[28504.927975] [<ffffffff8180ec60>] ip_setsockopt+0x30/0xa0
+[28504.929175] [<ffffffff8181e59b>] tcp_setsockopt+0x5b/0x90
+[28504.930400] [<ffffffff8177462f>] sock_common_setsockopt+0x5f/0x70
+[28504.931677] [<ffffffff817729c2>] SyS_setsockopt+0xa2/0x140
+[28504.932851] [<ffffffff818cfb69>] system_call_fastpath+0x16/0x1b
+[28504.934018]
+[28504.934377] The buggy address ffff880026382828 is located 0 bytes to the right
+[28504.934377] of 40-byte region [ffff880026382800, ffff880026382828)
+[28504.937144]
+[28504.937474] Memory state around the buggy address:
+[28504.938430] ffff880026382300: ........ rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.939884] ffff880026382400: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.941294] ffff880026382500: .....rrr rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.942504] ffff880026382600: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.943483] ffff880026382700: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.944511] >ffff880026382800: .....rrr rrrrrrrr rrrrrrrr rrrrrrrr
+[28504.945573] ^
+[28504.946277] ffff880026382900: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28505.094949] ffff880026382a00: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28505.096114] ffff880026382b00: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28505.097116] ffff880026382c00: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28505.098472] ffff880026382d00: ffffffff rrrrrrrr rrrrrrrr rrrrrrrr
+[28505.099804] Legend:
+[28505.100269] f - 8 freed bytes
+[28505.100884] r - 8 redzone bytes
+[28505.101649] . - 8 allocated bytes
+[28505.102406] x=1..7 - x allocated bytes + (8-x) redzone bytes
+[28505.103637] ==================================================================
+
+[1] https://code.google.com/p/address-sanitizer/wiki/AddressSanitizerForKernel
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_options.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
+ optptr++;
+ continue;
+ }
++ if (unlikely(l < 2)) {
++ pp_ptr = optptr;
++ goto error;
++ }
+ optlen = optptr[1];
+ if (optlen < 2 || optlen > l) {
+ pp_ptr = optptr;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 24 Jun 2014 10:05:11 -0700
+Subject: ipv4: fix dst race in sk_dst_get()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f88649721268999bdff09777847080a52004f691 ]
+
+When IP route cache had been removed in linux-3.6, we broke assumption
+that dst entries were all freed after rcu grace period. DST_NOCACHE
+dst were supposed to be freed from dst_release(). But it appears
+we want to keep such dst around, either in UDP sockets or tunnels.
+
+In sk_dst_get() we need to make sure dst refcount is not 0
+before incrementing it, or else we might end up freeing a dst
+twice.
+
+DST_NOCACHE set on a dst does not mean this dst can not be attached
+to a socket or a tunnel.
+
+Then, before actual freeing, we need to observe a rcu grace period
+to make sure all other cpus can catch the fact the dst is no longer
+usable.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Dormando <dormando@rydia.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 4 ++--
+ net/core/dst.c | 16 +++++++++++-----
+ net/ipv4/ip_tunnel.c | 14 +++++---------
+ 3 files changed, 18 insertions(+), 16 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1755,8 +1755,8 @@ sk_dst_get(struct sock *sk)
+
+ rcu_read_lock();
+ dst = rcu_dereference(sk->sk_dst_cache);
+- if (dst)
+- dst_hold(dst);
++ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++ dst = NULL;
+ rcu_read_unlock();
+ return dst;
+ }
+--- a/net/core/dst.c
++++ b/net/core/dst.c
+@@ -267,6 +267,15 @@ again:
+ }
+ EXPORT_SYMBOL(dst_destroy);
+
++static void dst_destroy_rcu(struct rcu_head *head)
++{
++ struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
++
++ dst = dst_destroy(dst);
++ if (dst)
++ __dst_free(dst);
++}
++
+ void dst_release(struct dst_entry *dst)
+ {
+ if (dst) {
+@@ -274,11 +283,8 @@ void dst_release(struct dst_entry *dst)
+
+ newrefcnt = atomic_dec_return(&dst->__refcnt);
+ WARN_ON(newrefcnt < 0);
+- if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
+- dst = dst_destroy(dst);
+- if (dst)
+- __dst_free(dst);
+- }
++ if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
++ call_rcu(&dst->rcu_head, dst_destroy_rcu);
+ }
+ }
+ EXPORT_SYMBOL(dst_release);
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_t
+ {
+ struct dst_entry *old_dst;
+
+- if (dst) {
+- if (dst->flags & DST_NOCACHE)
+- dst = NULL;
+- else
+- dst_clone(dst);
+- }
++ dst_clone(dst);
+ old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
+ dst_release(old_dst);
+ }
+@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(
+
+ rcu_read_lock();
+ dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
++ if (dst && !atomic_inc_not_zero(&dst->__refcnt))
++ dst = NULL;
+ if (dst) {
+ if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+- rcu_read_unlock();
+ tunnel_dst_reset(t);
+- return NULL;
++ dst_release(dst);
++ dst = NULL;
+ }
+- dst_hold(dst);
+ }
+ rcu_read_unlock();
+ return (struct rtable *)dst;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Edward Allcutt <edward.allcutt@openmarket.com>
+Date: Mon, 30 Jun 2014 16:16:02 +0100
+Subject: ipv4: icmp: Fix pMTU handling for rare case
+
+From: Edward Allcutt <edward.allcutt@openmarket.com>
+
+[ Upstream commit 68b7107b62983f2cff0948292429d5f5999df096 ]
+
+Some older router implementations still send Fragmentation Needed
+errors with the Next-Hop MTU field set to zero. This is explicitly
+described as an eventuality that hosts must deal with by the
+standard (RFC 1191) since older standards specified that those
+bits must be zero.
+
+Linux had a generic (for all of IPv4) implementation of the algorithm
+described in the RFC for searching a list of MTU plateaus for a good
+value. Commit 46517008e116 ("ipv4: Kill ip_rt_frag_needed().")
+removed this as part of the changes to remove the routing cache.
+Subsequently any Fragmentation Needed packet with a zero Next-Hop
+MTU has been discarded without being passed to the per-protocol
+handlers or notifying userspace for raw sockets.
+
+When there is a router which does not implement RFC 1191 on an
+MTU limited path then this results in stalled connections since
+large packets are discarded and the local protocols are not
+notified so they never attempt to lower the pMTU.
+
+One example I have seen is an OpenBSD router terminating IPSec
+tunnels. It's worth pointing out that this case is distinct from
+the BSD 4.2 bug which incorrectly calculated the Next-Hop MTU
+since the commit in question dismissed that as a valid concern.
+
+All of the per-protocols handlers implement the simple approach from
+RFC 1191 of immediately falling back to the minimum value. Although
+this is sub-optimal it is vastly preferable to connections hanging
+indefinitely.
+
+Remove the Next-Hop MTU != 0 check and allow such packets
+to follow the normal path.
+
+Fixes: 46517008e116 ("ipv4: Kill ip_rt_frag_needed().")
+Signed-off-by: Edward Allcutt <edward.allcutt@openmarket.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/icmp.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -732,8 +732,6 @@ static void icmp_unreach(struct sk_buff
+ /* fall through */
+ case 0:
+ info = ntohs(icmph->un.frag.mtu);
+- if (!info)
+- goto out;
+ }
+ break;
+ case ICMP_SR_FAILED:
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 30 Jun 2014 01:26:23 -0700
+Subject: ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 7f502361531e9eecb396cf99bdc9e9a59f7ebd7f ]
+
+We have two different ways to handle changes to sk->sk_dst
+
+First way (used by TCP) assumes socket lock is owned by caller, and use
+no extra lock : __sk_dst_set() & __sk_dst_reset()
+
+Another way (used by UDP) uses sk_dst_lock because socket lock is not
+always taken. Note that sk_dst_lock is not softirq safe.
+
+These ways are not inter changeable for a given socket type.
+
+ipv4_sk_update_pmtu(), added in linux-3.8, added a race, as it used
+the socket lock as synchronization, but users might be UDP sockets.
+
+Instead of converting sk_dst_lock to a softirq safe version, use xchg()
+as we did for sk_rx_dst in commit e47eb5dfb296b ("udp: ipv4: do not use
+sk_dst_lock from softirq context")
+
+In a follow up patch, we probably can remove sk_dst_lock, as it is
+only used in IPv6.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Fixes: 9cb3a50c5f63e ("ipv4: Invalidate the socket cached route on pmtu events if possible")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 12 ++++++------
+ net/ipv4/route.c | 15 ++++++++-------
+ 2 files changed, 14 insertions(+), 13 deletions(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1793,9 +1793,11 @@ __sk_dst_set(struct sock *sk, struct dst
+ static inline void
+ sk_dst_set(struct sock *sk, struct dst_entry *dst)
+ {
+- spin_lock(&sk->sk_dst_lock);
+- __sk_dst_set(sk, dst);
+- spin_unlock(&sk->sk_dst_lock);
++ struct dst_entry *old_dst;
++
++ sk_tx_queue_clear(sk);
++ old_dst = xchg(&sk->sk_dst_cache, dst);
++ dst_release(old_dst);
+ }
+
+ static inline void
+@@ -1807,9 +1809,7 @@ __sk_dst_reset(struct sock *sk)
+ static inline void
+ sk_dst_reset(struct sock *sk)
+ {
+- spin_lock(&sk->sk_dst_lock);
+- __sk_dst_reset(sk);
+- spin_unlock(&sk->sk_dst_lock);
++ sk_dst_set(sk, NULL);
+ }
+
+ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1029,7 +1029,7 @@ void ipv4_sk_update_pmtu(struct sk_buff
+ const struct iphdr *iph = (const struct iphdr *) skb->data;
+ struct flowi4 fl4;
+ struct rtable *rt;
+- struct dst_entry *dst;
++ struct dst_entry *odst = NULL;
+ bool new = false;
+
+ bh_lock_sock(sk);
+@@ -1037,16 +1037,17 @@ void ipv4_sk_update_pmtu(struct sk_buff
+ if (!ip_sk_accept_pmtu(sk))
+ goto out;
+
+- rt = (struct rtable *) __sk_dst_get(sk);
++ odst = sk_dst_get(sk);
+
+- if (sock_owned_by_user(sk) || !rt) {
++ if (sock_owned_by_user(sk) || !odst) {
+ __ipv4_sk_update_pmtu(skb, sk, mtu);
+ goto out;
+ }
+
+ __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
+
+- if (!__sk_dst_check(sk, 0)) {
++ rt = (struct rtable *)odst;
++ if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
+ rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
+ if (IS_ERR(rt))
+ goto out;
+@@ -1056,8 +1057,7 @@ void ipv4_sk_update_pmtu(struct sk_buff
+
+ __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
+
+- dst = dst_check(&rt->dst, 0);
+- if (!dst) {
++ if (!dst_check(&rt->dst, 0)) {
+ if (new)
+ dst_release(&rt->dst);
+
+@@ -1069,10 +1069,11 @@ void ipv4_sk_update_pmtu(struct sk_buff
+ }
+
+ if (new)
+- __sk_dst_set(sk, &rt->dst);
++ sk_dst_set(sk, &rt->dst);
+
+ out:
+ bh_unlock_sock(sk);
++ dst_release(odst);
+ }
+ EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Loic Prylli <loicp@google.com>
+Date: Tue, 1 Jul 2014 21:39:43 -0700
+Subject: net: Fix NETDEV_CHANGE notifier usage causing spurious arp flush
+
+From: Loic Prylli <loicp@google.com>
+
+[ Upstream commit 54951194656e4853e441266fd095f880bc0398f3 ]
+
+A bug was introduced in NETDEV_CHANGE notifier sequence causing the
+arp table to be sometimes spuriously cleared (including manual arp
+entries marked permanent), upon network link carrier changes.
+
+The changed argument for the notifier was applied only to a single
+caller of NETDEV_CHANGE, missing among others netdev_state_change().
+So upon net_carrier events induced by the network, which are
+triggering a call to netdev_state_change(), arp_netdev_event() would
+decide whether to clear or not arp cache based on random/junk stack
+values (a kind of read buffer overflow).
+
+Fixes: be9efd365328 ("net: pass changed flags along with NETDEV_CHANGE event")
+Fixes: 6c8b4e3ff81b ("arp: flush arp cache on IFF_NOARP change")
+Signed-off-by: Loic Prylli <loicp@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly
+ static struct list_head offload_base __read_mostly;
+
+ static int netif_rx_internal(struct sk_buff *skb);
++static int call_netdevice_notifiers_info(unsigned long val,
++ struct net_device *dev,
++ struct netdev_notifier_info *info);
+
+ /*
+ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
+@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
+ void netdev_state_change(struct net_device *dev)
+ {
+ if (dev->flags & IFF_UP) {
+- call_netdevice_notifiers(NETDEV_CHANGE, dev);
++ struct netdev_notifier_change_info change_info;
++
++ change_info.flags_changed = 0;
++ call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
++ &change_info.info);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
+ }
+ }
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Jul 2014 02:39:38 -0700
+Subject: net: fix sparse warning in sk_dst_set()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5925a0555bdaf0b396a84318cbc21ba085f6c0d3 ]
+
+sk_dst_cache has __rcu annotation, so we need a cast to avoid
+following sparse error :
+
+include/net/sock.h:1774:19: warning: incorrect type in initializer (different address spaces)
+include/net/sock.h:1774:19: expected struct dst_entry [noderef] <asn:4>*__ret
+include/net/sock.h:1774:19: got struct dst_entry *dst
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: kbuild test robot <fengguang.wu@intel.com>
+Fixes: 7f502361531e ("ipv4: irq safe sk_dst_[re]set() and ipv4_sk_update_pmtu() fix")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/sock.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1796,7 +1796,7 @@ sk_dst_set(struct sock *sk, struct dst_e
+ struct dst_entry *old_dst;
+
+ sk_tx_queue_clear(sk);
+- old_dst = xchg(&sk->sk_dst_cache, dst);
++ old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+ dst_release(old_dst);
+ }
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Wei-Chun Chao <weichunc@plumgrid.com>
+Date: Sun, 8 Jun 2014 23:48:54 -0700
+Subject: net: fix UDP tunnel GSO of frag_list GRO packets
+
+From: Wei-Chun Chao <weichunc@plumgrid.com>
+
+[ Upstream commit 5882a07c72093dc3a18e2d2b129fb200686bb6ee ]
+
+This patch fixes a kernel BUG_ON in skb_segment. It is hit when
+testing two VMs on openvswitch with one VM acting as VXLAN gateway.
+
+During VXLAN packet GSO, skb_segment is called with skb->data
+pointing to inner TCP payload. skb_segment calls skb_network_protocol
+to retrieve the inner protocol. skb_network_protocol actually expects
+skb->data to point to MAC and it calls pskb_may_pull with ETH_HLEN.
+This ends up pulling in ETH_HLEN data from header tail. As a result,
+pskb_trim logic is skipped and BUG_ON is hit later.
+
+Move skb_push in front of skb_network_protocol so that skb->data
+lines up properly.
+
+kernel BUG at net/core/skbuff.c:2999!
+Call Trace:
+[<ffffffff816ac412>] tcp_gso_segment+0x122/0x410
+[<ffffffff816bc74c>] inet_gso_segment+0x13c/0x390
+[<ffffffff8164b39b>] skb_mac_gso_segment+0x9b/0x170
+[<ffffffff816b3658>] skb_udp_tunnel_segment+0xd8/0x390
+[<ffffffff816b3c00>] udp4_ufo_fragment+0x120/0x140
+[<ffffffff816bc74c>] inet_gso_segment+0x13c/0x390
+[<ffffffff8109d742>] ? default_wake_function+0x12/0x20
+[<ffffffff8164b39b>] skb_mac_gso_segment+0x9b/0x170
+[<ffffffff8164b4d0>] __skb_gso_segment+0x60/0xc0
+[<ffffffff8164b6b3>] dev_hard_start_xmit+0x183/0x550
+[<ffffffff8166c91e>] sch_direct_xmit+0xfe/0x1d0
+[<ffffffff8164bc94>] __dev_queue_xmit+0x214/0x4f0
+[<ffffffff8164bf90>] dev_queue_xmit+0x10/0x20
+[<ffffffff81687edb>] ip_finish_output+0x66b/0x890
+[<ffffffff81688a58>] ip_output+0x58/0x90
+[<ffffffff816c628f>] ? fib_table_lookup+0x29f/0x350
+[<ffffffff816881c9>] ip_local_out_sk+0x39/0x50
+[<ffffffff816cbfad>] iptunnel_xmit+0x10d/0x130
+[<ffffffffa0212200>] vxlan_xmit_skb+0x1d0/0x330 [vxlan]
+[<ffffffffa02a3919>] vxlan_tnl_send+0x129/0x1a0 [openvswitch]
+[<ffffffffa02a2cd6>] ovs_vport_send+0x26/0xa0 [openvswitch]
+[<ffffffffa029931e>] do_output+0x2e/0x50 [openvswitch]
+
+Signed-off-by: Wei-Chun Chao <weichunc@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -2881,12 +2881,13 @@ struct sk_buff *skb_segment(struct sk_bu
+ int pos;
+ int dummy;
+
++ __skb_push(head_skb, doffset);
+ proto = skb_network_protocol(head_skb, &dummy);
+ if (unlikely(!proto))
+ return ERR_PTR(-EINVAL);
+
+ csum = !!can_checksum_protocol(features, proto);
+- __skb_push(head_skb, doffset);
++
+ headroom = skb_headroom(head_skb);
+ pos = skb_headlen(head_skb);
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Jerry Chu <hkchu@google.com>
+Date: Mon, 14 Jul 2014 15:54:46 -0700
+Subject: net-gre-gro: Fix a bug that breaks the forwarding path
+
+From: Jerry Chu <hkchu@google.com>
+
+[ Upstream commit c3caf1192f904de2f1381211f564537235d50de3 ]
+
+Fixed a bug that was introduced by my GRE-GRO patch
+(bf5a755f5e9186406bbf50f4087100af5bd68e40 net-gre-gro: Add GRE
+support to the GRO stack) that breaks the forwarding path
+because various GSO related fields were not set. The bug will
+cause on the egress path either the GSO code to fail, or a
+GRE-TSO capable (NETIF_F_GSO_GRE) NICs to choke. The following
+fix has been tested for both cases.
+
+Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 2 ++
+ net/ipv4/af_inet.c | 3 +++
+ net/ipv4/gre_offload.c | 3 +++
+ net/ipv4/tcp_offload.c | 2 +-
+ net/ipv6/tcpv6_offload.c | 2 +-
+ 5 files changed, 10 insertions(+), 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4058,6 +4058,8 @@ static void napi_reuse_skb(struct napi_s
+ skb->vlan_tci = 0;
+ skb->dev = napi->dev;
+ skb->skb_iif = 0;
++ skb->encapsulation = 0;
++ skb_shinfo(skb)->gso_type = 0;
+ skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+ napi->skb = skb;
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -1434,6 +1434,9 @@ static int inet_gro_complete(struct sk_b
+ int proto = iph->protocol;
+ int err = -ENOSYS;
+
++ if (skb->encapsulation)
++ skb_set_inner_network_header(skb, nhoff);
++
+ csum_replace2(&iph->check, iph->tot_len, newlen);
+ iph->tot_len = newlen;
+
+--- a/net/ipv4/gre_offload.c
++++ b/net/ipv4/gre_offload.c
+@@ -255,6 +255,9 @@ static int gre_gro_complete(struct sk_bu
+ int err = -ENOENT;
+ __be16 type;
+
++ skb->encapsulation = 1;
++ skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
++
+ type = greh->protocol;
+ if (greh->flags & GRE_KEY)
+ grehlen += GRE_HEADER_SECTION;
+--- a/net/ipv4/tcp_offload.c
++++ b/net/ipv4/tcp_offload.c
+@@ -310,7 +310,7 @@ static int tcp4_gro_complete(struct sk_b
+
+ th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
+ iph->daddr, 0);
+- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+
+ return tcp_gro_complete(skb);
+ }
+--- a/net/ipv6/tcpv6_offload.c
++++ b/net/ipv6/tcpv6_offload.c
+@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_b
+
+ th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
+ &iph->daddr, 0);
+- skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
++ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+
+ return tcp_gro_complete(skb);
+ }
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+Date: Thu, 17 Jul 2014 13:34:09 +0200
+Subject: net: huawei_cdc_ncm: add "subclass 3" devices
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+
+[ Upstream commit c2a6c7813f1ffae636e369b5d7011c9f518d3cd9 ]
+
+Huawei's usage of the subclass and protocol fields is not 100%
+clear to us, but there appears to be a very strict system.
+
+A device with the "shared" device ID 12d1:1506 and this NCM
+function was recently reported (showing only default altsetting):
+
+ Interface Descriptor:
+ bLength 9
+ bDescriptorType 4
+ bInterfaceNumber 1
+ bAlternateSetting 0
+ bNumEndpoints 1
+ bInterfaceClass 255 Vendor Specific Class
+ bInterfaceSubClass 3
+ bInterfaceProtocol 22
+ iInterface 8 CDC Network Control Model (NCM)
+ ** UNRECOGNIZED: 05 24 00 10 01
+ ** UNRECOGNIZED: 06 24 1a 00 01 1f
+ ** UNRECOGNIZED: 0c 24 1b 00 01 00 04 10 14 dc 05 20
+ ** UNRECOGNIZED: 0d 24 0f 0a 0f 00 00 00 ea 05 03 00 01
+ ** UNRECOGNIZED: 05 24 06 01 01
+ Endpoint Descriptor:
+ bLength 7
+ bDescriptorType 5
+ bEndpointAddress 0x85 EP 5 IN
+ bmAttributes 3
+ Transfer Type Interrupt
+ Synch Type None
+ Usage Type Data
+ wMaxPacketSize 0x0010 1x 16 bytes
+ bInterval 9
+
+Cc: Enrico Mioso <mrkiko.rs@gmail.com>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/huawei_cdc_ncm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/usb/huawei_cdc_ncm.c
++++ b/drivers/net/usb/huawei_cdc_ncm.c
+@@ -207,6 +207,9 @@ static const struct usb_device_id huawei
+ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
+ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+ },
++ { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
++ .driver_info = (unsigned long)&huawei_cdc_ncm_info,
++ },
+
+ /* Terminating entry */
+ {
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+Date: Wed, 18 Jun 2014 14:21:24 +0200
+Subject: net: huawei_cdc_ncm: increase command buffer size
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+
+[ Upstream commit 3acc74619b0175b7a154cf8dc54813f6faf97aa9 ]
+
+Messages from the modem exceeding 256 bytes cause communication
+failure.
+
+The WDM protocol is strictly "read on demand", meaning that we only
+poll for unread data after receiving a notification from the modem.
+Since we have no way to know how much data the modem has to send,
+we must make sure that the buffer we provide is "big enough".
+Message truncation does not work. Truncated messages are left unread
+until the modem has another message to send. Which often won't
+happen until the userspace application has given up waiting for the
+final part of the last message, and therefore sends another command.
+
+With a proper CDC WDM function there is a descriptor telling us
+which buffer size the modem uses. But with this vendor specific
+implementation there is no known way to calculate the exact "big
+enough" number. It is an unknown property of the modem firmware.
+Experience has shown that 256 is too small. The discussion of
+this failure ended up concluding that 512 might be too small as
+well. So 1024 seems like a reasonable value for now.
+
+Fixes: 41c47d8cfd68 ("net: huawei_cdc_ncm: Introduce the huawei_cdc_ncm driver")
+Cc: Enrico Mioso <mrkiko.rs@gmail.com>
+Reported-by: Dan Williams <dcbw@redhat.com>
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Acked-By: Enrico Mioso <mrkiko.rs@gmail.com>
+Tested-by: Dan Williams <dcbw@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/huawei_cdc_ncm.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/usb/huawei_cdc_ncm.c
++++ b/drivers/net/usb/huawei_cdc_ncm.c
+@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct us
+ ctx = drvstate->ctx;
+
+ if (usbnet_dev->status)
+- /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
+- * decimal (0x100)"
++ /* The wMaxCommand buffer must be big enough to hold
++ * any message from the modem. Experience has shown
++ * that some replies are more than 256 bytes long
+ */
+ subdriver = usb_cdc_wdm_register(ctx->control,
+ &usbnet_dev->status->desc,
+- 256, /* wMaxCommand */
++ 1024, /* wMaxCommand */
+ huawei_cdc_ncm_wdm_manage_power);
+ if (IS_ERR(subdriver)) {
+ ret = PTR_ERR(subdriver);
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Thomas Fitzsimmons <fitzsim@fitzsim.org>
+Date: Tue, 8 Jul 2014 19:44:07 -0400
+Subject: net: mvneta: Fix big endian issue in mvneta_txq_desc_csum()
+
+From: Thomas Fitzsimmons <fitzsim@fitzsim.org>
+
+[ Upstream commit 0a1985879437d14bda8c90d0dae3455c467d7642 ]
+
+This commit fixes the command value generated for CSUM calculation
+when running in big endian mode. The Ethernet protocol ID for IP was
+being unconditionally byte-swapped in the layer 3 protocol check (with
+swab16), which caused the mvneta driver to not function correctly in
+big endian mode. This patch byte-swaps the ID conditionally with
+htons.
+
+Cc: <stable@vger.kernel.org> # v3.13+
+Signed-off-by: Thomas Fitzsimmons <fitzsim@fitzsim.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1217,7 +1217,7 @@ static u32 mvneta_txq_desc_csum(int l3_o
+ command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
+ command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
+
+- if (l3_proto == swab16(ETH_P_IP))
++ if (l3_proto == htons(ETH_P_IP))
+ command |= MVNETA_TXD_IP_CSUM;
+ else
+ command |= MVNETA_TX_L3_IP6;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Date: Tue, 8 Jul 2014 10:49:43 +0200
+Subject: net: mvneta: fix operation in 10 Mbit/s mode
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+[ Upstream commit 4d12bc63ab5e48c1d78fa13883cf6fefcea3afb1 ]
+
+As reported by Maggie Mae Roxas, the mvneta driver doesn't behave
+properly in 10 Mbit/s mode. This is due to a misconfiguration of the
+MVNETA_GMAC_AUTONEG_CONFIG register: bit MVNETA_GMAC_CONFIG_MII_SPEED
+must be set for a 100 Mbit/s speed, but cleared for a 10 Mbit/s speed,
+which the driver was not properly doing. This commit adjusts that by
+setting the MVNETA_GMAC_CONFIG_MII_SPEED bit only in 100 Mbit/s mode,
+and relying on the fact that all the speed related bits of this
+register are cleared at the beginning of the mvneta_adjust_link()
+function.
+
+This problem exists since c5aff18204da0 ("net: mvneta: driver for
+Marvell Armada 370/XP network unit") which is the commit that
+introduced the mvneta driver in the kernel.
+
+Cc: <stable@vger.kernel.org> # v3.8+
+Fixes: c5aff18204da0 ("net: mvneta: driver for Marvell Armada 370/XP network unit")
+Reported-by: Maggie Mae Roxas <maggie.mae.roxas@gmail.com>
+Cc: Maggie Mae Roxas <maggie.mae.roxas@gmail.com>
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -2393,7 +2393,7 @@ static void mvneta_adjust_link(struct ne
+
+ if (phydev->speed == SPEED_1000)
+ val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
+- else
++ else if (phydev->speed == SPEED_100)
+ val |= MVNETA_GMAC_CONFIG_MII_SPEED;
+
+ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Christoph Schulz <develop@kristov.de>
+Date: Sun, 13 Jul 2014 00:53:15 +0200
+Subject: net: pppoe: use correct channel MTU when using Multilink PPP
+
+From: Christoph Schulz <develop@kristov.de>
+
+[ Upstream commit a8a3e41c67d24eb12f9ab9680cbb85e24fcd9711 ]
+
+The PPP channel MTU is used with Multilink PPP when ppp_mp_explode() (see
+ppp_generic module) tries to determine how big a fragment might be. According
+to RFC 1661, the MTU excludes the 2-byte PPP protocol field, see the
+corresponding comment and code in ppp_mp_explode():
+
+ /*
+ * hdrlen includes the 2-byte PPP protocol field, but the
+ * MTU counts only the payload excluding the protocol field.
+ * (RFC1661 Section 2)
+ */
+ mtu = pch->chan->mtu - (hdrlen - 2);
+
+However, the pppoe module *does* include the PPP protocol field in the channel
+MTU, which is wrong as it causes the PPP payload to be 1-2 bytes too big under
+certain circumstances (one byte if PPP protocol compression is used, two
+otherwise), causing the generated Ethernet packets to be dropped. So the pppoe
+module has to subtract two bytes from the channel MTU. This error only
+manifests itself when using Multilink PPP, as otherwise the channel MTU is not
+used anywhere.
+
+In the following, I will describe how to reproduce this bug. We configure two
+pppd instances for multilink PPP over two PPPoE links, say eth2 and eth3, with
+a MTU of 1492 bytes for each link and a MRRU of 2976 bytes. (This MRRU is
+computed by adding the two link MTUs and subtracting the MP header twice, which
+is 4 bytes long.) The necessary pppd statements on both sides are "multilink
+mtu 1492 mru 1492 mrru 2976". On the client side, we additionally need "plugin
+rp-pppoe.so eth2" and "plugin rp-pppoe.so eth3", respectively; on the server
+side, we additionally need to start two pppoe-server instances to be able to
+establish two PPPoE sessions, one over eth2 and one over eth3. We set the MTU
+of the PPP network interface to the MRRU (2976) on both sides of the connection
+in order to make use of the higher bandwidth. (If we didn't do that, IP
+fragmentation would kick in, which we want to avoid.)
+
+Now we send a ICMPv4 echo request with a payload of 2948 bytes from client to
+server over the PPP link. This results in the following network packet:
+
+ 2948 (echo payload)
+ + 8 (ICMPv4 header)
+ + 20 (IPv4 header)
+---------------------
+ 2976 (PPP payload)
+
+These 2976 bytes do not exceed the MTU of the PPP network interface, so the
+IP packet is not fragmented. Now the multilink PPP code in ppp_mp_explode()
+prepends one protocol byte (0x21 for IPv4), making the packet one byte bigger
+than the negotiated MRRU. So this packet would have to be divided in three
+fragments. But this does not happen as each link MTU is assumed to be two bytes
+larger. So this packet is divided into two fragments only, one of size 1489 and
+one of size 1488. Now we have for that bigger fragment:
+
+ 1489 (PPP payload)
+ + 4 (MP header)
+ + 2 (PPP protocol field for the MP payload (0x3d))
+ + 6 (PPPoE header)
+--------------------------
+ 1501 (Ethernet payload)
+
+This packet exceeds the link MTU and is discarded.
+
+If one configures the link MTU on the client side to 1501, one can see the
+discarded Ethernet frames with tcpdump running on the client. A
+
+ping -s 2948 -c 1 192.168.15.254
+
+leads to the smaller fragment that is correctly received on the server side:
+
+(tcpdump -vvvne -i eth3 pppoes and ppp proto 0x3d)
+52:54:00:ad:87:fd > 52:54:00:79:5c:d0, ethertype PPPoE S (0x8864),
+ length 1514: PPPoE [ses 0x3] MLPPP (0x003d), length 1494: seq 0x000,
+ Flags [end], length 1492
+
+and to the bigger fragment that is not received on the server side:
+
+(tcpdump -vvvne -i eth2 pppoes and ppp proto 0x3d)
+52:54:00:70:9e:89 > 52:54:00:5d:6f:b0, ethertype PPPoE S (0x8864),
+ length 1515: PPPoE [ses 0x5] MLPPP (0x003d), length 1495: seq 0x000,
+ Flags [begin], length 1493
+
+With the patch below, we correctly obtain three fragments:
+
+52:54:00:ad:87:fd > 52:54:00:79:5c:d0, ethertype PPPoE S (0x8864),
+ length 1514: PPPoE [ses 0x1] MLPPP (0x003d), length 1494: seq 0x000,
+ Flags [begin], length 1492
+52:54:00:70:9e:89 > 52:54:00:5d:6f:b0, ethertype PPPoE S (0x8864),
+ length 1514: PPPoE [ses 0x1] MLPPP (0x003d), length 1494: seq 0x000,
+ Flags [none], length 1492
+52:54:00:ad:87:fd > 52:54:00:79:5c:d0, ethertype PPPoE S (0x8864),
+ length 27: PPPoE [ses 0x1] MLPPP (0x003d), length 7: seq 0x000,
+ Flags [end], length 5
+
+And the ICMPv4 echo request is successfully received at the server side:
+
+IP (tos 0x0, ttl 64, id 21925, offset 0, flags [DF], proto ICMP (1),
+ length 2976)
+ 192.168.222.2 > 192.168.15.254: ICMP echo request, id 30530, seq 0,
+ length 2956
+
+The bug was introduced in commit c9aa6895371b2a257401f59d3393c9f7ac5a8698
+("[PPPOE]: Advertise PPPoE MTU") from the very beginning. This patch applies
+to 3.10 upwards but the fix can be applied (with minor modifications) to
+kernels as old as 2.6.32.
+
+Signed-off-by: Christoph Schulz <develop@kristov.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ppp/pppoe.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *
+ po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
+ dev->hard_header_len);
+
+- po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
++ po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
+ po->chan.private = sk;
+ po->chan.ops = &pppoe_chan_ops;
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Bernd Wachter <bernd.wachter@jolla.com>
+Date: Tue, 1 Jul 2014 22:01:09 +0300
+Subject: net: qmi_wwan: Add ID for Telewell TW-LTE 4G v2
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bernd Wachter <bernd.wachter@jolla.com>
+
+[ Upstream commit 8dcb4b1526747d8431f9895e153dd478c9d16186 ]
+
+There's a new version of the Telewell 4G modem working with, but not
+recognized by this driver.
+
+Signed-off-by: Bernd Wachter <bernd.wachter@jolla.com>
+Acked-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -734,6 +734,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
+ {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
++ {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
+ {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
+ {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
+ {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+Date: Thu, 17 Jul 2014 13:33:51 +0200
+Subject: net: qmi_wwan: add two Sierra Wireless/Netgear devices
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Bj=C3=B8rn=20Mork?= <bjorn@mork.no>
+
+[ Upstream commit 5343330010a892b76a97fd93ad3c455a4a32a7fb ]
+
+Add two device IDs found in an out-of-tree driver downloadable
+from Netgear.
+
+Signed-off-by: Bjørn Mork <bjorn@mork.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/qmi_wwan.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -660,6 +660,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
+ {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
++ {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+ {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
+ {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
+ {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
+@@ -747,6 +748,7 @@ static const struct usb_device_id produc
+ {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
++ {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
+ {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+ {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
+ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Wed, 18 Jun 2014 23:46:31 +0200
+Subject: net: sctp: check proc_dointvec result in proc_sctp_do_auth
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 24599e61b7552673dd85971cf5a35369cd8c119e ]
+
+When writing to the sysctl field net.sctp.auth_enable, it can well
+be that the user buffer we handed over to proc_dointvec() via
+proc_sctp_do_auth() handler contains something other than integers.
+
+In that case, we would set an uninitialized 4-byte value from the
+stack to net->sctp.auth_enable that can be leaked back when reading
+the sysctl variable, and it can unintentionally turn auth_enable
+on/off based on the stack content since auth_enable is interpreted
+as a boolean.
+
+Fix it up by making sure proc_dointvec() returned successfully.
+
+Fixes: b14878ccb7fa ("net: sctp: cache auth_enable per endpoint")
+Reported-by: Florian Westphal <fwestpha@redhat.com>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sysctl.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -423,8 +423,7 @@ static int proc_sctp_do_auth(struct ctl_
+ tbl.data = &net->sctp.auth_enable;
+
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+-
+- if (write) {
++ if (write && ret == 0) {
+ struct sock *sk = net->sctp.ctl_sock;
+
+ net->sctp.auth_enable = new_value;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Sat, 12 Jul 2014 20:30:35 +0200
+Subject: net: sctp: fix information leaks in ulpevent layer
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 8f2e5ae40ec193bc0a0ed99e95315c3eebca84ea ]
+
+While working on some other SCTP code, I noticed that some
+structures shared with user space are leaking uninitialized
+stack or heap buffer. In particular, struct sctp_sndrcvinfo
+has a 2 bytes hole between .sinfo_flags and .sinfo_ppid that
+remains unfilled by us in sctp_ulpevent_read_sndrcvinfo() when
+putting this into cmsg. But also struct sctp_remote_error
+contains a 2 bytes hole that we don't fill but place into a skb
+through skb_copy_expand() via sctp_ulpevent_make_remote_error().
+
+Both structures are defined by the IETF in RFC6458:
+
+* Section 5.3.2. SCTP Header Information Structure:
+
+ The sctp_sndrcvinfo structure is defined below:
+
+ struct sctp_sndrcvinfo {
+ uint16_t sinfo_stream;
+ uint16_t sinfo_ssn;
+ uint16_t sinfo_flags;
+ <-- 2 bytes hole -->
+ uint32_t sinfo_ppid;
+ uint32_t sinfo_context;
+ uint32_t sinfo_timetolive;
+ uint32_t sinfo_tsn;
+ uint32_t sinfo_cumtsn;
+ sctp_assoc_t sinfo_assoc_id;
+ };
+
+* 6.1.3. SCTP_REMOTE_ERROR:
+
+ A remote peer may send an Operation Error message to its peer.
+ This message indicates a variety of error conditions on an
+ association. The entire ERROR chunk as it appears on the wire
+ is included in an SCTP_REMOTE_ERROR event. Please refer to the
+ SCTP specification [RFC4960] and any extensions for a list of
+ possible error formats. An SCTP error notification has the
+ following format:
+
+ struct sctp_remote_error {
+ uint16_t sre_type;
+ uint16_t sre_flags;
+ uint32_t sre_length;
+ uint16_t sre_error;
+ <-- 2 bytes hole -->
+ sctp_assoc_t sre_assoc_id;
+ uint8_t sre_data[];
+ };
+
+Fix this by setting both to 0 before filling them out. We also
+have other structures shared between user and kernel space in
+SCTP that contains holes (e.g. struct sctp_paddrthlds), but we
+copy that buffer over from user space first and thus don't need
+to care about it in that cases.
+
+While at it, we can also remove lengthy comments copied from
+the draft, instead, we update the comment with the correct RFC
+number where one can look it up.
+
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/ulpevent.c | 122 ++++++----------------------------------------------
+ 1 file changed, 15 insertions(+), 107 deletions(-)
+
+--- a/net/sctp/ulpevent.c
++++ b/net/sctp/ulpevent.c
+@@ -366,9 +366,10 @@ fail:
+ * specification [SCTP] and any extensions for a list of possible
+ * error formats.
+ */
+-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
+- const struct sctp_association *asoc, struct sctp_chunk *chunk,
+- __u16 flags, gfp_t gfp)
++struct sctp_ulpevent *
++sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
++ struct sctp_chunk *chunk, __u16 flags,
++ gfp_t gfp)
+ {
+ struct sctp_ulpevent *event;
+ struct sctp_remote_error *sre;
+@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make
+ /* Copy the skb to a new skb with room for us to prepend
+ * notification with.
+ */
+- skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
+- 0, gfp);
++ skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
+
+ /* Pull off the rest of the cause TLV from the chunk. */
+ skb_pull(chunk->skb, elen);
+@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make
+ event = sctp_skb2event(skb);
+ sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
+
+- sre = (struct sctp_remote_error *)
+- skb_push(skb, sizeof(struct sctp_remote_error));
++ sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
+
+ /* Trim the buffer to the right length. */
+- skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
++ skb_trim(skb, sizeof(*sre) + elen);
+
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_type:
+- * It should be SCTP_REMOTE_ERROR.
+- */
++ /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
++ memset(sre, 0, sizeof(*sre));
+ sre->sre_type = SCTP_REMOTE_ERROR;
+-
+- /*
+- * Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_flags: 16 bits (unsigned integer)
+- * Currently unused.
+- */
+ sre->sre_flags = 0;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_length: sizeof (__u32)
+- *
+- * This field is the total length of the notification data,
+- * including the notification header.
+- */
+ sre->sre_length = skb->len;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_error: 16 bits (unsigned integer)
+- * This value represents one of the Operational Error causes defined in
+- * the SCTP specification, in network byte order.
+- */
+ sre->sre_error = cause;
+-
+- /* Socket Extensions for SCTP
+- * 5.3.1.3 SCTP_REMOTE_ERROR
+- *
+- * sre_assoc_id: sizeof (sctp_assoc_t)
+- *
+- * The association id field, holds the identifier for the association.
+- * All notifications for a given association have the same association
+- * identifier. For TCP style socket, this field is ignored.
+- */
+ sctp_ulpevent_set_owner(event, asoc);
+ sre->sre_assoc_id = sctp_assoc2id(asoc);
+
+ return event;
+-
+ fail:
+ return NULL;
+ }
+@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_typ
+ return notification->sn_header.sn_type;
+ }
+
+-/* Copy out the sndrcvinfo into a msghdr. */
++/* RFC6458, Section 5.3.2. SCTP Header Information Structure
++ * (SCTP_SNDRCV, DEPRECATED)
++ */
+ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
+ struct msghdr *msghdr)
+ {
+@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const
+ if (sctp_ulpevent_is_notification(event))
+ return;
+
+- /* Sockets API Extensions for SCTP
+- * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+- *
+- * sinfo_stream: 16 bits (unsigned integer)
+- *
+- * For recvmsg() the SCTP stack places the message's stream number in
+- * this value.
+- */
++ memset(&sinfo, 0, sizeof(sinfo));
+ sinfo.sinfo_stream = event->stream;
+- /* sinfo_ssn: 16 bits (unsigned integer)
+- *
+- * For recvmsg() this value contains the stream sequence number that
+- * the remote endpoint placed in the DATA chunk. For fragmented
+- * messages this is the same number for all deliveries of the message
+- * (if more than one recvmsg() is needed to read the message).
+- */
+ sinfo.sinfo_ssn = event->ssn;
+- /* sinfo_ppid: 32 bits (unsigned integer)
+- *
+- * In recvmsg() this value is
+- * the same information that was passed by the upper layer in the peer
+- * application. Please note that byte order issues are NOT accounted
+- * for and this information is passed opaquely by the SCTP stack from
+- * one end to the other.
+- */
+ sinfo.sinfo_ppid = event->ppid;
+- /* sinfo_flags: 16 bits (unsigned integer)
+- *
+- * This field may contain any of the following flags and is composed of
+- * a bitwise OR of these values.
+- *
+- * recvmsg() flags:
+- *
+- * SCTP_UNORDERED - This flag is present when the message was sent
+- * non-ordered.
+- */
+ sinfo.sinfo_flags = event->flags;
+- /* sinfo_tsn: 32 bit (unsigned integer)
+- *
+- * For the receiving side, this field holds a TSN that was
+- * assigned to one of the SCTP Data Chunks.
+- */
+ sinfo.sinfo_tsn = event->tsn;
+- /* sinfo_cumtsn: 32 bit (unsigned integer)
+- *
+- * This field will hold the current cumulative TSN as
+- * known by the underlying SCTP layer. Note this field is
+- * ignored when sending and only valid for a receive
+- * operation when sinfo_flags are set to SCTP_UNORDERED.
+- */
+ sinfo.sinfo_cumtsn = event->cumtsn;
+- /* sinfo_assoc_id: sizeof (sctp_assoc_t)
+- *
+- * The association handle field, sinfo_assoc_id, holds the identifier
+- * for the association announced in the COMMUNICATION_UP notification.
+- * All notifications for a given association have the same identifier.
+- * Ignored for one-to-one style sockets.
+- */
+ sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
+-
+- /* context value that is set via SCTP_CONTEXT socket option. */
++ /* Context value that is set via SCTP_CONTEXT socket option. */
+ sinfo.sinfo_context = event->asoc->default_rcv_context;
+-
+ /* These fields are not used while receiving. */
+ sinfo.sinfo_timetolive = 0;
+
+ put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
+- sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
++ sizeof(sinfo), &sinfo);
+ }
+
+ /* Do accounting for bytes received and hold a reference to the association
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Thu, 19 Jun 2014 01:31:30 +0200
+Subject: net: sctp: propagate sysctl errors from proc_do* properly
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit ff5e92c1affe7166b3f6e7073e648ed65a6e2e59 ]
+
+sysctl handler proc_sctp_do_hmac_alg(), proc_sctp_do_rto_min() and
+proc_sctp_do_rto_max() do not properly reflect some error cases
+when writing values via sysctl from internal proc functions such
+as proc_dointvec() and proc_dostring().
+
+In all these cases we pass the test for write != 0 and partially
+do additional work just to notice that additional sanity checks
+fail and we return with hard-coded -EINVAL while proc_do*
+functions might also return different errors. So fix this up by
+simply testing a successful return of proc_do* right after
+calling it.
+
+This also allows to propagate its return value onwards to the user.
+While touching this, also fix up some minor style issues.
+
+Fixes: 4f3fdf3bc59c ("sctp: add check rto_min and rto_max in sysctl")
+Fixes: 3c68198e7511 ("sctp: Make hmac algorithm selection for cookie generation dynamic")
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sysctl.c | 43 +++++++++++++++++++++++--------------------
+ 1 file changed, 23 insertions(+), 20 deletions(-)
+
+--- a/net/sctp/sysctl.c
++++ b/net/sctp/sysctl.c
+@@ -307,41 +307,40 @@ static int proc_sctp_do_hmac_alg(struct
+ loff_t *ppos)
+ {
+ struct net *net = current->nsproxy->net_ns;
+- char tmp[8];
+ struct ctl_table tbl;
+- int ret;
+- int changed = 0;
++ bool changed = false;
+ char *none = "none";
++ char tmp[8];
++ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+
+ if (write) {
+ tbl.data = tmp;
+- tbl.maxlen = 8;
++ tbl.maxlen = sizeof(tmp);
+ } else {
+ tbl.data = net->sctp.sctp_hmac_alg ? : none;
+ tbl.maxlen = strlen(tbl.data);
+ }
+- ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+
+- if (write) {
++ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
++ if (write && ret == 0) {
+ #ifdef CONFIG_CRYPTO_MD5
+ if (!strncmp(tmp, "md5", 3)) {
+ net->sctp.sctp_hmac_alg = "md5";
+- changed = 1;
++ changed = true;
+ }
+ #endif
+ #ifdef CONFIG_CRYPTO_SHA1
+ if (!strncmp(tmp, "sha1", 4)) {
+ net->sctp.sctp_hmac_alg = "sha1";
+- changed = 1;
++ changed = true;
+ }
+ #endif
+ if (!strncmp(tmp, "none", 4)) {
+ net->sctp.sctp_hmac_alg = NULL;
+- changed = 1;
++ changed = true;
+ }
+-
+ if (!changed)
+ ret = -EINVAL;
+ }
+@@ -354,11 +353,10 @@ static int proc_sctp_do_rto_min(struct c
+ loff_t *ppos)
+ {
+ struct net *net = current->nsproxy->net_ns;
+- int new_value;
+- struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+- int ret;
++ struct ctl_table tbl;
++ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+@@ -367,12 +365,15 @@ static int proc_sctp_do_rto_min(struct c
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_min;
++
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+- if (write) {
+- if (ret || new_value > max || new_value < min)
++ if (write && ret == 0) {
++ if (new_value > max || new_value < min)
+ return -EINVAL;
++
+ net->sctp.rto_min = new_value;
+ }
++
+ return ret;
+ }
+
+@@ -381,11 +382,10 @@ static int proc_sctp_do_rto_max(struct c
+ loff_t *ppos)
+ {
+ struct net *net = current->nsproxy->net_ns;
+- int new_value;
+- struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+- int ret;
++ struct ctl_table tbl;
++ int ret, new_value;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+@@ -394,12 +394,15 @@ static int proc_sctp_do_rto_max(struct c
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_max;
++
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+- if (write) {
+- if (ret || new_value > max || new_value < min)
++ if (write && ret == 0) {
++ if (new_value > max || new_value < min)
+ return -EINVAL;
++
+ net->sctp.rto_max = new_value;
+ }
++
+ return ret;
+ }
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Ben Pfaff <blp@nicira.com>
+Date: Wed, 9 Jul 2014 10:31:22 -0700
+Subject: netlink: Fix handling of error from netlink_dump().
+
+From: Ben Pfaff <blp@nicira.com>
+
+[ Upstream commit ac30ef832e6af0505b6f0251a6659adcfa74975e ]
+
+netlink_dump() returns a negative errno value on error. Until now,
+netlink_recvmsg() directly recorded that negative value in sk->sk_err, but
+that's wrong since sk_err takes positive errno values. (This manifests as
+userspace receiving a positive return value from the recv() system call,
+falsely indicating success.) This bug was introduced in the commit that
+started checking the netlink_dump() return value, commit b44d211 (netlink:
+handle errors from netlink_dump()).
+
+Multithreaded Netlink dumps are one way to trigger this behavior in
+practice, as described in the commit message for the userspace workaround
+posted here:
+ http://openvswitch.org/pipermail/dev/2014-June/042339.html
+
+This commit also fixes the same bug in netlink_poll(), introduced in commit
+cd1df525d (netlink: add flow control for memory mapped I/O).
+
+Signed-off-by: Ben Pfaff <blp@nicira.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlink/af_netlink.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct
+ while (nlk->cb_running && netlink_dump_space(nlk)) {
+ err = netlink_dump(sk);
+ if (err < 0) {
+- sk->sk_err = err;
++ sk->sk_err = -err;
+ sk->sk_error_report(sk);
+ break;
+ }
+@@ -2448,7 +2448,7 @@ static int netlink_recvmsg(struct kiocb
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ ret = netlink_dump(sk);
+ if (ret) {
+- sk->sk_err = ret;
++ sk->sk_err = -ret;
+ sk->sk_error_report(sk);
+ }
+ }
shmem-fix-faulting-into-a-hole-not-taking-i_mutex.patch
shmem-fix-splicing-from-a-hole-while-it-s-punched.patch
e1000e-fix-shra-register-access-for-82579.patch
+ip_tunnel-fix-ip_tunnel_lookup.patch
+slip-fix-deadlock-in-write_wakeup.patch
+slcan-port-write_wakeup-deadlock-fix-from-slip.patch
+net-sctp-propagate-sysctl-errors-from-proc_do-properly.patch
+tcp-fix-tcp_match_skb_to_sack-for-unaligned-sack-at-end-of-an-skb.patch
+net-sctp-check-proc_dointvec-result-in-proc_sctp_do_auth.patch
+8021q-fix-a-potential-memory-leak.patch
+net-huawei_cdc_ncm-increase-command-buffer-size.patch
+net-fix-udp-tunnel-gso-of-frag_list-gro-packets.patch
+ipv4-fix-dst-race-in-sk_dst_get.patch
+ipv4-irq-safe-sk_dst_set-and-ipv4_sk_update_pmtu-fix.patch
+net-fix-sparse-warning-in-sk_dst_set.patch
+vlan-free-percpu-stats-in-device-destructor.patch
+bnx2x-fix-possible-panic-under-memory-stress.patch
+tcp-fix-divide-by-zero-when-pushing-during-tcp-repair.patch
+ipv4-icmp-fix-pmtu-handling-for-rare-case.patch
+net-qmi_wwan-add-id-for-telewell-tw-lte-4g-v2.patch
+net-qmi_wwan-add-two-sierra-wireless-netgear-devices.patch
+net-fix-netdev_change-notifier-usage-causing-spurious-arp-flush.patch
+igmp-fix-the-problem-when-mc-leave-group.patch
+tcp-fix-false-undo-corner-cases.patch
+appletalk-fix-socket-referencing-in-skb.patch
+net-mvneta-fix-operation-in-10-mbit-s-mode.patch
+net-mvneta-fix-big-endian-issue-in-mvneta_txq_desc_csum.patch
+netlink-fix-handling-of-error-from-netlink_dump.patch
+be2net-set-eq-db-clear-intr-bit-in-be_open.patch
+tipc-clear-next-pointer-of-message-fragments-before-reassembly.patch
+net-sctp-fix-information-leaks-in-ulpevent-layer.patch
+net-pppoe-use-correct-channel-mtu-when-using-multilink-ppp.patch
+bonding-fix-ad_select-module-param-check.patch
+net-gre-gro-fix-a-bug-that-breaks-the-forwarding-path.patch
+sunvnet-clean-up-objects-created-in-vnet_new-on-vnet_exit.patch
+net-huawei_cdc_ncm-add-subclass-3-devices.patch
+dns_resolver-assure-that-dns_query-result-is-null-terminated.patch
+dns_resolver-null-terminate-the-right-string.patch
+ipv4-fix-buffer-overflow-in-ip_options_compile.patch
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Tyler Hall <tylerwhall@gmail.com>
+Date: Sun, 15 Jun 2014 22:23:17 -0400
+Subject: slcan: Port write_wakeup deadlock fix from slip
+
+From: Tyler Hall <tylerwhall@gmail.com>
+
+[ Upstream commit a8e83b17536aad603fbeae4c460f2da0ee9fe6ed ]
+
+The commit "slip: Fix deadlock in write_wakeup" fixes a deadlock caused
+by a change made in both slcan and slip. This is a direct port of that
+fix.
+
+Signed-off-by: Tyler Hall <tylerwhall@gmail.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Andre Naujoks <nautsch2@gmail.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/slcan.c | 41 +++++++++++++++++++++++++++++------------
+ 1 file changed, 29 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -52,6 +52,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/kernel.h>
++#include <linux/workqueue.h>
+ #include <linux/can.h>
+ #include <linux/can/skb.h>
+
+@@ -85,6 +86,7 @@ struct slcan {
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
++ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ /* These are pointers to the malloc()ed frame buffers. */
+ unsigned char rbuff[SLC_MTU]; /* receiver buffer */
+@@ -309,34 +311,44 @@ static void slc_encaps(struct slcan *sl,
+ sl->dev->stats.tx_bytes += cf->can_dlc;
+ }
+
+-/*
+- * Called by the driver when there's room for more data. If we have
+- * more packets to send, we send them here.
+- */
+-static void slcan_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slcan_transmit(struct work_struct *work)
+ {
++ struct slcan *sl = container_of(work, struct slcan, tx_work);
+ int actual;
+- struct slcan *sl = (struct slcan *) tty->disc_data;
+
++ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+- if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
++ if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
++ spin_unlock_bh(&sl->lock);
+ return;
++ }
+
+- spin_lock(&sl->lock);
+ if (sl->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sl->dev->stats.tx_packets++;
+- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+- spin_unlock(&sl->lock);
++ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
++ spin_unlock_bh(&sl->lock);
+ netif_wake_queue(sl->dev);
+ return;
+ }
+
+- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+- spin_unlock(&sl->lock);
++ spin_unlock_bh(&sl->lock);
++}
++
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slcan_write_wakeup(struct tty_struct *tty)
++{
++ struct slcan *sl = tty->disc_data;
++
++ schedule_work(&sl->tx_work);
+ }
+
+ /* Send a can_frame to a TTY queue. */
+@@ -522,6 +534,7 @@ static struct slcan *slc_alloc(dev_t lin
+ sl->magic = SLCAN_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
++ INIT_WORK(&sl->tx_work, slcan_transmit);
+ slcan_devs[i] = dev;
+
+ return sl;
+@@ -620,8 +633,12 @@ static void slcan_close(struct tty_struc
+ if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+ return;
+
++ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
++ spin_unlock_bh(&sl->lock);
++
++ flush_work(&sl->tx_work);
+
+ /* Flush network side */
+ unregister_netdev(sl->dev);
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Tyler Hall <tylerwhall@gmail.com>
+Date: Sun, 15 Jun 2014 22:23:16 -0400
+Subject: slip: Fix deadlock in write_wakeup
+
+From: Tyler Hall <tylerwhall@gmail.com>
+
+[ Upstream commit 661f7fda21b15ec52f57fcd397c03370acc28688 ]
+
+Use schedule_work() to avoid potentially taking the spinlock in
+interrupt context.
+
+Commit cc9fa74e2a ("slip/slcan: added locking in wakeup function") added
+necessary locking to the wakeup function and 367525c8c2/ddcde142be ("can:
+slcan: Fix spinlock variant") converted it to spin_lock_bh() because the lock
+is also taken in timers.
+
+Disabling softirqs is not sufficient, however, as tty drivers may call
+write_wakeup from interrupt context. This driver calls tty->ops->write() with
+its spinlock held, which may immediately cause an interrupt on the same CPU and
+subsequent spin_bug().
+
+Simply converting to spin_lock_irq/irqsave() prevents this deadlock, but
+causes lockdep to point out a possible circular locking dependency
+between these locks:
+
+(&(&sl->lock)->rlock){-.....}, at: slip_write_wakeup
+(&port_lock_key){-.....}, at: serial8250_handle_irq.part.13
+
+The slip transmit is holding the slip spinlock when calling the tty write.
+This grabs the port lock. On an interrupt, the handler grabs the port
+lock and calls write_wakeup which grabs the slip lock. This could be a
+problem if a serial interrupt occurs on another CPU during the slip
+transmit.
+
+To deal with these issues, don't grab the lock in the wakeup function by
+deferring the writeout to a workqueue. Also hold the lock during close
+when de-assigning the tty pointer to safely disarm the worker and
+timers.
+
+This bug is easily reproducible on the first transmit when slip is
+used with the standard 8250 serial driver.
+
+[<c0410b7c>] (spin_bug+0x0/0x38) from [<c006109c>] (do_raw_spin_lock+0x60/0x1d0)
+ r5:eab27000 r4:ec02754c
+[<c006103c>] (do_raw_spin_lock+0x0/0x1d0) from [<c04185c0>] (_raw_spin_lock+0x28/0x2c)
+ r10:0000001f r9:eabb814c r8:eabb8140 r7:40070193 r6:ec02754c r5:eab27000
+ r4:ec02754c r3:00000000
+[<c0418598>] (_raw_spin_lock+0x0/0x2c) from [<bf3a0220>] (slip_write_wakeup+0x50/0xe0 [slip])
+ r4:ec027540 r3:00000003
+[<bf3a01d0>] (slip_write_wakeup+0x0/0xe0 [slip]) from [<c026e420>] (tty_wakeup+0x48/0x68)
+ r6:00000000 r5:ea80c480 r4:eab27000 r3:bf3a01d0
+[<c026e3d8>] (tty_wakeup+0x0/0x68) from [<c028a8ec>] (uart_write_wakeup+0x2c/0x30)
+ r5:ed68ea90 r4:c06790d8
+[<c028a8c0>] (uart_write_wakeup+0x0/0x30) from [<c028dc44>] (serial8250_tx_chars+0x114/0x170)
+[<c028db30>] (serial8250_tx_chars+0x0/0x170) from [<c028dffc>] (serial8250_handle_irq+0xa0/0xbc)
+ r6:000000c2 r5:00000060 r4:c06790d8 r3:00000000
+[<c028df5c>] (serial8250_handle_irq+0x0/0xbc) from [<c02933a4>] (dw8250_handle_irq+0x38/0x64)
+ r7:00000000 r6:edd2f390 r5:000000c2 r4:c06790d8
+[<c029336c>] (dw8250_handle_irq+0x0/0x64) from [<c028d2f4>] (serial8250_interrupt+0x44/0xc4)
+ r6:00000000 r5:00000000 r4:c06791c4 r3:c029336c
+[<c028d2b0>] (serial8250_interrupt+0x0/0xc4) from [<c0067fe4>] (handle_irq_event_percpu+0xb4/0x2b0)
+ r10:c06790d8 r9:eab27000 r8:00000000 r7:00000000 r6:0000001f r5:edd52980
+ r4:ec53b6c0 r3:c028d2b0
+[<c0067f30>] (handle_irq_event_percpu+0x0/0x2b0) from [<c006822c>] (handle_irq_event+0x4c/0x6c)
+ r10:c06790d8 r9:eab27000 r8:c0673ae0 r7:c05c2020 r6:ec53b6c0 r5:edd529d4
+ r4:edd52980
+[<c00681e0>] (handle_irq_event+0x0/0x6c) from [<c006b140>] (handle_level_irq+0xe8/0x100)
+ r6:00000000 r5:edd529d4 r4:edd52980 r3:00022000
+[<c006b058>] (handle_level_irq+0x0/0x100) from [<c00676f8>] (generic_handle_irq+0x30/0x40)
+ r5:0000001f r4:0000001f
+[<c00676c8>] (generic_handle_irq+0x0/0x40) from [<c000f57c>] (handle_IRQ+0xd0/0x13c)
+ r4:ea997b18 r3:000000e0
+[<c000f4ac>] (handle_IRQ+0x0/0x13c) from [<c00086c4>] (armada_370_xp_handle_irq+0x4c/0x118)
+ r8:000003ff r7:ea997b18 r6:ffffffff r5:60070013 r4:c0674dc0
+[<c0008678>] (armada_370_xp_handle_irq+0x0/0x118) from [<c0013840>] (__irq_svc+0x40/0x70)
+Exception stack(0xea997b18 to 0xea997b60)
+7b00: 00000001 20070013
+7b20: 00000000 0000000b 20070013 eab27000 20070013 00000000 ed10103e eab27000
+7b40: c06790d8 ea997b74 ea997b60 ea997b60 c04186c0 c04186c8 60070013 ffffffff
+ r9:eab27000 r8:ed10103e r7:ea997b4c r6:ffffffff r5:60070013 r4:c04186c8
+[<c04186a4>] (_raw_spin_unlock_irqrestore+0x0/0x54) from [<c0288fc0>] (uart_start+0x40/0x44)
+ r4:c06790d8 r3:c028ddd8
+[<c0288f80>] (uart_start+0x0/0x44) from [<c028982c>] (uart_write+0xe4/0xf4)
+ r6:0000003e r5:00000000 r4:ed68ea90 r3:0000003e
+[<c0289748>] (uart_write+0x0/0xf4) from [<bf3a0d20>] (sl_xmit+0x1c4/0x228 [slip])
+ r10:ed388e60 r9:0000003c r8:ffffffdd r7:0000003e r6:ec02754c r5:ea717eb8
+ r4:ec027000
+[<bf3a0b5c>] (sl_xmit+0x0/0x228 [slip]) from [<c0368d74>] (dev_hard_start_xmit+0x39c/0x6d0)
+ r8:eaf163c0 r7:ec027000 r6:ea717eb8 r5:00000000 r4:00000000
+
+Signed-off-by: Tyler Hall <tylerwhall@gmail.com>
+Cc: Oliver Hartkopp <socketcan@hartkopp.net>
+Cc: Andre Naujoks <nautsch2@gmail.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: linux-kernel@vger.kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/slip/slip.c | 36 ++++++++++++++++++++++++++----------
+ drivers/net/slip/slip.h | 1 +
+ 2 files changed, 27 insertions(+), 10 deletions(-)
+
+--- a/drivers/net/slip/slip.c
++++ b/drivers/net/slip/slip.c
+@@ -83,6 +83,7 @@
+ #include <linux/delay.h>
+ #include <linux/init.h>
+ #include <linux/slab.h>
++#include <linux/workqueue.h>
+ #include "slip.h"
+ #ifdef CONFIG_INET
+ #include <linux/ip.h>
+@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, u
+ #endif
+ }
+
+-/*
+- * Called by the driver when there's room for more data. If we have
+- * more packets to send, we send them here.
+- */
+-static void slip_write_wakeup(struct tty_struct *tty)
++/* Write out any remaining transmit buffer. Scheduled when tty is writable */
++static void slip_transmit(struct work_struct *work)
+ {
++ struct slip *sl = container_of(work, struct slip, tx_work);
+ int actual;
+- struct slip *sl = tty->disc_data;
+
++ spin_lock_bh(&sl->lock);
+ /* First make sure we're connected. */
+- if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
++ if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
++ spin_unlock_bh(&sl->lock);
+ return;
++ }
+
+- spin_lock_bh(&sl->lock);
+ if (sl->xleft <= 0) {
+ /* Now serial buffer is almost free & we can start
+ * transmission of another packet */
+ sl->dev->stats.tx_packets++;
+- clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
++ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+ spin_unlock_bh(&sl->lock);
+ sl_unlock(sl);
+ return;
+ }
+
+- actual = tty->ops->write(tty, sl->xhead, sl->xleft);
++ actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
+ sl->xleft -= actual;
+ sl->xhead += actual;
+ spin_unlock_bh(&sl->lock);
+ }
+
++/*
++ * Called by the driver when there's room for more data.
++ * Schedule the transmit.
++ */
++static void slip_write_wakeup(struct tty_struct *tty)
++{
++ struct slip *sl = tty->disc_data;
++
++ schedule_work(&sl->tx_work);
++}
++
+ static void sl_tx_timeout(struct net_device *dev)
+ {
+ struct slip *sl = netdev_priv(dev);
+@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
+ sl->magic = SLIP_MAGIC;
+ sl->dev = dev;
+ spin_lock_init(&sl->lock);
++ INIT_WORK(&sl->tx_work, slip_transmit);
+ sl->mode = SL_MODE_DEFAULT;
+ #ifdef CONFIG_SLIP_SMART
+ /* initialize timer_list struct */
+@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct
+ if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
+ return;
+
++ spin_lock_bh(&sl->lock);
+ tty->disc_data = NULL;
+ sl->tty = NULL;
++ spin_unlock_bh(&sl->lock);
++
++ flush_work(&sl->tx_work);
+
+ /* VSV = very important to remove timers */
+ #ifdef CONFIG_SLIP_SMART
+--- a/drivers/net/slip/slip.h
++++ b/drivers/net/slip/slip.h
+@@ -53,6 +53,7 @@ struct slip {
+ struct tty_struct *tty; /* ptr to TTY structure */
+ struct net_device *dev; /* easy for intr handling */
+ spinlock_t lock;
++ struct work_struct tx_work; /* Flushes transmit buffer */
+
+ #ifdef SL_INCLUDE_CSLIP
+ struct slcompress *slcomp; /* for header compression */
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Date: Wed, 16 Jul 2014 10:02:26 -0400
+Subject: sunvnet: clean up objects created in vnet_new() on vnet_exit()
+
+From: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+
+[ Upstream commit a4b70a07ed12a71131cab7adce2ce91c71b37060 ]
+
+Nothing cleans up the objects created by
+vnet_new(), they are completely leaked.
+
+vnet_exit(), after doing the vio_unregister_driver() to clean
+up ports, should call a helper function that iterates over vnet_list
+and cleans up those objects. This includes unregister_netdevice()
+as well as free_netdev().
+
+Signed-off-by: Sowmini Varadhan <sowmini.varadhan@oracle.com>
+Acked-by: Dave Kleikamp <dave.kleikamp@oracle.com>
+Reviewed-by: Karl Volz <karl.volz@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/sunvnet.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/sun/sunvnet.c
++++ b/drivers/net/ethernet/sun/sunvnet.c
+@@ -1083,6 +1083,24 @@ static struct vnet *vnet_find_or_create(
+ return vp;
+ }
+
++static void vnet_cleanup(void)
++{
++ struct vnet *vp;
++ struct net_device *dev;
++
++ mutex_lock(&vnet_list_mutex);
++ while (!list_empty(&vnet_list)) {
++ vp = list_first_entry(&vnet_list, struct vnet, list);
++ list_del(&vp->list);
++ dev = vp->dev;
++ /* vio_unregister_driver() should have cleaned up port_list */
++ BUG_ON(!list_empty(&vp->port_list));
++ unregister_netdev(dev);
++ free_netdev(dev);
++ }
++ mutex_unlock(&vnet_list_mutex);
++}
++
+ static const char *local_mac_prop = "local-mac-address";
+
+ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
+@@ -1240,7 +1258,6 @@ static int vnet_port_remove(struct vio_d
+
+ kfree(port);
+
+- unregister_netdev(vp->dev);
+ }
+ return 0;
+ }
+@@ -1268,6 +1285,7 @@ static int __init vnet_init(void)
+ static void __exit vnet_exit(void)
+ {
+ vio_unregister_driver(&vnet_port_driver);
++ vnet_cleanup();
+ }
+
+ module_init(vnet_init);
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Christoph Paasch <christoph.paasch@uclouvain.be>
+Date: Sat, 28 Jun 2014 18:26:37 +0200
+Subject: tcp: Fix divide by zero when pushing during tcp-repair
+
+From: Christoph Paasch <christoph.paasch@uclouvain.be>
+
+[ Upstream commit 5924f17a8a30c2ae18d034a86ee7581b34accef6 ]
+
+When in repair-mode and TCP_RECV_QUEUE is set, we end up calling
+tcp_push with mss_now being 0. If data is in the send-queue and
+tcp_set_skb_tso_segs gets called, we crash because it will divide by
+mss_now:
+
+[ 347.151939] divide error: 0000 [#1] SMP
+[ 347.152907] Modules linked in:
+[ 347.152907] CPU: 1 PID: 1123 Comm: packetdrill Not tainted 3.16.0-rc2 #4
+[ 347.152907] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2007
+[ 347.152907] task: f5b88540 ti: f3c82000 task.ti: f3c82000
+[ 347.152907] EIP: 0060:[<c1601359>] EFLAGS: 00210246 CPU: 1
+[ 347.152907] EIP is at tcp_set_skb_tso_segs+0x49/0xa0
+[ 347.152907] EAX: 00000b67 EBX: f5acd080 ECX: 00000000 EDX: 00000000
+[ 347.152907] ESI: f5a28f40 EDI: f3c88f00 EBP: f3c83d10 ESP: f3c83d00
+[ 347.152907] DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
+[ 347.152907] CR0: 80050033 CR2: 083158b0 CR3: 35146000 CR4: 000006b0
+[ 347.152907] Stack:
+[ 347.152907] c167f9d9 f5acd080 000005b4 00000002 f3c83d20 c16013e6 f3c88f00 f5acd080
+[ 347.152907] f3c83da0 c1603b5a f3c83d38 c10a0188 00000000 00000000 f3c83d84 c10acc85
+[ 347.152907] c1ad5ec0 00000000 00000000 c1ad679c 010003e0 00000000 00000000 f3c88fc8
+[ 347.152907] Call Trace:
+[ 347.152907] [<c167f9d9>] ? apic_timer_interrupt+0x2d/0x34
+[ 347.152907] [<c16013e6>] tcp_init_tso_segs+0x36/0x50
+[ 347.152907] [<c1603b5a>] tcp_write_xmit+0x7a/0xbf0
+[ 347.152907] [<c10a0188>] ? up+0x28/0x40
+[ 347.152907] [<c10acc85>] ? console_unlock+0x295/0x480
+[ 347.152907] [<c10ad24f>] ? vprintk_emit+0x1ef/0x4b0
+[ 347.152907] [<c1605716>] __tcp_push_pending_frames+0x36/0xd0
+[ 347.152907] [<c15f4860>] tcp_push+0xf0/0x120
+[ 347.152907] [<c15f7641>] tcp_sendmsg+0xf1/0xbf0
+[ 347.152907] [<c116d920>] ? kmem_cache_free+0xf0/0x120
+[ 347.152907] [<c106a682>] ? __sigqueue_free+0x32/0x40
+[ 347.152907] [<c106a682>] ? __sigqueue_free+0x32/0x40
+[ 347.152907] [<c114f0f0>] ? do_wp_page+0x3e0/0x850
+[ 347.152907] [<c161c36a>] inet_sendmsg+0x4a/0xb0
+[ 347.152907] [<c1150269>] ? handle_mm_fault+0x709/0xfb0
+[ 347.152907] [<c15a006b>] sock_aio_write+0xbb/0xd0
+[ 347.152907] [<c1180b79>] do_sync_write+0x69/0xa0
+[ 347.152907] [<c1181023>] vfs_write+0x123/0x160
+[ 347.152907] [<c1181d55>] SyS_write+0x55/0xb0
+[ 347.152907] [<c167f0d8>] sysenter_do_call+0x12/0x28
+
+This can easily be reproduced with the following packetdrill-script (the
+"magic" with netem, sk_pacing and limit_output_bytes is done to prevent
+the kernel from pushing all segments, because hitting the limit without
+doing this is not so easy with packetdrill):
+
+0 socket(..., SOCK_STREAM, IPPROTO_TCP) = 3
++0 setsockopt(3, SOL_SOCKET, SO_REUSEADDR, [1], 4) = 0
+
++0 bind(3, ..., ...) = 0
++0 listen(3, 1) = 0
+
++0 < S 0:0(0) win 32792 <mss 1460>
++0 > S. 0:0(0) ack 1 <mss 1460>
++0.1 < . 1:1(0) ack 1 win 65000
+
++0 accept(3, ..., ...) = 4
+
+// This forces that not all segments of the snd-queue will be pushed
++0 `tc qdisc add dev tun0 root netem delay 10ms`
++0 `sysctl -w net.ipv4.tcp_limit_output_bytes=2`
++0 setsockopt(4, SOL_SOCKET, 47, [2], 4) = 0
+
++0 write(4,...,10000) = 10000
++0 write(4,...,10000) = 10000
+
+// Set tcp-repair stuff, particularly TCP_RECV_QUEUE
++0 setsockopt(4, SOL_TCP, 19, [1], 4) = 0
++0 setsockopt(4, SOL_TCP, 20, [1], 4) = 0
+
+// This now will make the write push the remaining segments
++0 setsockopt(4, SOL_SOCKET, 47, [20000], 4) = 0
++0 `sysctl -w net.ipv4.tcp_limit_output_bytes=130000`
+
+// Now we will crash
++0 write(4,...,1000) = 1000
+
+This happens since ec3423257508 (tcp: fix retransmission in repair
+mode). Prior to that, the call to tcp_push was prevented by a check for
+tp->repair.
+
+The patch fixes it, by adding the new goto-label out_nopush. When exiting
+tcp_sendmsg and a push is not required, which is the case for tp->repair,
+we go to this label.
+
+When repairing and calling send() with TCP_RECV_QUEUE, the data is
+actually put in the receive-queue. So, no push is required because no
+data has been added to the send-queue.
+
+Cc: Andrew Vagin <avagin@openvz.org>
+Cc: Pavel Emelyanov <xemul@parallels.com>
+Fixes: ec3423257508 (tcp: fix retransmission in repair mode)
+Signed-off-by: Christoph Paasch <christoph.paasch@uclouvain.be>
+Acked-by: Andrew Vagin <avagin@openvz.org>
+Acked-by: Pavel Emelyanov <xemul@parallels.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, stru
+ if (unlikely(tp->repair)) {
+ if (tp->repair_queue == TCP_RECV_QUEUE) {
+ copied = tcp_send_rcvq(sk, msg, size);
+- goto out;
++ goto out_nopush;
+ }
+
+ err = -EINVAL;
+@@ -1282,6 +1282,7 @@ wait_for_memory:
+ out:
+ if (copied)
+ tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
++out_nopush:
+ release_sock(sk);
+ return copied + copied_syn;
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Yuchung Cheng <ycheng@google.com>
+Date: Wed, 2 Jul 2014 12:07:16 -0700
+Subject: tcp: fix false undo corner cases
+
+From: Yuchung Cheng <ycheng@google.com>
+
+[ Upstream commit 6e08d5e3c8236e7484229e46fdf92006e1dd4c49 ]
+
+The undo code assumes that, upon entering loss recovery, TCP
+1) always retransmit something
+2) the retransmission never fails locally (e.g., qdisc drop)
+
+so undo_marker is set in tcp_enter_recovery() and undo_retrans is
+incremented only when tcp_retransmit_skb() is successful.
+
+When the assumption is broken because TCP's cwnd is too small to
+retransmit or the retransmit fails locally. The next (DUP)ACK
+would incorrectly revert the cwnd and the congestion state in
+tcp_try_undo_dsack() or tcp_may_undo(). Subsequent (DUP)ACKs
+may enter the recovery state. The sender repeatedly enter and
+(incorrectly) exit recovery states if the retransmits continue to
+fail locally while receiving (DUP)ACKs.
+
+The fix is to initialize undo_retrans to -1 and start counting on
+the first retransmission. Always increment undo_retrans even if the
+retransmissions fail locally because they couldn't cause DSACKs to
+undo the cwnd reduction.
+
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 8 ++++----
+ net/ipv4/tcp_output.c | 6 ++++--
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1113,7 +1113,7 @@ static bool tcp_check_dsack(struct sock
+ }
+
+ /* D-SACK for already forgotten data... Do dumb counting. */
+- if (dup_sack && tp->undo_marker && tp->undo_retrans &&
++ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
+ !after(end_seq_0, prior_snd_una) &&
+ after(end_seq_0, tp->undo_marker))
+ tp->undo_retrans--;
+@@ -1193,7 +1193,7 @@ static u8 tcp_sacktag_one(struct sock *s
+
+ /* Account D-SACK for retransmitted packet. */
+ if (dup_sack && (sacked & TCPCB_RETRANS)) {
+- if (tp->undo_marker && tp->undo_retrans &&
++ if (tp->undo_marker && tp->undo_retrans > 0 &&
+ after(end_seq, tp->undo_marker))
+ tp->undo_retrans--;
+ if (sacked & TCPCB_SACKED_ACKED)
+@@ -1894,7 +1894,7 @@ static void tcp_clear_retrans_partial(st
+ tp->lost_out = 0;
+
+ tp->undo_marker = 0;
+- tp->undo_retrans = 0;
++ tp->undo_retrans = -1;
+ }
+
+ void tcp_clear_retrans(struct tcp_sock *tp)
+@@ -2663,7 +2663,7 @@ static void tcp_enter_recovery(struct so
+
+ tp->prior_ssthresh = 0;
+ tp->undo_marker = tp->snd_una;
+- tp->undo_retrans = tp->retrans_out;
++ tp->undo_retrans = tp->retrans_out ? : -1;
+
+ if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
+ if (!ece_ack)
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -2448,8 +2448,6 @@ int tcp_retransmit_skb(struct sock *sk,
+ if (!tp->retrans_stamp)
+ tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+
+- tp->undo_retrans += tcp_skb_pcount(skb);
+-
+ /* snd_nxt is stored to detect loss of retransmitted segment,
+ * see tcp_input.c tcp_sacktag_write_queue().
+ */
+@@ -2457,6 +2455,10 @@ int tcp_retransmit_skb(struct sock *sk,
+ } else {
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+ }
++
++ if (tp->undo_retrans < 0)
++ tp->undo_retrans = 0;
++ tp->undo_retrans += tcp_skb_pcount(skb);
+ return err;
+ }
+
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Neal Cardwell <ncardwell@google.com>
+Date: Wed, 18 Jun 2014 21:15:03 -0400
+Subject: tcp: fix tcp_match_skb_to_sack() for unaligned SACK at end of an skb
+
+From: Neal Cardwell <ncardwell@google.com>
+
+[ Upstream commit 2cd0d743b05e87445c54ca124a9916f22f16742e ]
+
+If there is an MSS change (or misbehaving receiver) that causes a SACK
+to arrive that covers the end of an skb but is less than one MSS, then
+tcp_match_skb_to_sack() was rounding up pkt_len to the full length of
+the skb ("Round if necessary..."), then chopping all bytes off the skb
+and creating a zero-byte skb in the write queue.
+
+This was visible now because the recently simplified TLP logic in
+bef1909ee3ed1c ("tcp: fixing TLP's FIN recovery") could find that 0-byte
+skb at the end of the write queue, and now that we do not check that
+skb's length we could send it as a TLP probe.
+
+Consider the following example scenario:
+
+ mss: 1000
+ skb: seq: 0 end_seq: 4000 len: 4000
+ SACK: start_seq: 3999 end_seq: 4000
+
+The tcp_match_skb_to_sack() code will compute:
+
+ in_sack = false
+ pkt_len = start_seq - TCP_SKB_CB(skb)->seq = 3999 - 0 = 3999
+ new_len = (pkt_len / mss) * mss = (3999/1000)*1000 = 3000
+ new_len += mss = 4000
+
+Previously we would find the new_len > skb->len check failing, so we
+would fall through and set pkt_len = new_len = 4000 and chop off
+pkt_len of 4000 from the 4000-byte skb, leaving a 0-byte segment
+afterward in the write queue.
+
+With this new commit, we notice that the new new_len >= skb->len check
+succeeds, so that we return without trying to fragment.
+
+Fixes: adb92db857ee ("tcp: Make SACK code to split only at mss boundaries")
+Reported-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Neal Cardwell <ncardwell@google.com>
+Cc: Eric Dumazet <edumazet@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Ilpo Jarvinen <ilpo.jarvinen@helsinki.fi>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -1169,7 +1169,7 @@ static int tcp_match_skb_to_sack(struct
+ unsigned int new_len = (pkt_len / mss) * mss;
+ if (!in_sack && new_len < pkt_len) {
+ new_len += mss;
+- if (new_len > skb->len)
++ if (new_len >= skb->len)
+ return 0;
+ }
+ pkt_len = new_len;
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Jon Paul Maloy <jon.maloy@ericsson.com>
+Date: Fri, 11 Jul 2014 08:45:27 -0400
+Subject: tipc: clear 'next'-pointer of message fragments before reassembly
+
+From: Jon Paul Maloy <jon.maloy@ericsson.com>
+
+[ Upstream commit 999417549c16dd0e3a382aa9f6ae61688db03181 ]
+
+If the 'next' pointer of the last fragment buffer in a message is not
+zeroed before reassembly, we risk ending up with a corrupt message,
+since the reassembly function itself isn't doing this.
+
+Currently, when a buffer is retrieved from the deferred queue of the
+broadcast link, the next pointer is not cleared, with the result as
+described above.
+
+This commit corrects this, and thereby fixes a bug that may occur when
+long broadcast messages are transmitted across dual interfaces. The bug
+has been present since 40ba3cdf542a469aaa9083fa041656e59b109b90 ("tipc:
+message reassembly using fragment chain")
+
+This commit should be applied to both net and net-next.
+
+Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/tipc/bcast.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/tipc/bcast.c
++++ b/net/tipc/bcast.c
+@@ -537,6 +537,7 @@ receive:
+
+ buf = node->bclink.deferred_head;
+ node->bclink.deferred_head = buf->next;
++ buf->next = NULL;
+ node->bclink.deferred_size--;
+ goto receive;
+ }
--- /dev/null
+From foo@baz Sat Jul 26 10:08:04 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 Jul 2014 02:25:15 -0700
+Subject: vlan: free percpu stats in device destructor
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a48e5fafecfb9c0c807d7e7284b5ff884dfb7a3a ]
+
+Madalin-Cristian reported crashes happening after a recent commit
+(5a4ae5f6e7d4 "vlan: unnecessary to check if vlan_pcpu_stats is NULL")
+
+-----------------------------------------------------------------------
+root@p5040ds:~# vconfig add eth8 1
+root@p5040ds:~# vconfig rem eth8.1
+Unable to handle kernel paging request for data at address 0x2bc88028
+Faulting instruction address: 0xc058e950
+Oops: Kernel access of bad area, sig: 11 [#1]
+SMP NR_CPUS=8 CoreNet Generic
+Modules linked in:
+CPU: 3 PID: 2167 Comm: vconfig Tainted: G W 3.16.0-rc3-00346-g65e85bf #2
+task: e7264d90 ti: e2c2c000 task.ti: e2c2c000
+NIP: c058e950 LR: c058ea30 CTR: c058e900
+REGS: e2c2db20 TRAP: 0300 Tainted: G W (3.16.0-rc3-00346-g65e85bf)
+MSR: 00029002 <CE,EE,ME> CR: 48000428 XER: 20000000
+DEAR: 2bc88028 ESR: 00000000
+GPR00: c047299c e2c2dbd0 e7264d90 00000000 2bc88000 00000000 ffffffff 00000000
+GPR08: 0000000f 00000000 000000ff 00000000 28000422 10121928 10100000 10100000
+GPR16: 10100000 00000000 c07c5968 00000000 00000000 00000000 e2c2dc48 e7838000
+GPR24: c07c5bac c07c58a8 e77290cc c07b0000 00000000 c05de6c0 e7838000 e2c2dc48
+NIP [c058e950] vlan_dev_get_stats64+0x50/0x170
+LR [c058ea30] vlan_dev_get_stats64+0x130/0x170
+Call Trace:
+[e2c2dbd0] [ffffffea] 0xffffffea (unreliable)
+[e2c2dc20] [c047299c] dev_get_stats+0x4c/0x140
+[e2c2dc40] [c0488ca8] rtnl_fill_ifinfo+0x3d8/0x960
+[e2c2dd70] [c0489f4c] rtmsg_ifinfo+0x6c/0x110
+[e2c2dd90] [c04731d4] rollback_registered_many+0x344/0x3b0
+[e2c2ddd0] [c047332c] rollback_registered+0x2c/0x50
+[e2c2ddf0] [c0476058] unregister_netdevice_queue+0x78/0xf0
+[e2c2de00] [c058d800] unregister_vlan_dev+0xc0/0x160
+[e2c2de20] [c058e360] vlan_ioctl_handler+0x1c0/0x550
+[e2c2de90] [c045d11c] sock_ioctl+0x28c/0x2f0
+[e2c2deb0] [c010d070] do_vfs_ioctl+0x90/0x7b0
+[e2c2df20] [c010d7d0] SyS_ioctl+0x40/0x80
+[e2c2df40] [c000f924] ret_from_syscall+0x0/0x3c
+
+Fix this problem by freeing percpu stats from dev->destructor() instead
+of ndo_uninit()
+
+Reported-by: Madalin-Cristian Bucur <madalin.bucur@freescale.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Madalin-Cristian Bucur <madalin.bucur@freescale.com>
+Fixes: 5a4ae5f6e7d4 ("vlan: unnecessary to check if vlan_pcpu_stats is NULL")
+Cc: Li RongQing <roy.qing.li@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/8021q/vlan_dev.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -635,8 +635,6 @@ static void vlan_dev_uninit(struct net_d
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ int i;
+
+- free_percpu(vlan->vlan_pcpu_stats);
+- vlan->vlan_pcpu_stats = NULL;
+ for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+ while ((pm = vlan->egress_priority_map[i]) != NULL) {
+ vlan->egress_priority_map[i] = pm->next;
+@@ -796,6 +794,15 @@ static const struct net_device_ops vlan_
+ .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
+ };
+
++static void vlan_dev_free(struct net_device *dev)
++{
++ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
++
++ free_percpu(vlan->vlan_pcpu_stats);
++ vlan->vlan_pcpu_stats = NULL;
++ free_netdev(dev);
++}
++
+ void vlan_setup(struct net_device *dev)
+ {
+ ether_setup(dev);
+@@ -805,7 +812,7 @@ void vlan_setup(struct net_device *dev)
+ dev->tx_queue_len = 0;
+
+ dev->netdev_ops = &vlan_netdev_ops;
+- dev->destructor = free_netdev;
++ dev->destructor = vlan_dev_free;
+ dev->ethtool_ops = &vlan_ethtool_ops;
+
+ memset(dev->broadcast, 0, ETH_ALEN);