--- /dev/null
+From a6e170057815e3ed86d68ca9229facd30031f930 Mon Sep 17 00:00:00 2001
+From: stephen hemminger <stephen@networkplumber.org>
+Date: Mon, 20 May 2013 06:54:43 +0000
+Subject: 8139cp: reset BQL when ring tx ring cleared
+
+From: stephen hemminger <stephen@networkplumber.org>
+
+[ Upstream commit 98962baad72fd6d393bf39dbb7c2076532c363c6 ]
+
+This patch cures transmit timeouts with DHCP observed
+while running under KVM. When the transmit ring is cleaned out,
+the Byte Queue Limit values need to be reset.
+
+Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/8139cp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/realtek/8139cp.c
++++ b/drivers/net/ethernet/realtek/8139cp.c
+@@ -1097,6 +1097,7 @@ static void cp_clean_rings (struct cp_pr
+ cp->dev->stats.tx_dropped++;
+ }
+ }
++ netdev_reset_queue(cp->dev);
+
+ memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
+ memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
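For context on the one-line fix above: BQL works by pairing the bytes a driver
reports as queued to the hardware with the bytes it later reports as completed,
and that pair of counters must be cleared whenever the TX ring is emptied without
completions. A minimal sketch of the pairing, with illustrative names (not 8139cp
code), assuming the standard netdev_sent_queue()/netdev_completed_queue()/
netdev_reset_queue() helpers:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Bytes handed to the hardware are reported at transmit time... */
    static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
    {
            /* ...descriptor setup for the TX ring would go here... */
            netdev_sent_queue(dev, skb->len);
            return NETDEV_TX_OK;
    }

    /* ...and reported back when the hardware signals TX completion. */
    static void foo_tx_complete(struct net_device *dev, unsigned int pkts,
                                unsigned int bytes)
    {
            netdev_completed_queue(dev, pkts, bytes);
    }

    /* Dropping descriptors without completions (ring cleanup, reset) leaves
     * the two counters out of sync and can stall the queue, so clear them
     * together with the ring; that is the netdev_reset_queue() call added above.
     */
    static void foo_clean_tx_ring(struct net_device *dev)
    {
            netdev_reset_queue(dev);
    }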
--- /dev/null
+From e97ad85630ba172b34ee1ef9ea9f8cbe41bd9498 Mon Sep 17 00:00:00 2001
+From: Zheng Li <zheng.x.li@oracle.com>
+Date: Wed, 19 Jun 2013 00:53:47 -0700
+Subject: bonding: rlb mode of bond should not alter ARP originating via bridge
+
+From: Zheng Li <zheng.x.li@oracle.com>
+
+[ Upstream commit 567b871e503316b0927e54a3d7c86d50b722d955 ]
+
+Do not modify or load balance ARP packets passing through balance-alb
+mode (wherein the ARP did not originate locally, and arrived via a bridge).
+
+Modifying pass-through ARP replies causes an incorrect MAC address
+to be placed into the ARP packet, rendering peers unable to communicate
+with the actual destination from which the ARP reply originated.
+
+Load balancing pass-through ARP requests causes an entry to be
+created for the peer in the rlb table, and bond_alb_monitor will
+occasionally issue ARP updates to all peers in the table instructing them
+as to which MAC address they should communicate with; this occurs when
+some event sets rx_ntt. In the bridged case, however, the MAC address
+used for the update would be the MAC of the slave, not the actual source
+MAC of the originating destination. This would render peers unable to
+communicate with the destinations beyond the bridge.
+
+Signed-off-by: Zheng Li <zheng.x.li@oracle.com>
+Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Matthew O'Connor <liquidhorse@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_alb.c | 6 ++++++
+ drivers/net/bonding/bonding.h | 13 +++++++++++++
+ include/linux/etherdevice.h | 33 +++++++++++++++++++++++++++++++++
+ 3 files changed, 52 insertions(+)
+
+--- a/drivers/net/bonding/bond_alb.c
++++ b/drivers/net/bonding/bond_alb.c
+@@ -704,6 +704,12 @@ static struct slave *rlb_arp_xmit(struct
+ struct arp_pkt *arp = arp_pkt(skb);
+ struct slave *tx_slave = NULL;
+
++ /* Don't modify or load balance ARPs that do not originate locally
++ * (e.g.,arrive via a bridge).
++ */
++ if (!bond_slave_has_mac(bond, arp->mac_src))
++ return NULL;
++
+ if (arp->op_code == htons(ARPOP_REPLY)) {
+ /* the arp must be sent on the selected
+ * rx channel
+--- a/drivers/net/bonding/bonding.h
++++ b/drivers/net/bonding/bonding.h
+@@ -18,6 +18,7 @@
+ #include <linux/timer.h>
+ #include <linux/proc_fs.h>
+ #include <linux/if_bonding.h>
++#include <linux/etherdevice.h>
+ #include <linux/cpumask.h>
+ #include <linux/in6.h>
+ #include <linux/netpoll.h>
+@@ -450,6 +451,18 @@ static inline void bond_destroy_proc_dir
+ }
+ #endif
+
++static inline struct slave *bond_slave_has_mac(struct bonding *bond,
++ const u8 *mac)
++{
++ int i = 0;
++ struct slave *tmp;
++
++ bond_for_each_slave(bond, tmp, i)
++ if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
++ return tmp;
++
++ return NULL;
++}
+
+ /* exported from bond_main.c */
+ extern int bond_net_id;
+--- a/include/linux/etherdevice.h
++++ b/include/linux/etherdevice.h
+@@ -277,4 +277,37 @@ static inline unsigned long compare_ethe
+ #endif
+ }
+
++/**
++ * ether_addr_equal_64bits - Compare two Ethernet addresses
++ * @addr1: Pointer to an array of 8 bytes
++ * @addr2: Pointer to an other array of 8 bytes
++ *
++ * Compare two Ethernet addresses, returns true if equal, false otherwise.
++ *
++ * The function doesn't need any conditional branches and possibly uses
++ * word memory accesses on CPU allowing cheap unaligned memory reads.
++ * arrays = { byte1, byte2, byte3, byte4, byte5, byte6, pad1, pad2 }
++ *
++ * Please note that alignment of addr1 & addr2 are only guaranteed to be 16 bits.
++ */
++
++static inline bool ether_addr_equal_64bits(const u8 addr1[6+2],
++ const u8 addr2[6+2])
++{
++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++ unsigned long fold = ((*(unsigned long *)addr1) ^
++ (*(unsigned long *)addr2));
++
++ if (sizeof(fold) == 8)
++ return zap_last_2bytes(fold) == 0;
++
++ fold |= zap_last_2bytes((*(unsigned long *)(addr1 + 4)) ^
++ (*(unsigned long *)(addr2 + 4)));
++ return fold == 0;
++#else
++ return ether_addr_equal(addr1, addr2);
++#endif
++}
++
++
+ #endif /* _LINUX_ETHERDEVICE_H */
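The ether_addr_equal_64bits() helper introduced above relies on reading the
6-byte address plus 2 bytes of padding as a single word and masking the padding
off. A standalone userspace illustration of that trick (little-endian assumed
here; the kernel helper handles the padding portably via zap_last_2bytes()):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Compare two MAC addresses stored in 8-byte buffers (6 address bytes
     * followed by 2 don't-care padding bytes) with one 64-bit XOR.
     */
    static bool mac_equal_64bits(const uint8_t a1[8], const uint8_t a2[8])
    {
            uint64_t a, b;

            memcpy(&a, a1, sizeof(a));
            memcpy(&b, a2, sizeof(b));
            /* On little-endian hosts the 2 padding bytes occupy the top
             * 16 bits, so mask them out before testing for equality.
             */
            return ((a ^ b) & 0x0000ffffffffffffULL) == 0;
    }

    int main(void)
    {
            uint8_t m1[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xaa, 0xbb };
            uint8_t m2[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0xcc, 0xdd };

            /* Same MAC, different padding: still reported equal. */
            printf("equal: %d\n", mac_equal_64bits(m1, m2));
            return 0;
    }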
+++ /dev/null
-From 8444d5c69549aa0f0b574cc608742d4669e1cc01 Mon Sep 17 00:00:00 2001
-From: Jerome Glisse <jglisse@redhat.com>
-Date: Wed, 19 Jun 2013 10:02:28 -0400
-Subject: drm/radeon: update lockup tracking when scheduling in empty ring
-
-From: Jerome Glisse <jglisse@redhat.com>
-
-commit 8444d5c69549aa0f0b574cc608742d4669e1cc01 upstream.
-
-There might be issue with lockup detection when scheduling on an
-empty ring that have been sitting idle for a while. Thus update
-the lockup tracking data when scheduling new work in an empty ring.
-
-Signed-off-by: Jerome Glisse <jglisse@redhat.com>
-Tested-by: Andy Lutomirski <luto@amacapital.net>
-Reviewed-by: Christian König <christian.koenig@amd.com>
-Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- drivers/gpu/drm/radeon/radeon_ring.c | 7 +++++++
- 1 file changed, 7 insertions(+)
-
---- a/drivers/gpu/drm/radeon/radeon_ring.c
-+++ b/drivers/gpu/drm/radeon/radeon_ring.c
-@@ -316,6 +316,13 @@ int radeon_ring_alloc(struct radeon_devi
- return -ENOMEM;
- /* Align requested size with padding so unlock_commit can
- * pad safely */
-+ radeon_ring_free_size(rdev, ring);
-+ if (ring->ring_free_dw == (ring->ring_size / 4)) {
-+ /* This is an empty ring update lockup info to avoid
-+ * false positive.
-+ */
-+ radeon_ring_lockup_update(ring);
-+ }
- ndw = (ndw + ring->align_mask) & ~ring->align_mask;
- while (ndw > (ring->ring_free_dw - 1)) {
- radeon_ring_free_size(rdev, ring);
--- /dev/null
+From beb93acfa3abbd2cc9923f5cb34c324081650dfc Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Date: Thu, 16 May 2013 22:25:34 +0000
+Subject: gianfar: add missing iounmap() on error in gianfar_ptp_probe()
+
+From: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+
+[ Upstream commit e5f5e380e0f3bb11f04ca5bc66a551e58e0ad26e ]
+
+Add the missing iounmap() before return from gianfar_ptp_probe()
+in the error handling case.
+
+Signed-off-by: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/freescale/gianfar_ptp.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
++++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
+@@ -520,6 +520,7 @@ static int gianfar_ptp_probe(struct plat
+ return 0;
+
+ no_clock:
++ iounmap(etsects->regs);
+ no_ioremap:
+ release_resource(etsects->rsrc);
+ no_resource:
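The bug class here is the usual probe() goto-unwind: each error label must undo
everything acquired before the failing step, so once ioremap() has succeeded,
every later failure path needs a matching iounmap(). A schematic sketch with
made-up names (not gianfar code):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/ioport.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *regs;
            struct clk *clk;
            int err = -ENODEV;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!res)
                    goto no_resource;

            regs = ioremap(res->start, resource_size(res));
            if (!regs) {
                    err = -ENOMEM;
                    goto no_ioremap;
            }

            clk = clk_get(&pdev->dev, NULL);        /* a later step that may fail */
            if (IS_ERR(clk)) {
                    err = PTR_ERR(clk);
                    goto no_clock;
            }

            return 0;

    no_clock:
            iounmap(regs);          /* without this line the mapping leaks */
    no_ioremap:
    no_resource:
            return err;
    }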
--- /dev/null
+From b7722eeecedff96f5e4adb8cb25cd03ef5c364d4 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 24 May 2013 05:49:58 +0000
+Subject: ip_tunnel: fix kernel panic with icmp_dest_unreach
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a622260254ee481747cceaaa8609985b29a31565 ]
+
+Daniel Petre reported crashes in icmp_dst_unreach() with the following call
+graph:
+
+Daniel found a similar problem mentioned in
+ http://lkml.indiana.edu/hypermail/linux/kernel/1007.0/00961.html
+
+And indeed this is the root cause: skb->cb[] contains data fooling the IP
+stack.
+
+We must clear IPCB in ip_tunnel_xmit() sooner in case dst_link_failure()
+is called. Or else skb->cb[] might contain garbage from GSO segmentation
+layer.
+
+A similar fix was tested on linux-3.9, but gre code was refactored in
+linux-3.10. I'll send patches for stable kernels as well.
+
+Many thanks to Daniel for providing reports, patches and testing !
+
+Reported-by: Daniel Petre <daniel.petre@rcs-rds.ro>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_gre.c | 2 +-
+ net/ipv4/ipip.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_gre.c
++++ b/net/ipv4/ip_gre.c
+@@ -722,6 +722,7 @@ static netdev_tx_t ipgre_tunnel_xmit(str
+ tiph = &tunnel->parms.iph;
+ }
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if ((dst = tiph->daddr) == 0) {
+ /* NBMA tunnel */
+
+@@ -865,7 +866,6 @@ static netdev_tx_t ipgre_tunnel_xmit(str
+ skb_reset_transport_header(skb);
+ skb_push(skb, gre_hlen);
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(stru
+ if (tos & 1)
+ tos = old_iph->tos;
+
++ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ if (!dst) {
+ /* NBMA tunnel */
+ if ((rt = skb_rtable(skb)) == NULL) {
+@@ -530,7 +531,6 @@ static netdev_tx_t ipip_tunnel_xmit(stru
+ skb->transport_header = skb->network_header;
+ skb_push(skb, sizeof(struct iphdr));
+ skb_reset_network_header(skb);
+- memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_drop(skb);
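For reference, the IPCB() being cleared earlier in the xmit path is IPv4's view
of the generic skb control buffer. Roughly, from include/net/ip.h of this era
(abridged):

    /* skb->cb[] is a 48-byte scratch area reused by every protocol layer;
     * IPv4 interprets it as parsed IP options plus flags:
     */
    struct inet_skb_parm {
            struct ip_options       opt;    /* compiled IP options */
            unsigned char           flags;
            /* ... */
    };

    #define IPCB(skb) ((struct inet_skb_parm *)((skb)->cb))

    /* If dst_link_failure() -> ipv4_link_failure() -> icmp_send() runs while
     * cb[] still holds GSO/upper-layer state, icmp_send() misreads that data
     * as IP options; zeroing IPCB(skb)->opt before any route lookup (as the
     * hunks above do) removes the garbage.
     */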
--- /dev/null
+From 1814b8d9a23c63988bdd6083ee25b87403a2bbae Mon Sep 17 00:00:00 2001
+From: Gao feng <gaofeng@cn.fujitsu.com>
+Date: Sun, 2 Jun 2013 22:16:21 +0000
+Subject: ipv6: assign rt6_info to inet6_ifaddr in init_loopback
+
+From: Gao feng <gaofeng@cn.fujitsu.com>
+
+[ Upstream commit 534c877928a16ae5f9776436a497109639bf67dc ]
+
+Commit 25fb6ca4ed9cad72f14f61629b68dc03c0d9713f
+"net IPv6 : Fix broken IPv6 routing table after loopback down-up"
+forgot to assign rt6_info to the inet6_ifaddr.
+When the net device is disabled, the rt6_info which was allocated
+in init_loopback will not be destroyed in __ipv6_ifa_notify.
+
+This will trigger the warning message below:
+[23527.916091] unregister_netdevice: waiting for tap0 to become free. Usage count = 1
+
+Reported-by: Arkadiusz Miskiewicz <a.miskiewicz@gmail.com>
+Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -2432,8 +2432,10 @@ static void init_loopback(struct net_dev
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+
+ /* Failure cases are ignored */
+- if (!IS_ERR(sp_rt))
++ if (!IS_ERR(sp_rt)) {
++ sp_ifa->rt = sp_rt;
+ ip6_ins_rt(sp_rt);
++ }
+ }
+ read_unlock_bh(&idev->lock);
+ }
--- /dev/null
+From 6f30b92ba3db306f87b9c1ffbdd08332ae4221d9 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 17 May 2013 04:53:13 +0000
+Subject: ipv6: fix possible crashes in ip6_cork_release()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 284041ef21fdf2e0d216ab6b787bc9072b4eb58a ]
+
+commit 0178b695fd6b4 ("ipv6: Copy cork options in ip6_append_data")
+added some code duplication and bad error recovery, leading to a potential
+crash in ip6_cork_release() as kfree() could be called with garbage.
+
+Use kzalloc() to make sure this won't happen.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
+Cc: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_output.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1236,7 +1236,7 @@ int ip6_append_data(struct sock *sk, int
+ if (WARN_ON(np->cork.opt))
+ return -EINVAL;
+
+- np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
++ np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
+ if (unlikely(np->cork.opt == NULL))
+ return -ENOBUFS;
+
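A userspace analogue of why kzalloc() matters here, assuming (as in the real
ipv6_txoptions case) a copied structure whose pointer members the release path
unconditionally frees even if the copy stopped early; member names below are
illustrative:

    #include <stdlib.h>

    struct opts {
            char *hopopt;
            char *dst0opt;
            char *srcrt;
    };

    static void opts_release(struct opts *o)
    {
            /* Safe only if members that were never set are NULL. */
            free(o->hopopt);
            free(o->dst0opt);
            free(o->srcrt);
            free(o);
    }

    int main(void)
    {
            /* calloc() plays the role of kzalloc(): every member starts NULL. */
            struct opts *o = calloc(1, sizeof(*o));

            if (!o)
                    return 1;
            o->hopopt = malloc(8);  /* copying stops early on an error path... */
            opts_release(o);        /* ...but the release stays safe */
            return 0;
    }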
--- /dev/null
+From 15819f0a26ae1915e9c7b13dc57bf2bc7b5cc272 Mon Sep 17 00:00:00 2001
+From: Guillaume Nault <g.nault@alphalink.fr>
+Date: Wed, 12 Jun 2013 16:07:23 +0200
+Subject: l2tp: Fix PPP header erasure and memory leak
+
+From: Guillaume Nault <g.nault@alphalink.fr>
+
+[ Upstream commit 55b92b7a11690bc377b5d373872a6b650ae88e64 ]
+
+Copy user data after PPP framing header. This prevents erasure of the
+added PPP header and avoids leaking two bytes of uninitialised memory
+at the end of skb's data buffer.
+
+Signed-off-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_ppp.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -350,12 +350,12 @@ static int pppol2tp_sendmsg(struct kiocb
+ skb_put(skb, 2);
+
+ /* Copy user data into skb */
+- error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
++ error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
++ total_len);
+ if (error < 0) {
+ kfree_skb(skb);
+ goto error_put_sess_tun;
+ }
+- skb_put(skb, total_len);
+
+ l2tp_xmit_skb(session, skb, session->hdr_len);
+
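The subtlety being fixed: skb_put() extends the tail of the skb data area and
returns a pointer to the bytes it just added, while skb->data keeps pointing at
the start of the buffer, i.e. at the PPP header written first. A schematic
fragment (not the complete pppol2tp_sendmsg(); declarations and error handling
omitted):

    /* Build: [ 0xff 0x03 | total_len bytes of user data ] */
    ppph = skb_put(skb, 2);                 /* 2-byte PPP header */
    ppph[0] = PPP_ALLSTATIONS;              /* 0xff */
    ppph[1] = PPP_UI;                       /* 0x03 */

    /* Buggy order: the copy lands at skb->data, on top of the header just
     * written, and the later skb_put(skb, total_len) then exposes two tail
     * bytes that were never written:
     *
     *      memcpy_fromiovec(skb->data, m->msg_iov, total_len);
     *      skb_put(skb, total_len);
     *
     * Fixed order: grow the area first and copy into exactly the bytes that
     * skb_put() returned, i.e. after the header:
     */
    memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, total_len);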
--- /dev/null
+From 800cb192403a75817ecb9123aaff12340715fdc7 Mon Sep 17 00:00:00 2001
+From: Guillaume Nault <g.nault@alphalink.fr>
+Date: Wed, 12 Jun 2013 16:07:36 +0200
+Subject: l2tp: Fix sendmsg() return value
+
+From: Guillaume Nault <g.nault@alphalink.fr>
+
+[ Upstream commit a6f79d0f26704214b5b702bbac525cb72997f984 ]
+
+PPPoL2TP sockets should comply with the standard send*() return values
+(i.e. return number of bytes sent instead of 0 upon success).
+
+Signed-off-by: Guillaume Nault <g.nault@alphalink.fr>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_ppp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -362,7 +362,7 @@ static int pppol2tp_sendmsg(struct kiocb
+ sock_put(ps->tunnel_sock);
+ sock_put(sk);
+
+- return error;
++ return total_len;
+
+ error_put_sess_tun:
+ sock_put(ps->tunnel_sock);
--- /dev/null
+From d82ab26fe98468628ffccc82bdb3d970b28ff200 Mon Sep 17 00:00:00 2001
+From: Andy Lutomirski <luto@amacapital.net>
+Date: Wed, 22 May 2013 14:07:44 -0700
+Subject: net: Block MSG_CMSG_COMPAT in send(m)msg and recv(m)msg
+
+From: Andy Lutomirski <luto@amacapital.net>
+
+[ Upstream commits 1be374a0518a288147c6a7398792583200a67261 and
+ a7526eb5d06b0084ef12d7b168d008fcf516caab ]
+
+MSG_CMSG_COMPAT is (AFAIK) not intended to be part of the API --
+it's a hack that steals a bit to indicate to other networking code
+that a compat entry was used. So don't allow it from a non-compat
+syscall.
+
+This prevents an oops when running this code:
+
+int main()
+{
+ int s;
+ struct sockaddr_in addr;
+ struct msghdr *hdr;
+
+ char *highpage = mmap((void*)(TASK_SIZE_MAX - 4096), 4096,
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (highpage == MAP_FAILED)
+ err(1, "mmap");
+
+ s = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
+ if (s == -1)
+ err(1, "socket");
+
+ addr.sin_family = AF_INET;
+ addr.sin_port = htons(1);
+ addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ if (connect(s, (struct sockaddr*)&addr, sizeof(addr)) != 0)
+ err(1, "connect");
+
+ void *evil = highpage + 4096 - COMPAT_MSGHDR_SIZE;
+ printf("Evil address is %p\n", evil);
+
+ if (syscall(__NR_sendmmsg, s, evil, 1, MSG_CMSG_COMPAT) < 0)
+ err(1, "sendmmsg");
+
+ return 0;
+}
+
+Signed-off-by: Andy Lutomirski <luto@amacapital.net>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/socket.h | 3 ++
+ net/compat.c | 13 ++++++++-
+ net/socket.c | 67 ++++++++++++++++++++++++++++++++-----------------
+ 3 files changed, 59 insertions(+), 24 deletions(-)
+
+--- a/include/linux/socket.h
++++ b/include/linux/socket.h
+@@ -336,6 +336,9 @@ extern int put_cmsg(struct msghdr*, int
+
+ struct timespec;
+
++/* The __sys_...msg variants allow MSG_CMSG_COMPAT */
++extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags);
++extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
+ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
+ unsigned int flags, struct timespec *timeout);
+ extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -743,19 +743,25 @@ static unsigned char nas[21] = {
+
+ asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags)
+ {
+- return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
+ unsigned vlen, unsigned int flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
+ {
+- return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+
+ asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags)
+@@ -777,6 +783,9 @@ asmlinkage long compat_sys_recvmmsg(int
+ int datagrams;
+ struct timespec ktspec;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (COMPAT_USE_64BIT_TIME)
+ return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+ flags | MSG_CMSG_COMPAT,
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -1899,9 +1899,9 @@ struct used_address {
+ unsigned int name_len;
+ };
+
+-static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags,
+- struct used_address *used_address)
++static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags,
++ struct used_address *used_address)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -2017,22 +2017,30 @@ out:
+ * BSD sendmsg interface
+ */
+
+-SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
++long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
++ err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_sendmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux sendmmsg interface
+ */
+@@ -2063,15 +2071,16 @@ int __sys_sendmmsg(int fd, struct mmsghd
+
+ while (datagrams < vlen) {
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_sendmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags, &used_address);
++ err = ___sys_sendmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags, &used_address);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2095,11 +2104,13 @@ int __sys_sendmmsg(int fd, struct mmsghd
+ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
+ unsigned int, vlen, unsigned int, flags)
+ {
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
+ return __sys_sendmmsg(fd, mmsg, vlen, flags);
+ }
+
+-static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
+- struct msghdr *msg_sys, unsigned flags, int nosec)
++static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
++ struct msghdr *msg_sys, unsigned flags, int nosec)
+ {
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+@@ -2192,23 +2203,31 @@ out:
+ * BSD recvmsg interface
+ */
+
+-SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+- unsigned int, flags)
++long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags)
+ {
+ int fput_needed, err;
+ struct msghdr msg_sys;
+- struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);
++ struct socket *sock;
+
++ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+- err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0);
++ err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0);
+
+ fput_light(sock->file, fput_needed);
+ out:
+ return err;
+ }
+
++SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
++ unsigned int, flags)
++{
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++ return __sys_recvmsg(fd, msg, flags);
++}
++
+ /*
+ * Linux recvmmsg interface
+ */
+@@ -2246,17 +2265,18 @@ int __sys_recvmmsg(int fd, struct mmsghd
+ * No need to ask LSM for more than the first datagram.
+ */
+ if (MSG_CMSG_COMPAT & flags) {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = __put_user(err, &compat_entry->msg_len);
+ ++compat_entry;
+ } else {
+- err = __sys_recvmsg(sock, (struct msghdr __user *)entry,
+- &msg_sys, flags & ~MSG_WAITFORONE,
+- datagrams);
++ err = ___sys_recvmsg(sock,
++ (struct msghdr __user *)entry,
++ &msg_sys, flags & ~MSG_WAITFORONE,
++ datagrams);
+ if (err < 0)
+ break;
+ err = put_user(err, &entry->msg_len);
+@@ -2323,6 +2343,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struc
+ int datagrams;
+ struct timespec timeout_sys;
+
++ if (flags & MSG_CMSG_COMPAT)
++ return -EINVAL;
++
+ if (!timeout)
+ return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL);
+
--- /dev/null
+From 6c8fd20ab8a15d7882d35278a1969c77acceb64d Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <eric.dumazet@gmail.com>
+Date: Wed, 29 May 2013 09:06:27 +0000
+Subject: net: force a reload of first item in hlist_nulls_for_each_entry_rcu
+
+From: Eric Dumazet <eric.dumazet@gmail.com>
+
+[ Upstream commit c87a124a5d5e8cf8e21c4363c3372bcaf53ea190 ]
+
+Roman Gushchin discovered that udp4_lib_lookup2() was not reloading the
+first item in the RCU-protected list when the loop was restarted.
+
+This produced soft lockups as in https://lkml.org/lkml/2013/4/16/37
+
+rcu_dereference(X)/ACCESS_ONCE(X) seem not to work as intended if X is
+ptr->field:
+
+In some cases, gcc caches the value of ptr->field in a register.
+
+Use a barrier() to disallow such caching, as documented in
+Documentation/atomic_ops.txt line 114
+
+Thanks a lot to Roman for providing analysis and numerous patches.
+
+Diagnosed-by: Roman Gushchin <klamm@yandex-team.ru>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Boris Zhmurov <zhmurov@yandex-team.ru>
+Signed-off-by: Roman Gushchin <klamm@yandex-team.ru>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/rculist_nulls.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/include/linux/rculist_nulls.h
++++ b/include/linux/rculist_nulls.h
+@@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_
+ * @head: the head for your list.
+ * @member: the name of the hlist_nulls_node within the struct.
+ *
++ * The barrier() is needed to make sure compiler doesn't cache first element [1],
++ * as this loop can be restarted [2]
++ * [1] Documentation/atomic_ops.txt around line 114
++ * [2] Documentation/RCU/rculist_nulls.txt around line 146
+ */
+ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \
+- for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
++ for (({barrier();}), \
++ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \
+ (!is_a_nulls(pos)) && \
+ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
+ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
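A standalone illustration (not the kernel macro itself) of the compiler-caching
hazard the added barrier() addresses: without it, the compiler may load the list
head once, keep it in a register, and reuse the stale value every time the
lookup restarts. The wrong_chain() helper below is a hypothetical stand-in for
"we walked onto another chain under a concurrent rehash", which is what forces
an RCU nulls lookup to restart:

    #include <stddef.h>

    #define barrier() __asm__ __volatile__("" ::: "memory")

    struct node { int key; struct node *next; };
    struct head { struct node *first; };

    static int wrong_chain(const struct node *pos) { (void)pos; return 0; }

    static struct node *lookup(struct head *h, int key)
    {
            struct node *pos;
    restart:
            barrier();              /* force a fresh load of h->first instead of
                                     * reusing a value cached before the restart */
            for (pos = h->first; pos != NULL; pos = pos->next) {
                    if (wrong_chain(pos))
                            goto restart;
                    if (pos->key == key)
                            return pos;
            }
            return NULL;
    }

    int main(void)
    {
            struct node n2 = { 2, NULL };
            struct node n1 = { 1, &n2 };
            struct head h = { &n1 };

            return lookup(&h, 2) == &n2 ? 0 : 1;
    }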
--- /dev/null
+From 4d0a2dfeefdac1692425ba99023b7f0107b80b10 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Thu, 6 Jun 2013 15:53:47 +0200
+Subject: net: sctp: fix NULL pointer dereference in socket destruction
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 1abd165ed757db1afdefaac0a4bc8a70f97d258c ]
+
+While stress testing sctp sockets, I hit the following panic:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
+IP: [<ffffffffa0490c4e>] sctp_endpoint_free+0xe/0x40 [sctp]
+PGD 7cead067 PUD 7ce76067 PMD 0
+Oops: 0000 [#1] SMP
+Modules linked in: sctp(F) libcrc32c(F) [...]
+CPU: 7 PID: 2950 Comm: acc Tainted: GF 3.10.0-rc2+ #1
+Hardware name: Dell Inc. PowerEdge T410/0H19HD, BIOS 1.6.3 02/01/2011
+task: ffff88007ce0e0c0 ti: ffff88007b568000 task.ti: ffff88007b568000
+RIP: 0010:[<ffffffffa0490c4e>] [<ffffffffa0490c4e>] sctp_endpoint_free+0xe/0x40 [sctp]
+RSP: 0018:ffff88007b569e08 EFLAGS: 00010292
+RAX: 0000000000000000 RBX: ffff88007db78a00 RCX: dead000000200200
+RDX: ffffffffa049fdb0 RSI: ffff8800379baf38 RDI: 0000000000000000
+RBP: ffff88007b569e18 R08: ffff88007c230da0 R09: 0000000000000001
+R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000
+R13: ffff880077990d00 R14: 0000000000000084 R15: ffff88007db78a00
+FS: 00007fc18ab61700(0000) GS:ffff88007fc60000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+CR2: 0000000000000020 CR3: 000000007cf9d000 CR4: 00000000000007e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
+Stack:
+ ffff88007b569e38 ffff88007db78a00 ffff88007b569e38 ffffffffa049fded
+ ffffffff81abf0c0 ffff88007db78a00 ffff88007b569e58 ffffffff8145b60e
+ 0000000000000000 0000000000000000 ffff88007b569eb8 ffffffff814df36e
+Call Trace:
+ [<ffffffffa049fded>] sctp_destroy_sock+0x3d/0x80 [sctp]
+ [<ffffffff8145b60e>] sk_common_release+0x1e/0xf0
+ [<ffffffff814df36e>] inet_create+0x2ae/0x350
+ [<ffffffff81455a6f>] __sock_create+0x11f/0x240
+ [<ffffffff81455bf0>] sock_create+0x30/0x40
+ [<ffffffff8145696c>] SyS_socket+0x4c/0xc0
+ [<ffffffff815403be>] ? do_page_fault+0xe/0x10
+ [<ffffffff8153cb32>] ? page_fault+0x22/0x30
+ [<ffffffff81544e02>] system_call_fastpath+0x16/0x1b
+Code: 0c c9 c3 66 2e 0f 1f 84 00 00 00 00 00 e8 fb fe ff ff c9 c3 66 0f
+ 1f 84 00 00 00 00 00 55 48 89 e5 53 48 83 ec 08 66 66 66 66 90 <48>
+ 8b 47 20 48 89 fb c6 47 1c 01 c6 40 12 07 e8 9e 68 01 00 48
+RIP [<ffffffffa0490c4e>] sctp_endpoint_free+0xe/0x40 [sctp]
+ RSP <ffff88007b569e08>
+CR2: 0000000000000020
+---[ end trace e0d71ec1108c1dd9 ]---
+
+I did not hit this with the lksctp-tools functional tests, but with a
+small, multi-threaded test program, that heavily allocates, binds,
+listens and waits in accept on sctp sockets, and then randomly kills
+some of them (no need for an actual client in this case to hit this).
+Then, again, allocating, binding, etc, and then killing child processes.
+
+This panic then only occurs when ``echo 1 > /proc/sys/net/sctp/auth_enable''
+is set. The cause for that is actually very simple: in sctp_endpoint_init()
+we enter the path of sctp_auth_init_hmacs(). There, we try to allocate
+our crypto transforms through crypto_alloc_hash(). In our scenario,
+it then can happen that crypto_alloc_hash() fails with -EINTR from
+crypto_larval_wait(), thus we bail out and release the socket via
+sk_common_release(), sctp_destroy_sock() and hit the NULL pointer
+dereference as soon as we try to access members in the endpoint during
+sctp_endpoint_free(), since endpoint at that time is still NULL. Now,
+if we have that case, we do not need to do any cleanup work and just
+leave the destruction handler.
+
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/socket.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3929,6 +3929,12 @@ SCTP_STATIC void sctp_destroy_sock(struc
+
+ /* Release our hold on the endpoint. */
+ sp = sctp_sk(sk);
++ /* This could happen during socket init, thus we bail out
++ * early, since the rest of the below is not setup either.
++ */
++ if (sp->ep == NULL)
++ return;
++
+ if (sp->do_auto_asconf) {
+ sp->do_auto_asconf = 0;
+ list_del(&sp->auto_asconf_list);
--- /dev/null
+From 9845a6ac809f073a62d6f51f6fac7740a3a37b92 Mon Sep 17 00:00:00 2001
+From: Paul Moore <pmoore@redhat.com>
+Date: Fri, 17 May 2013 09:08:50 +0000
+Subject: netlabel: improve domain mapping validation
+
+From: Paul Moore <pmoore@redhat.com>
+
+[ Upstream commit 6b21e1b77d1a3d58ebfd513264c885695e8a0ba5 ]
+
+The net/netlabel/netlabel_domainhash.c:netlbl_domhsh_add() function
+does not properly validate new domain hash entries resulting in
+potential problems when an administrator attempts to add an invalid
+entry. One such problem, as reported by Vlad Halilov, is a kernel
+BUG (found in netlabel_domainhash.c:netlbl_domhsh_audit_add()) when
+adding an IPv6 outbound mapping with a CIPSO configuration.
+
+This patch corrects this problem by adding the necessary validation
+code to netlbl_domhsh_add() via the newly created
+netlbl_domhsh_validate() function.
+
+Ideally this patch should also be pushed to the currently active
+-stable trees.
+
+Reported-by: Vlad Halilov <vlad.halilov@gmail.com>
+Signed-off-by: Paul Moore <pmoore@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/netlabel/netlabel_domainhash.c | 69 +++++++++++++++++++++++++++++++++++++
+ 1 file changed, 69 insertions(+)
+
+--- a/net/netlabel/netlabel_domainhash.c
++++ b/net/netlabel/netlabel_domainhash.c
+@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(stru
+ }
+ }
+
++/**
++ * netlbl_domhsh_validate - Validate a new domain mapping entry
++ * @entry: the entry to validate
++ *
++ * This function validates the new domain mapping entry to ensure that it is
++ * a valid entry. Returns zero on success, negative values on failure.
++ *
++ */
++static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
++{
++ struct netlbl_af4list *iter4;
++ struct netlbl_domaddr4_map *map4;
++#if IS_ENABLED(CONFIG_IPV6)
++ struct netlbl_af6list *iter6;
++ struct netlbl_domaddr6_map *map6;
++#endif /* IPv6 */
++
++ if (entry == NULL)
++ return -EINVAL;
++
++ switch (entry->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (entry->type_def.cipsov4 != NULL ||
++ entry->type_def.addrsel != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (entry->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_ADDRSELECT:
++ netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
++ map4 = netlbl_domhsh_addr4_entry(iter4);
++ switch (map4->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ if (map4->type_def.cipsov4 != NULL)
++ return -EINVAL;
++ break;
++ case NETLBL_NLTYPE_CIPSOV4:
++ if (map4->type_def.cipsov4 == NULL)
++ return -EINVAL;
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#if IS_ENABLED(CONFIG_IPV6)
++ netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
++ map6 = netlbl_domhsh_addr6_entry(iter6);
++ switch (map6->type) {
++ case NETLBL_NLTYPE_UNLABELED:
++ break;
++ default:
++ return -EINVAL;
++ }
++ }
++#endif /* IPv6 */
++ break;
++ default:
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ /*
+ * Domain Hash Table Functions
+ */
+@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_
+ struct netlbl_af6list *tmp6;
+ #endif /* IPv6 */
+
++ ret_val = netlbl_domhsh_validate(entry);
++ if (ret_val != 0)
++ return ret_val;
++
+ /* XXX - we can remove this RCU read lock as the spinlock protects the
+ * entire function, but before we do we need to fixup the
+ * netlbl_af[4,6]list RCU functions to do "the right thing" with
--- /dev/null
+From adca36db5f6cf43e1459ae8840eb5dceeaa14146 Mon Sep 17 00:00:00 2001
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Wed, 12 Jun 2013 16:02:27 +0200
+Subject: packet: packet_getname_spkt: make sure string is always 0-terminated
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 2dc85bf323515e59e15dfa858d1472bb25cad0fe ]
+
+uaddr->sa_data is exactly of size 14, which is hard-coded here and
+passed as a size argument to strncpy(). A device name can be of size
+IFNAMSIZ (== 16), meaning we might leave the destination string
+unterminated. Thus, use strlcpy() and also sizeof() while we're
+at it. We need to memset the data area beforehand, since strlcpy
+does not pad the remaining buffer with zeroes for user space, so
+that we do not possibly leak anything.
+
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2848,12 +2848,11 @@ static int packet_getname_spkt(struct so
+ return -EOPNOTSUPP;
+
+ uaddr->sa_family = AF_PACKET;
++ memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
+ rcu_read_lock();
+ dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
+ if (dev)
+- strncpy(uaddr->sa_data, dev->name, 14);
+- else
+- memset(uaddr->sa_data, 0, 14);
++ strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
+ rcu_read_unlock();
+ *uaddr_len = sizeof(*uaddr);
+
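A standalone demo of the two properties this patch leans on: strncpy() does not
NUL-terminate when the source fills the buffer, and strlcpy() terminates but
does not zero the tail (hence the memset before copying into memory handed to
user space). A local strlcpy() stand-in is included since glibc does not
provide one:

    #include <stdio.h>
    #include <string.h>

    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
            size_t len = strlen(src);

            if (size) {
                    size_t n = len < size - 1 ? len : size - 1;

                    memcpy(dst, src, n);
                    dst[n] = '\0';
            }
            return len;
    }

    int main(void)
    {
            const char ifname[] = "verylongdevname0";      /* 16 chars, like IFNAMSIZ */
            char sa_data[14];

            strncpy(sa_data, ifname, sizeof(sa_data));
            /* sa_data now holds 14 name bytes and no terminator at all. */
            printf("last byte after strncpy: '%c'\n", sa_data[13]);

            memset(sa_data, 0, sizeof(sa_data));            /* zero the tail...      */
            my_strlcpy(sa_data, ifname, sizeof(sa_data));   /* ...then copy, bounded */
            printf("after memset + strlcpy: \"%s\"\n", sa_data);
            return 0;
    }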
--- /dev/null
+From 362a02634174ad96998b43c166eb4edfbf27710a Mon Sep 17 00:00:00 2001
+From: Francois Romieu <romieu@fr.zoreil.com>
+Date: Sat, 18 May 2013 01:24:46 +0000
+Subject: r8169: fix offloaded tx checksum for small packets.
+
+From: Francois Romieu <romieu@fr.zoreil.com>
+
+[ Upstream commit b423e9ae49d78ea3f53b131c8d5a6087aed16fd6 ]
+
+8168evl offloaded checksums are wrong since commit
+e5195c1f31f399289347e043d6abf3ffa80f0005 ("r8169: fix 8168evl frame padding.")
+pads small packets to 60 bytes (without ethernet checksum). Typical symptoms
+appear as UDP checksums which are wrong by the count of added bytes.
+
+It isn't worth compensating. Let the driver checksum.
+
+Due to the skb length changes, TSO code is moved before the Tx descriptor gets
+written.
+
+Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
+Tested-by: Holger Hoffstätte <holger.hoffstaette@googlemail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 41 +++++++++++++++++++++++------------
+ 1 file changed, 27 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5126,7 +5126,20 @@ err_out:
+ return -EIO;
+ }
+
+-static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
++static bool rtl_skb_pad(struct sk_buff *skb)
++{
++ if (skb_padto(skb, ETH_ZLEN))
++ return false;
++ skb_put(skb, ETH_ZLEN - skb->len);
++ return true;
++}
++
++static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
++{
++ return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
++}
++
++static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
+ struct sk_buff *skb, u32 *opts)
+ {
+ const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
+@@ -5139,13 +5152,20 @@ static inline void rtl8169_tso_csum(stru
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
++
+ if (ip->protocol == IPPROTO_TCP)
+ opts[offset] |= info->checksum.tcp;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts[offset] |= info->checksum.udp;
+ else
+ WARN_ON_ONCE(1);
++ } else {
++ if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
++ return rtl_skb_pad(skb);
+ }
++ return true;
+ }
+
+ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+@@ -5166,17 +5186,15 @@ static netdev_tx_t rtl8169_start_xmit(st
+ goto err_stop_0;
+ }
+
+- /* 8168evl does not automatically pad to minimum length. */
+- if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
+- skb->len < ETH_ZLEN)) {
+- if (skb_padto(skb, ETH_ZLEN))
+- goto err_update_stats;
+- skb_put(skb, ETH_ZLEN - skb->len);
+- }
+-
+ if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
+ goto err_stop_0;
+
++ opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
++ opts[0] = DescOwn;
++
++ if (!rtl8169_tso_csum(tp, skb, opts))
++ goto err_update_stats;
++
+ len = skb_headlen(skb);
+ mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(d, mapping))) {
+@@ -5188,11 +5206,6 @@ static netdev_tx_t rtl8169_start_xmit(st
+ tp->tx_skb[entry].len = len;
+ txd->addr = cpu_to_le64(mapping);
+
+- opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(tp, skb));
+- opts[0] = DescOwn;
+-
+- rtl8169_tso_csum(tp, skb, opts);
+-
+ frags = rtl8169_xmit_frags(tp, skb, opts);
+ if (frags < 0)
+ goto err_dma_1;
clk-remove-notifier-from-list-before-freeing-it.patch
tilepro-work-around-module-link-error-with-gcc-4.7.patch
kvm-x86-remove-vcpu-s-cpl-check-in-host-invoked-xcr-set.patch
-drm-radeon-update-lockup-tracking-when-scheduling-in-empty-ring.patch
+tcp-fix-tcp_md5_hash_skb_data.patch
+gianfar-add-missing-iounmap-on-error-in-gianfar_ptp_probe.patch
+ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+netlabel-improve-domain-mapping-validation.patch
+r8169-fix-offloaded-tx-checksum-for-small-packets.patch
+8139cp-reset-bql-when-ring-tx-ring-cleared.patch
+tcp-bug-fix-in-proportional-rate-reduction.patch
+tcp-xps-fix-reordering-issues.patch
+ip_tunnel-fix-kernel-panic-with-icmp_dest_unreach.patch
+net-block-msg_cmsg_compat-in-send-m-msg-and-recv-m-msg.patch
+net-force-a-reload-of-first-item-in-hlist_nulls_for_each_entry_rcu.patch
+ipv6-assign-rt6_info-to-inet6_ifaddr-in-init_loopback.patch
+net-sctp-fix-null-pointer-dereference-in-socket-destruction.patch
+team-check-return-value-of-team_get_port_by_index_rcu-for-null.patch
+packet-packet_getname_spkt-make-sure-string-is-always-0-terminated.patch
+l2tp-fix-ppp-header-erasure-and-memory-leak.patch
+l2tp-fix-sendmsg-return-value.patch
+bonding-rlb-mode-of-bond-should-not-alter-arp-originating-via-bridge.patch
--- /dev/null
+From f4659a2d152d8d191c5fb88023e07a621847d0bd Mon Sep 17 00:00:00 2001
+From: Nandita Dukkipati <nanditad@google.com>
+Date: Tue, 21 May 2013 15:12:07 +0000
+Subject: tcp: bug fix in proportional rate reduction.
+
+From: Nandita Dukkipati <nanditad@google.com>
+
+[ Upstream commit 35f079ebbc860dcd1cca70890c9c8d59c1145525 ]
+
+This patch is a fix for a bug triggering newly_acked_sacked < 0
+in tcp_ack(.).
+
+The bug is triggered by sacked_out decreasing relative to prior_sacked,
+but packets_out remaining the same as prior_packets. This is because the
+snapshot of prior_packets is taken after tcp_sacktag_write_queue() while
+prior_sacked is captured before tcp_sacktag_write_queue(). The problem
+is: tcp_sacktag_write_queue (tcp_match_skb_to_sack() -> tcp_fragment)
+adjusts the pcount for packets_out and sacked_out (MSS change or other
+reason). As a result, this delta in pcount is reflected in
+(prior_sacked - sacked_out) but not in (prior_packets - packets_out).
+
+This patch does the following:
+1) initializes prior_packets at the start of tcp_ack() so as to
+capture the delta in packets_out created by tcp_fragment.
+2) introduces a new "previous_packets_out" variable that snapshots
+packets_out right before tcp_clean_rtx_queue, so pkts_acked can be
+correctly computed as before.
+3) Computes pkts_acked using previous_packets_out, and computes
+newly_acked_sacked using prior_packets.
+
+Signed-off-by: Nandita Dukkipati <nanditad@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 23 +++++++++++++----------
+ 1 file changed, 13 insertions(+), 10 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -3038,8 +3038,8 @@ static void tcp_update_cwnd_in_recovery(
+ * tcp_xmit_retransmit_queue().
+ */
+ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
+- int prior_sacked, bool is_dupack,
+- int flag)
++ int prior_sacked, int prior_packets,
++ bool is_dupack, int flag)
+ {
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+@@ -3105,7 +3105,8 @@ static void tcp_fastretrans_alert(struct
+ tcp_add_reno_sack(sk);
+ } else
+ do_lost = tcp_try_undo_partial(sk, pkts_acked);
+- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
++ newly_acked_sacked = prior_packets - tp->packets_out +
++ tp->sacked_out - prior_sacked;
+ break;
+ case TCP_CA_Loss:
+ if (flag & FLAG_DATA_ACKED)
+@@ -3127,7 +3128,8 @@ static void tcp_fastretrans_alert(struct
+ if (is_dupack)
+ tcp_add_reno_sack(sk);
+ }
+- newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
++ newly_acked_sacked = prior_packets - tp->packets_out +
++ tp->sacked_out - prior_sacked;
+
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder)
+ tcp_try_undo_dsack(sk);
+@@ -3740,9 +3742,10 @@ static int tcp_ack(struct sock *sk, cons
+ bool is_dupack = false;
+ u32 prior_in_flight;
+ u32 prior_fackets;
+- int prior_packets;
++ int prior_packets = tp->packets_out;
+ int prior_sacked = tp->sacked_out;
+ int pkts_acked = 0;
++ int previous_packets_out = 0;
+ int frto_cwnd = 0;
+
+ /* If the ack is older than previous acks
+@@ -3819,14 +3822,14 @@ static int tcp_ack(struct sock *sk, cons
+ sk->sk_err_soft = 0;
+ icsk->icsk_probes_out = 0;
+ tp->rcv_tstamp = tcp_time_stamp;
+- prior_packets = tp->packets_out;
+ if (!prior_packets)
+ goto no_queue;
+
+ /* See if we can take anything off of the retransmit queue. */
++ previous_packets_out = tp->packets_out;
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+
+- pkts_acked = prior_packets - tp->packets_out;
++ pkts_acked = previous_packets_out - tp->packets_out;
+
+ if (tp->frto_counter)
+ frto_cwnd = tcp_process_frto(sk, flag);
+@@ -3841,7 +3844,7 @@ static int tcp_ack(struct sock *sk, cons
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+ is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+- is_dupack, flag);
++ prior_packets, is_dupack, flag);
+ } else {
+ if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+@@ -3856,7 +3859,7 @@ no_queue:
+ /* If data was DSACKed, see if we can undo a cwnd reduction. */
+ if (flag & FLAG_DSACKING_ACK)
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+- is_dupack, flag);
++ prior_packets, is_dupack, flag);
+ /* If this ack opens up a zero window, clear backoff. It was
+ * being used to time the probes, and is probably far higher than
+ * it needs to be for normal retransmission.
+@@ -3876,7 +3879,7 @@ old_ack:
+ if (TCP_SKB_CB(skb)->sacked) {
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
+- is_dupack, flag);
++ prior_packets, is_dupack, flag);
+ }
+
+ SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
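A worked example with hypothetical numbers showing how the old snapshot order
made newly_acked_sacked go negative. Assume tcp_fragment() inside the sacktag
pass drops one pcount from a SACKed skb (so packets_out and sacked_out both
shrink by one) while the ACK itself acks nothing new:

    #include <stdio.h>

    int main(void)
    {
            /* before tcp_sacktag_write_queue(): 10 packets out, 5 SACKed */
            int prior_sacked = 5;           /* snapshotted before sacktag    */
            int prior_packets_early = 10;   /* what the fixed code snapshots */

            /* after sacktag: both counters dropped by the pcount adjustment */
            int packets_out = 9;
            int sacked_out = 4;
            int prior_packets_late = 9;     /* old code: snapshot after sacktag */

            int pkts_acked = prior_packets_late - packets_out;              /*  0 */
            int newly_old  = pkts_acked + sacked_out - prior_sacked;        /* -1 */
            int newly_new  = prior_packets_early - packets_out
                             + sacked_out - prior_sacked;                   /*  0 */

            printf("old newly_acked_sacked=%d, fixed=%d\n", newly_old, newly_new);
            return 0;
    }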
--- /dev/null
+From 5e15f343f4948f1ca6e341f84538b06e0cc8bfd0 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 13 May 2013 21:25:52 +0000
+Subject: tcp: fix tcp_md5_hash_skb_data()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 54d27fcb338bd9c42d1dfc5a39e18f6f9d373c2e ]
+
+TCP md5 communications fail [1] for some devices, because the sg/crypto code
+assumes page offsets are below PAGE_SIZE.
+
+This was discovered using the mlx4 driver [2], but I suspect loopback
+might trigger the same bug now that we use order-3 pages in tcp_sendmsg()
+
+[1] Failure is giving following messages.
+
+huh, entered softirq 3 NET_RX ffffffff806ad230 preempt_count 00000100,
+exited with 00000101?
+
+[2] mlx4 driver uses order-2 pages to allocate RX frags
+
+Reported-by: Matt Schnall <mischnal@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Bernhard Beck <bbeck@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3055,8 +3055,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5
+
+ for (i = 0; i < shi->nr_frags; ++i) {
+ const struct skb_frag_struct *f = &shi->frags[i];
+- struct page *page = skb_frag_page(f);
+- sg_set_page(&sg, page, skb_frag_size(f), f->page_offset);
++ unsigned int offset = f->page_offset;
++ struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
++
++ sg_set_page(&sg, page, skb_frag_size(f),
++ offset_in_page(offset));
+ if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
+ return 1;
+ }
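The arithmetic of the fix, checked standalone: when a fragment's page_offset
exceeds PAGE_SIZE (possible once frags sit in order-2 or order-3 compound pages),
the scatterlist needs a page pointer advanced by whole pages plus an in-page
offset rather than the raw (page, large offset) pair:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))
    #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)

    int main(void)
    {
            /* an offset 9000 bytes into a 32 KB (order-3) frag */
            unsigned long page_offset = 9000;
            unsigned long page_advance = page_offset >> PAGE_SHIFT;

            /* prints: advance 2 pages, in-page offset 808 */
            printf("advance %lu pages, in-page offset %lu\n",
                   page_advance, offset_in_page(page_offset));
            return 0;
    }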
--- /dev/null
+From eb473853522f6481d2c933109be2f8236fa8913d Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 23 May 2013 07:44:20 +0000
+Subject: tcp: xps: fix reordering issues
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 547669d483e5783d722772af1483fa474da7caf9 ]
+
+commit 3853b5841c01a ("xps: Improvements in TX queue selection")
+introduced the ooo_okay flag, but the condition to set it is slightly wrong.
+
+In our traces, we have seen ACK packets being received out of order,
+and RST packets sent in response.
+
+We should test if we have any packets still in the host queue.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_output.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -835,11 +835,13 @@ static int tcp_transmit_skb(struct sock
+ &md5);
+ tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
+
+- if (tcp_packets_in_flight(tp) == 0) {
++ if (tcp_packets_in_flight(tp) == 0)
+ tcp_ca_event(sk, CA_EVENT_TX_START);
+- skb->ooo_okay = 1;
+- } else
+- skb->ooo_okay = 0;
++
++ /* if no packet is in qdisc/device queue, then allow XPS to select
++ * another queue.
++ */
++ skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
+
+ skb_push(skb, tcp_header_size);
+ skb_reset_transport_header(skb);
--- /dev/null
+From 83ac5bd697e4fde6357fe4611f7f03edc547f8a9 Mon Sep 17 00:00:00 2001
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Sat, 8 Jun 2013 15:00:53 +0200
+Subject: team: check return value of team_get_port_by_index_rcu() for NULL
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 76c455decbbad31de21c727edb184a963f42b40b ]
+
+team_get_port_by_index_rcu() might return NULL due to a race between port
+removal and the skb tx path. A panic is easily triggerable when txing packets
+and adding/removing a port in a loop.
+
+introduced by commit 3d249d4ca "net: introduce ethernet teaming device"
+and commit 753f993911b "team: introduce random mode" (for random mode)
+
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team_mode_roundrobin.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/team/team_mode_roundrobin.c
++++ b/drivers/net/team/team_mode_roundrobin.c
+@@ -52,6 +52,8 @@ static bool rr_transmit(struct team *tea
+
+ port_index = rr_priv(team)->sent_packets++ % team->port_count;
+ port = team_get_port_by_index_rcu(team, port_index);
++ if (unlikely(!port))
++ goto drop;
+ port = __get_first_port_up(team, port);
+ if (unlikely(!port))
+ goto drop;