--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+Date: Sun, 25 Jan 2015 16:09:23 +0530
+Subject: bnx2x: fix napi poll return value for repoll
+
+From: Govindarajulu Varadarajan <_govind@gmx.com>
+
+[ Upstream commit 24e579c8898aa641ede3149234906982290934e5 ]
+
+Since commit d75b1ade567ffab ("net: less interrupt masking in NAPI"), NAPI
+repoll is done only when work_done == budget. When we are in busy_poll we
+return 0 from napi_poll. We should return budget instead.
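+
+As a minimal sketch of that contract (generic names, not the bnx2x code),
+a poller that could not do its work must hand back the full budget:
+
+    static int example_poll(struct napi_struct *napi, int budget)
+    {
+            int work_done;
+
+            if (!example_trylock(napi))     /* hypothetical lock helper */
+                    return budget;          /* did no work: request repoll */
+
+            work_done = example_process_rx(napi, budget);
+            example_unlock(napi);
+
+            if (work_done < budget)
+                    napi_complete(napi);    /* done, re-enable interrupts */
+
+            return work_done;               /* == budget means "poll again" */
+    }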
+
+Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -3131,7 +3131,7 @@ static int bnx2x_poll(struct napi_struct
+ }
+ #endif
+ if (!bnx2x_fp_lock_napi(fp))
+- return work_done;
++ return budget;
+
+ for_each_cos_in_tx_queue(fp, cos)
+ if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Roopa Prabhu <roopa@cumulusnetworks.com>
+Date: Wed, 28 Jan 2015 16:23:11 -0800
+Subject: bridge: dont send notification when skb->len == 0 in rtnl_bridge_notify
+
+From: Roopa Prabhu <roopa@cumulusnetworks.com>
+
+[ Upstream commit 59ccaaaa49b5b096cdc1f16706a9f931416b2332 ]
+
+Reported in: https://bugzilla.kernel.org/show_bug.cgi?id=92081
+
+This patch avoids calling rtnl_notify if the device ndo_bridge_getlink
+handler does not return any bytes in the skb.
+
+Alternatively, the skb->len check can be moved inside rtnl_notify.
+
+For the bridge vlan case described in 92081, a fix is also needed in the
+bridge driver to generate a proper notification. That will be fixed in a
+subsequent patch.
+
+v2: rebase patch on net tree
+
+Signed-off-by: Roopa Prabhu <roopa@cumulusnetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2649,12 +2649,16 @@ static int rtnl_bridge_notify(struct net
+ goto errout;
+ }
+
++ if (!skb->len)
++ goto errout;
++
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
+ return 0;
+ errout:
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
++ if (err)
++ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ return err;
+ }
+
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Willem de Bruijn <willemb@google.com>
+Date: Thu, 15 Jan 2015 13:18:40 -0500
+Subject: ip: zero sockaddr returned on error queue
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit f812116b174e59a350acc8e4856213a166a91222 ]
+
+The sockaddr is returned in IP(V6)_RECVERR as part of errhdr. That
+structure is defined and allocated on the stack as
+
+ struct {
+ struct sock_extended_err ee;
+ struct sockaddr_in(6) offender;
+ } errhdr;
+
+The second part is only initialized for certain SO_EE_ORIGIN values.
+Always initialize it completely.
+
+An MTU-exceeded error on a SOCK_RAW/IPPROTO_RAW socket is one example that
+would return uninitialized bytes.
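+
+As a sketch of how these bytes reach userspace (SO_EE_OFFENDER is the
+macro from linux/errqueue.h; buffer sizes are arbitrary):
+
+    char cbuf[512], dbuf[256];
+    struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
+    struct msghdr msg = {
+            .msg_iov = &iov, .msg_iovlen = 1,
+            .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
+    };
+    struct cmsghdr *cm;
+
+    if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
+            return;
+    for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
+            if (cm->cmsg_level == SOL_IP && cm->cmsg_type == IP_RECVERR) {
+                    struct sock_extended_err *ee =
+                            (struct sock_extended_err *)CMSG_DATA(cm);
+                    struct sockaddr *offender = SO_EE_OFFENDER(ee);
+                    /* without this patch, *offender may hold
+                     * uninitialized kernel stack for some origins */
+            }
+    }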
+
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+
+----
+
+Also verified that there is no padding between errhdr.ee and
+errhdr.offender that could leak additional kernel data.
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_sockglue.c | 8 ++------
+ net/ipv6/datagram.c | 10 +++-------
+ 2 files changed, 5 insertions(+), 13 deletions(-)
+
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -426,15 +426,11 @@ int ip_recv_error(struct sock *sk, struc
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ sin = &errhdr.offender;
+- sin->sin_family = AF_UNSPEC;
++ memset(sin, 0, sizeof(*sin));
+ if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
+- struct inet_sock *inet = inet_sk(sk);
+-
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+- sin->sin_port = 0;
+- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
+- if (inet->cmsg_flags)
++ if (inet_sk(sk)->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ }
+
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -382,11 +382,10 @@ int ipv6_recv_error(struct sock *sk, str
+
+ memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
+ sin = &errhdr.offender;
+- sin->sin6_family = AF_UNSPEC;
++ memset(sin, 0, sizeof(*sin));
++
+ if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
+ sin->sin6_family = AF_INET6;
+- sin->sin6_flowinfo = 0;
+- sin->sin6_port = 0;
+ if (np->rxopt.all)
+ ip6_datagram_recv_common_ctl(sk, msg, skb);
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+@@ -397,12 +396,9 @@ int ipv6_recv_error(struct sock *sk, str
+ ipv6_iface_scope_id(&sin->sin6_addr,
+ IP6CB(skb)->iif);
+ } else {
+- struct inet_sock *inet = inet_sk(sk);
+-
+ ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
+ &sin->sin6_addr);
+- sin->sin6_scope_id = 0;
+- if (inet->cmsg_flags)
++ if (inet_sk(sk)->cmsg_flags)
+ ip_cmsg_recv(msg, skb);
+ }
+ }
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 29 Jan 2015 21:35:05 -0800
+Subject: ipv4: tcp: get rid of ugly unicast_sock
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit bdbbb8527b6f6a358dbcb70dac247034d665b8e4 ]
+
+In commit be9f4a44e7d41 ("ipv4: tcp: remove per net tcp_sock")
+I tried to address contention on a socket lock, but the solution
+I chose was horrible:
+
+commit 3a7c384ffd57e ("ipv4: tcp: unicast_sock should not land outside
+of TCP stack") addressed an SELinux regression.
+
+commit 0980e56e506b ("ipv4: tcp: set unicast_sock uc_ttl to -1")
+took care of another regression.
+
+commit b5ec8eeac46 ("ipv4: fix ip_send_skb()") fixed another regression.
+
+commit 811230cd85 ("tcp: ipv4: initialize unicast_sock sk_pacing_rate")
+was another shot in the dark.
+
+Really, just use a proper socket per cpu, and remove the skb_orphan()
+call, to re-enable flow control.
+
+This solves a serious problem with FQ packet scheduler when used in
+hostile environments, as we do not want to allocate a flow structure
+for every RST packet sent in response to a spoofed packet.
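+
+Condensed to a sketch, the shape of the replacement (matching the hunks
+below) is:
+
+    /* at netns init: one kernel socket per possible cpu */
+    net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+    for_each_possible_cpu(cpu) {
+            res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+                                       IPPROTO_TCP, net);
+            if (res)
+                    goto fail;
+            *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+    }
+
+    /* on the reply path: borrow this cpu's socket instead of the
+     * fake shared inet_sock */
+    ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb,
+                          ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
+                          &arg, arg.iov[0].iov_len);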
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h | 2 +-
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/ip_output.c | 30 +++---------------------------
+ net/ipv4/tcp_ipv4.c | 37 ++++++++++++++++++++++++++++++++-----
+ 4 files changed, 37 insertions(+), 33 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -175,7 +175,7 @@ static inline __u8 ip_reply_arg_flowi_fl
+ return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
+ }
+
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len);
+
+--- a/include/net/netns/ipv4.h
++++ b/include/net/netns/ipv4.h
+@@ -47,6 +47,7 @@ struct netns_ipv4 {
+ struct inet_peer_base *peers;
+ struct tcpm_hash_bucket *tcp_metrics_hash;
+ unsigned int tcp_metrics_hash_log;
++ struct sock * __percpu *tcp_sk;
+ struct netns_frags frags;
+ #ifdef CONFIG_NETFILTER
+ struct xt_table *iptable_filter;
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1460,24 +1460,8 @@ static int ip_reply_glue_bits(void *dptr
+ /*
+ * Generic function to send a packet as reply to another packet.
+ * Used to send some TCP resets/acks so far.
+- *
+- * Use a fake percpu inet socket to avoid false sharing and contention.
+ */
+-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
+- .sk = {
+- .__sk_common = {
+- .skc_refcnt = ATOMIC_INIT(1),
+- },
+- .sk_wmem_alloc = ATOMIC_INIT(1),
+- .sk_allocation = GFP_ATOMIC,
+- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
+- .sk_pacing_rate = ~0U,
+- },
+- .pmtudisc = IP_PMTUDISC_WANT,
+- .uc_ttl = -1,
+-};
+-
+-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
++void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
+ __be32 saddr, const struct ip_reply_arg *arg,
+ unsigned int len)
+ {
+@@ -1485,9 +1469,8 @@ void ip_send_unicast_reply(struct net *n
+ struct ipcm_cookie ipc;
+ struct flowi4 fl4;
+ struct rtable *rt = skb_rtable(skb);
++ struct net *net = sock_net(sk);
+ struct sk_buff *nskb;
+- struct sock *sk;
+- struct inet_sock *inet;
+ int err;
+
+ if (ip_options_echo(&replyopts.opt.opt, skb))
+@@ -1517,15 +1500,11 @@ void ip_send_unicast_reply(struct net *n
+ if (IS_ERR(rt))
+ return;
+
+- inet = &get_cpu_var(unicast_sock);
++ inet_sk(sk)->tos = arg->tos;
+
+- inet->tos = arg->tos;
+- sk = &inet->sk;
+ sk->sk_priority = skb->priority;
+ sk->sk_protocol = ip_hdr(skb)->protocol;
+ sk->sk_bound_dev_if = arg->bound_dev_if;
+- sock_net_set(sk, net);
+- __skb_queue_head_init(&sk->sk_write_queue);
+ sk->sk_sndbuf = sysctl_wmem_default;
+ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
+ len, 0, &ipc, &rt, MSG_DONTWAIT);
+@@ -1541,13 +1520,10 @@ void ip_send_unicast_reply(struct net *n
+ arg->csumoffset) = csum_fold(csum_add(nskb->csum,
+ arg->csum));
+ nskb->ip_summed = CHECKSUM_NONE;
+- skb_orphan(nskb);
+ skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
+ ip_push_pending_frames(sk, &fl4);
+ }
+ out:
+- put_cpu_var(unicast_sock);
+-
+ ip_rt_put(rt);
+ }
+
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -691,7 +691,8 @@ static void tcp_v4_send_reset(struct soc
+
+ net = dev_net(skb_dst(skb)->dev);
+ arg.tos = ip_hdr(skb)->tos;
+- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++ skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -774,7 +775,8 @@ static void tcp_v4_send_ack(struct sk_bu
+ if (oif)
+ arg.bound_dev_if = oif;
+ arg.tos = tos;
+- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
++ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
++ skb, ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
+
+ TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+@@ -2769,14 +2771,39 @@ struct proto tcp_prot = {
+ };
+ EXPORT_SYMBOL(tcp_prot);
+
++static void __net_exit tcp_sk_exit(struct net *net)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
++ free_percpu(net->ipv4.tcp_sk);
++}
++
+ static int __net_init tcp_sk_init(struct net *net)
+ {
++ int res, cpu;
++
++ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
++ if (!net->ipv4.tcp_sk)
++ return -ENOMEM;
++
++ for_each_possible_cpu(cpu) {
++ struct sock *sk;
++
++ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
++ IPPROTO_TCP, net);
++ if (res)
++ goto fail;
++ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
++ }
+ net->ipv4.sysctl_tcp_ecn = 2;
+ return 0;
+-}
+
+-static void __net_exit tcp_sk_exit(struct net *net)
+-{
++fail:
++ tcp_sk_exit(net);
++
++ return res;
+ }
+
+ static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Fri, 23 Jan 2015 12:01:26 +0100
+Subject: ipv4: try to cache dst_entries which would cause a redirect
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit df4d92549f23e1c037e83323aff58a21b3de7fe0 ]
+
+Not caching dst_entries which cause redirects could be exploited by hosts
+on the same subnet, causing a severe DoS attack. This effect is aggravated
+since commit f88649721268999 ("ipv4: fix dst race in sk_dst_get()").
+
+Lookups causing redirects will be allocated with DST_NOCACHE set, which
+will force dst_release to free them via RCU. Unfortunately, waiting for
+the RCU grace period just takes too long; we can end up with >1M dst_entries
+waiting to be released, and the system will run out of memory. rcuos threads
+cannot catch up under high softirq load.
+
+Attaching the flag that requests a later redirect to the specific skb allows
+us to cache those dst_entries, thus reducing the pressure on allocation
+and deallocation.
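+
+Condensed, the decision moves from the cached route to the packet
+(sketch of the hunks below):
+
+    /* before: flagging the route forced do_cache = false */
+    flags |= RTCF_DOREDIRECT;               /* route flags */
+
+    /* after: the flag travels with the skb, the dst stays cacheable */
+    IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+
+    /* ip_forward() then tests the skb flag instead of the route flag */
+    if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+        !skb_sec_path(skb))
+            ip_rt_send_redirect(skb);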
+
+This issue was discovered by Marcelo Leitner.
+
+Cc: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: Marcelo Leitner <mleitner@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip.h | 11 ++++++-----
+ net/ipv4/ip_forward.c | 3 ++-
+ net/ipv4/route.c | 9 +++++----
+ 3 files changed, 13 insertions(+), 10 deletions(-)
+
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -38,11 +38,12 @@ struct inet_skb_parm {
+ struct ip_options opt; /* Compiled IP options */
+ unsigned char flags;
+
+-#define IPSKB_FORWARDED 1
+-#define IPSKB_XFRM_TUNNEL_SIZE 2
+-#define IPSKB_XFRM_TRANSFORMED 4
+-#define IPSKB_FRAG_COMPLETE 8
+-#define IPSKB_REROUTED 16
++#define IPSKB_FORWARDED BIT(0)
++#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
++#define IPSKB_XFRM_TRANSFORMED BIT(2)
++#define IPSKB_FRAG_COMPLETE BIT(3)
++#define IPSKB_REROUTED BIT(4)
++#define IPSKB_DOREDIRECT BIT(5)
+
+ u16 frag_max_size;
+ };
+--- a/net/ipv4/ip_forward.c
++++ b/net/ipv4/ip_forward.c
+@@ -178,7 +178,8 @@ int ip_forward(struct sk_buff *skb)
+ * We now generate an ICMP HOST REDIRECT giving the route
+ * we calculated.
+ */
+- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
++ !skb_sec_path(skb))
+ ip_rt_send_redirect(skb);
+
+ skb->priority = rt_tos2priority(iph->tos);
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buf
+
+ do_cache = res->fi && !itag;
+ if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
++ skb->protocol == htons(ETH_P_IP) &&
+ (IN_DEV_SHARED_MEDIA(out_dev) ||
+- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
+- flags |= RTCF_DOREDIRECT;
+- do_cache = false;
+- }
++ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
++ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
+
+ if (skb->protocol != htons(ETH_P_IP)) {
+ /* Not IP (i.e. ARP). Do not create route, if it is
+@@ -2305,6 +2304,8 @@ static int rt_fill_info(struct net *net,
+ r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
+ if (rt->rt_flags & RTCF_NOTIFY)
+ r->rtm_flags |= RTM_F_NOTIFY;
++ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
++ r->rtm_flags |= RTCF_DOREDIRECT;
+
+ if (nla_put_be32(skb, RTA_DST, dst))
+ goto nla_put_failure;
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Mon, 26 Jan 2015 15:11:17 +0100
+Subject: ipv6: replacing a rt6_info needs to purge possible propagated rt6_infos too
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 6e9e16e6143b725662e47026a1d0f270721cdd24 ]
+
+Lubomir Rintel reported that during replacing a route the interface
+reference counter isn't correctly decremented.
+
+To quote bug <https://bugzilla.kernel.org/show_bug.cgi?id=91941>:
+| [root@rhel7-5 lkundrak]# sh -x lal
+| + ip link add dev0 type dummy
+| + ip link set dev0 up
+| + ip link add dev1 type dummy
+| + ip link set dev1 up
+| + ip addr add 2001:db8:8086::2/64 dev dev0
+| + ip route add 2001:db8:8086::/48 dev dev0 proto static metric 20
+| + ip route add 2001:db8:8088::/48 dev dev1 proto static metric 10
+| + ip route replace 2001:db8:8086::/48 dev dev1 proto static metric 20
+| + ip link del dev0 type dummy
+| Message from syslogd@rhel7-5 at Jan 23 10:54:41 ...
+| kernel:unregister_netdevice: waiting for dev0 to become free. Usage count = 2
+|
+| Message from syslogd@rhel7-5 at Jan 23 10:54:51 ...
+| kernel:unregister_netdevice: waiting for dev0 to become free. Usage count = 2
+
+During replacement of an rt6_info we must walk all parent nodes and check
+whether the rt6_info being replaced got propagated there. If so, replace it
+with a live one.
+
+Fixes: 4a287eba2de3957 ("IPv6 routing, NLM_F_* flag support: REPLACE and EXCL flags support, warn about missing CREATE flag")
+Reported-by: Lubomir Rintel <lkundrak@v3.sk>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Tested-by: Lubomir Rintel <lkundrak@v3.sk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c | 45 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 26 insertions(+), 19 deletions(-)
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -638,6 +638,29 @@ static inline bool rt6_qualify_for_ecmp(
+ RTF_GATEWAY;
+ }
+
++static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
++ struct net *net)
++{
++ if (atomic_read(&rt->rt6i_ref) != 1) {
++ /* This route is used as dummy address holder in some split
++ * nodes. It is not leaked, but it still holds other resources,
++ * which must be released in time. So, scan ascendant nodes
++ * and replace dummy references to this route with references
++ * to still alive ones.
++ */
++ while (fn) {
++ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
++ fn->leaf = fib6_find_prefix(net, fn);
++ atomic_inc(&fn->leaf->rt6i_ref);
++ rt6_release(rt);
++ }
++ fn = fn->parent;
++ }
++ /* No more references are possible at this point. */
++ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
++ }
++}
++
+ /*
+ * Insert routing information in a node.
+ */
+@@ -775,11 +798,12 @@ add:
+ rt->dst.rt6_next = iter->dst.rt6_next;
+ atomic_inc(&rt->rt6i_ref);
+ inet6_rt_notify(RTM_NEWROUTE, rt, info);
+- rt6_release(iter);
+ if (!(fn->fn_flags & RTN_RTINFO)) {
+ info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
+ fn->fn_flags |= RTN_RTINFO;
+ }
++ fib6_purge_rt(iter, fn, info->nl_net);
++ rt6_release(iter);
+ }
+
+ return 0;
+@@ -1284,24 +1308,7 @@ static void fib6_del_route(struct fib6_n
+ fn = fib6_repair_tree(net, fn);
+ }
+
+- if (atomic_read(&rt->rt6i_ref) != 1) {
+- /* This route is used as dummy address holder in some split
+- * nodes. It is not leaked, but it still holds other resources,
+- * which must be released in time. So, scan ascendant nodes
+- * and replace dummy references to this route with references
+- * to still alive ones.
+- */
+- while (fn) {
+- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+- fn->leaf = fib6_find_prefix(net, fn);
+- atomic_inc(&fn->leaf->rt6i_ref);
+- rt6_release(rt);
+- }
+- fn = fn->parent;
+- }
+- /* No more references are possible at this point. */
+- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+- }
++ fib6_purge_rt(rt, fn, net);
+
+ inet6_rt_notify(RTM_DELROUTE, rt, info);
+ rt6_release(rt);
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Hagen Paul Pfeifer <hagen@jauu.net>
+Date: Thu, 15 Jan 2015 22:34:25 +0100
+Subject: ipv6: stop sending PTB packets for MTU < 1280
+
+From: Hagen Paul Pfeifer <hagen@jauu.net>
+
+[ Upstream commit 9d289715eb5c252ae15bd547cb252ca547a3c4f2 ]
+
+Reduce the attack vector and stop generating IPv6 Fragment Headers for
+paths with an MTU smaller than the minimum required IPv6 MTU
+size (1280 bytes) - so-called atomic fragments.
+
+See IETF I-D "Deprecating the Generation of IPv6 Atomic Fragments" [1]
+for more information and how this "feature" can be misused.
+
+[1] https://tools.ietf.org/html/draft-ietf-6man-deprecate-atomfrag-generation-00
+
+Signed-off-by: Fernando Gont <fgont@si6networks.com>
+Signed-off-by: Hagen Paul Pfeifer <hagen@jauu.net>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct ds
+ struct net *net = dev_net(dst->dev);
+
+ rt6->rt6i_flags |= RTF_MODIFIED;
+- if (mtu < IPV6_MIN_MTU) {
+- u32 features = dst_metric(dst, RTAX_FEATURES);
++ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+- features |= RTAX_FEATURE_ALLFRAG;
+- dst_metric_set(dst, RTAX_FEATURES, features);
+- }
++
+ dst_metric_set(dst, RTAX_MTU, mtu);
+ rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
+ }
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Christoph Hellwig <hch@lst.de>
+Date: Tue, 27 Jan 2015 12:25:33 -0800
+Subject: net: don't OOPS on socket aio
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 06539d3071067ff146a9bffd1c801fa56d290909 ]
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/socket.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -886,9 +886,6 @@ static ssize_t sock_splice_read(struct f
+ static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
+ struct sock_iocb *siocb)
+ {
+- if (!is_sync_kiocb(iocb))
+- BUG();
+-
+ siocb->kiocb = iocb;
+ iocb->private = siocb;
+ return siocb;
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 15 Jan 2015 17:04:22 -0800
+Subject: net: rps: fix cpu unplug
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ac64da0b83d82abe62f78b3d0e21cca31aea24fa ]
+
+softnet_data.input_pkt_queue is protected by a spinlock that
+we must hold when transferring packets from the victim queue to an active
+one. This is because other cpus could still be trying to enqueue packets
+into the victim queue.
+
+A second problem is that when we transfer the NAPI poll_list from the
+victim to the current cpu, we absolutely need to special-case the percpu
+backlog, because we do not want to add complex locking to protect
+process_queue: only the owner cpu is allowed to manipulate it, unless the
+cpu is offline.
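+
+Both rules, condensed from the hunks below (sketch):
+
+    /* locked dequeue: remote cpus may still enqueue to the dead queue */
+    while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+            netif_rx_internal(skb);
+            input_queue_head_incr(oldsd);
+    }
+
+    /* the percpu backlog must only run on its owner cpu: reset it;
+     * every other napi can simply be rescheduled on this cpu */
+    if (napi->poll == process_backlog)
+            napi->state = 0;
+    else
+            ____napi_schedule(sd, napi);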
+
+Based on initial patch from Prasad Sodagudi & Subash Abhinov
+Kasiviswanathan.
+
+This version is better because we do not slow down packet processing,
+only make migration safer.
+
+Reported-by: Prasad Sodagudi <psodagud@codeaurora.org>
+Reported-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Tom Herbert <therbert@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 20 +++++++++++++++-----
+ 1 file changed, 15 insertions(+), 5 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -6812,10 +6812,20 @@ static int dev_cpu_callback(struct notif
+ oldsd->output_queue = NULL;
+ oldsd->output_queue_tailp = &oldsd->output_queue;
+ }
+- /* Append NAPI poll list from offline CPU. */
+- if (!list_empty(&oldsd->poll_list)) {
+- list_splice_init(&oldsd->poll_list, &sd->poll_list);
+- raise_softirq_irqoff(NET_RX_SOFTIRQ);
++ /* Append NAPI poll list from offline CPU, with one exception :
++ * process_backlog() must be called by cpu owning percpu backlog.
++ * We properly handle process_queue & input_pkt_queue later.
++ */
++ while (!list_empty(&oldsd->poll_list)) {
++ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
++ struct napi_struct,
++ poll_list);
++
++ list_del_init(&napi->poll_list);
++ if (napi->poll == process_backlog)
++ napi->state = 0;
++ else
++ ____napi_schedule(sd, napi);
+ }
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+@@ -6826,7 +6836,7 @@ static int dev_cpu_callback(struct notif
+ netif_rx_internal(skb);
+ input_queue_head_incr(oldsd);
+ }
+- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
++ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
+ netif_rx_internal(skb);
+ input_queue_head_incr(oldsd);
+ }
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Saran Maruti Ramanara <saran.neti@telus.com>
+Date: Thu, 29 Jan 2015 11:05:58 +0100
+Subject: net: sctp: fix passing wrong parameter header to param_type2af in sctp_process_param
+
+From: Saran Maruti Ramanara <saran.neti@telus.com>
+
+[ Upstream commit cfbf654efc6d78dc9812e030673b86f235bf677d ]
+
+When making use of RFC5061, section 4.2.4, for setting the primary IP
+address, we're passing a wrong parameter header to param_type2af(),
+which always results in NULL being returned.
+
+At this point, param.p points to a sctp_addip_param struct, containing
+a sctp_paramhdr (type = 0xc004, length = var), and crr_id as a correlation
+id. After that, as also presented in RFC5061 section 4.2.4, comes
+the actual sctp_addr_param, which also contains a sctp_paramhdr, but
+this time with the correct type SCTP_PARAM_IPV{4,6}_ADDRESS that
+param_type2af() can make use of. Since we already hold a pointer to
+addr_param from the previous line, just reuse it for param_type2af().
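+
+Sketched from the description above, the layout at hand is:
+
+    param.p    -> +---------------------------------------------+
+                  | sctp_paramhdr (type = 0xc004, length = var) |  \ sctp_addip_param
+                  | crr_id (correlation id)                     |  /
+    addr_param -> +---------------------------------------------+
+                  | sctp_paramhdr (SCTP_PARAM_IPV{4,6}_ADDRESS) |  \ sctp_addr_param
+                  | the address itself                          |  /
+                  +---------------------------------------------+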
+
+Fixes: d6de3097592b ("[SCTP]: Add the handling of "Set Primary IP Address" parameter to INIT")
+Signed-off-by: Saran Maruti Ramanara <saran.neti@telus.com>
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sm_make_chunk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/sm_make_chunk.c
++++ b/net/sctp/sm_make_chunk.c
+@@ -2608,7 +2608,7 @@ do_addr_param:
+
+ addr_param = param.v + sizeof(sctp_addip_param_t);
+
+- af = sctp_get_af_specific(param_type2af(param.p->type));
++ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
+ if (af == NULL)
+ break;
+
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Daniel Borkmann <dborkman@redhat.com>
+Date: Thu, 22 Jan 2015 18:26:54 +0100
+Subject: net: sctp: fix slab corruption from use after free on INIT collisions
+
+From: Daniel Borkmann <dborkman@redhat.com>
+
+[ Upstream commit 600ddd6825543962fb807884169e57b580dba208 ]
+
+When hitting an INIT collision case during the 4WHS with AUTH enabled, as
+already described in detail in commit 1be9a950c646 ("net: sctp: inherit
+auth_capable on INIT collisions"), it can happen that we occasionally
+still remotely trigger the following panic on server side which seems to
+have been uncovered after the fix from commit 1be9a950c646 ...
+
+[ 533.876389] BUG: unable to handle kernel paging request at 00000000ffffffff
+[ 533.913657] IP: [<ffffffff811ac385>] __kmalloc+0x95/0x230
+[ 533.940559] PGD 5030f2067 PUD 0
+[ 533.957104] Oops: 0000 [#1] SMP
+[ 533.974283] Modules linked in: sctp mlx4_en [...]
+[ 534.939704] Call Trace:
+[ 534.951833] [<ffffffff81294e30>] ? crypto_init_shash_ops+0x60/0xf0
+[ 534.984213] [<ffffffff81294e30>] crypto_init_shash_ops+0x60/0xf0
+[ 535.015025] [<ffffffff8128c8ed>] __crypto_alloc_tfm+0x6d/0x170
+[ 535.045661] [<ffffffff8128d12c>] crypto_alloc_base+0x4c/0xb0
+[ 535.074593] [<ffffffff8160bd42>] ? _raw_spin_lock_bh+0x12/0x50
+[ 535.105239] [<ffffffffa0418c11>] sctp_inet_listen+0x161/0x1e0 [sctp]
+[ 535.138606] [<ffffffff814e43bd>] SyS_listen+0x9d/0xb0
+[ 535.166848] [<ffffffff816149a9>] system_call_fastpath+0x16/0x1b
+
+... or, depending on the application, for example this one:
+
+[ 1370.026490] BUG: unable to handle kernel paging request at 00000000ffffffff
+[ 1370.026506] IP: [<ffffffff811ab455>] kmem_cache_alloc+0x75/0x1d0
+[ 1370.054568] PGD 633c94067 PUD 0
+[ 1370.070446] Oops: 0000 [#1] SMP
+[ 1370.085010] Modules linked in: sctp kvm_amd kvm [...]
+[ 1370.963431] Call Trace:
+[ 1370.974632] [<ffffffff8120f7cf>] ? SyS_epoll_ctl+0x53f/0x960
+[ 1371.000863] [<ffffffff8120f7cf>] SyS_epoll_ctl+0x53f/0x960
+[ 1371.027154] [<ffffffff812100d3>] ? anon_inode_getfile+0xd3/0x170
+[ 1371.054679] [<ffffffff811e3d67>] ? __alloc_fd+0xa7/0x130
+[ 1371.080183] [<ffffffff816149a9>] system_call_fastpath+0x16/0x1b
+
+With slab debugging enabled, we can see that the poison has been overwritten:
+
+[ 669.826368] BUG kmalloc-128 (Tainted: G W ): Poison overwritten
+[ 669.826385] INFO: 0xffff880228b32e50-0xffff880228b32e50. First byte 0x6a instead of 0x6b
+[ 669.826414] INFO: Allocated in sctp_auth_create_key+0x23/0x50 [sctp] age=3 cpu=0 pid=18494
+[ 669.826424] __slab_alloc+0x4bf/0x566
+[ 669.826433] __kmalloc+0x280/0x310
+[ 669.826453] sctp_auth_create_key+0x23/0x50 [sctp]
+[ 669.826471] sctp_auth_asoc_create_secret+0xcb/0x1e0 [sctp]
+[ 669.826488] sctp_auth_asoc_init_active_key+0x68/0xa0 [sctp]
+[ 669.826505] sctp_do_sm+0x29d/0x17c0 [sctp] [...]
+[ 669.826629] INFO: Freed in kzfree+0x31/0x40 age=1 cpu=0 pid=18494
+[ 669.826635] __slab_free+0x39/0x2a8
+[ 669.826643] kfree+0x1d6/0x230
+[ 669.826650] kzfree+0x31/0x40
+[ 669.826666] sctp_auth_key_put+0x19/0x20 [sctp]
+[ 669.826681] sctp_assoc_update+0x1ee/0x2d0 [sctp]
+[ 669.826695] sctp_do_sm+0x674/0x17c0 [sctp]
+
+Since this only triggers in some collision cases with AUTH, the problem at
+heart is that sctp_auth_key_put() on asoc->asoc_shared_key is called twice
+while it has refcnt 1: once directly in sctp_assoc_update(), and again
+from within sctp_auth_asoc_init_active_key(), called via sctp_assoc_update(), on
+the already kzfree'd memory, which is also consistent with the observation
+of the poison decrease from 0x6b to 0x6a (note: the overwrite is detected
+at a later point in time when poison is checked on new allocation).
+
+Reference counting of auth keys revisited:
+
+Shared keys for AUTH chunks are being stored in endpoints and associations
+in endpoint_shared_keys list. On endpoint creation, a null key is being
+added; on association creation, all endpoint shared keys are being cached
+and thus cloned over to the association. struct sctp_shared_key only holds
+a pointer to the actual key bytes, that is, struct sctp_auth_bytes which
+keeps track of users internally through refcounting. Naturally, on assoc
+or endpoint destruction, sctp_shared_key entries are destroyed directly and
+the reference on sctp_auth_bytes is dropped.
+
+User space can add keys to either list via setsockopt(2) through struct
+sctp_authkey and by passing that to sctp_auth_set_key() which replaces or
+adds a new auth key. There, sctp_auth_create_key() creates a new sctp_auth_bytes
+with refcount 1 and in case of replacement drops the reference on the old
+sctp_auth_bytes. A key can be set active from user space through setsockopt()
+on the id via sctp_auth_set_active_key(), which iterates through either
+endpoint_shared_keys and in case of an assoc, invokes (one of various places)
+sctp_auth_asoc_init_active_key().
+
+sctp_auth_asoc_init_active_key() computes the actual secret from local's
+and peer's random, hmac and shared key parameters and returns a new key
+directly as sctp_auth_bytes, that is, asoc->asoc_shared_key, and drops
+the reference if there was a previous one. The secret on which we
+eventually double-drop the ref comes from sctp_auth_asoc_set_secret() with
+an initial refcount of 1, a refcount that remains unchanged through
+sctp_assoc_update(). This key is later used by the crypto layer to
+set the key for the hash in crypto_hash_setkey() from sctp_auth_calculate_hmac().
+
+To close the loop: asoc->asoc_shared_key is freshly allocated secret
+material and independent of the sctp_shared_key management, which keeps
+track only of shared keys in endpoints and assocs. Hence, commit 4184b2a79a76
+("net: sctp: fix memory leak in auth key management") is also independent of
+this bug since it concerns a different layer (though the same structures
+are used eventually). asoc->asoc_shared_key is reference-dropped correctly
+on assoc destruction in sctp_association_free(), and when active keys are
+replaced in sctp_auth_asoc_init_active_key(), it always has a refcount
+of 1. Hence, it's freed prematurely in sctp_assoc_update(). The simple fix is
+to remove that sctp_auth_key_put() from there, which fixes these panics.
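+
+Condensed to a timeline (sketch of the description above):
+
+    /* refcnt of asoc->asoc_shared_key is 1 */
+    sctp_assoc_update(asoc, new)
+        sctp_auth_key_put(asoc->asoc_shared_key);       /* 1 -> 0, kzfree */
+        sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+            sctp_auth_key_put(asoc->asoc_shared_key);   /* put on freed key */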
+
+Fixes: 730fc3d05cd4 ("[SCTP]: Implete SCTP-AUTH parameter processing")
+Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/associola.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1235,7 +1235,6 @@ void sctp_assoc_update(struct sctp_assoc
+ asoc->peer.peer_hmacs = new->peer.peer_hmacs;
+ new->peer.peer_hmacs = NULL;
+
+- sctp_auth_key_put(asoc->asoc_shared_key);
+ sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
+ }
+
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 22 Jan 2015 07:56:18 -0800
+Subject: netxen: fix netxen_nic_poll() logic
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6088beef3f7517717bd21d90b379714dd0837079 ]
+
+NAPI poll logic now enforces that a poller returns exactly the budget
+when it wants to be called again.
+
+If a driver limits TX completion, it has to return budget as well when
+the limit is hit, not the number of received packets.
+
+Reported-and-tested-by: Mike Galbraith <umgwanakikbuti@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: d75b1ade567f ("net: less interrupt masking in NAPI")
+Cc: Manish Chopra <manish.chopra@qlogic.com>
+Acked-by: Manish Chopra <manish.chopra@qlogic.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
++++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+@@ -2390,7 +2390,10 @@ static int netxen_nic_poll(struct napi_s
+
+ work_done = netxen_process_rcv_ring(sds_ring, budget);
+
+- if ((work_done < budget) && tx_complete) {
++ if (!tx_complete)
++ work_done = budget;
++
++ if (work_done < budget) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__NX_DEV_UP, &adapter->state))
+ netxen_nic_enable_int(sds_ring);
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: "subashab@codeaurora.org" <subashab@codeaurora.org>
+Date: Fri, 23 Jan 2015 22:26:02 +0000
+Subject: ping: Fix race in free in receive path
+
+From: "subashab@codeaurora.org" <subashab@codeaurora.org>
+
+[ Upstream commit fc752f1f43c1c038a2c6ae58cc739ebb5953ccb0 ]
+
+An exception is seen in the ICMP ping receive path where the skb
+destructor sock_rfree() tries to access a freed socket. This happens
+because ping_rcv() releases the socket reference with sock_put(), which
+internally frees up the socket. Later, icmp_rcv() will try to free the
+skb; as part of this the skb destructor is called, which leads
+to a kernel panic as the socket was already freed in ping_rcv().
+
+-->|exception
+-007|sk_mem_uncharge
+-007|sock_rfree
+-008|skb_release_head_state
+-009|skb_release_all
+-009|__kfree_skb
+-010|kfree_skb
+-011|icmp_rcv
+-012|ip_local_deliver_finish
+
+Fix this incorrect free by cloning this skb and processing this cloned
+skb instead.
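+
+The race, condensed to a sketch:
+
+    icmp_rcv(skb)
+        ping_rcv(skb)
+            ping_queue_rcv_skb(sk, skb_get(skb));  /* destructor now owns sk */
+            sock_put(sk);                          /* may drop the last ref  */
+        kfree_skb(skb);                            /* sock_rfree() touches a
+                                                      freed sk -> panic      */
+
+With skb_clone(), the queued clone carries the socket destructor while
+the original skb freed by icmp_rcv() does not.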
+
+This patch was suggested by Eric Dumazet.
+
+Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Cc: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -973,8 +973,11 @@ void ping_rcv(struct sk_buff *skb)
+
+ sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
+ if (sk != NULL) {
++ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
++
+ pr_debug("rcv on socket %p\n", sk);
+- ping_queue_rcv_skb(sk, skb_get(skb));
++ if (skb2)
++ ping_queue_rcv_skb(sk, skb2);
+ sock_put(sk);
+ return;
+ }
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Florian Westphal <fw@strlen.de>
+Date: Wed, 28 Jan 2015 10:56:04 +0100
+Subject: ppp: deflate: never return len larger than output buffer
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit e2a4800e75780ccf4e6c2487f82b688ba736eb18 ]
+
+When we've run out of space in the output buffer to store more data, we
+will call zlib_deflate with a NULL output buffer until we've consumed the
+remaining input.
+
+When this happens, olen contains the size the output buffer would have
+consumed if we'd had enough room.
+
+This can later cause skb_over_panic when ppp_generic skb_put()s
+the returned length.
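+
+A hedged sketch of the guard (use_compressed_data() and friends are
+hypothetical helpers, not the driver's code):
+
+    /*
+     * olen is what deflate *would* have written; once the real buffer
+     * (osize bytes) is full we keep consuming input with a NULL output
+     * buffer, so olen can exceed osize and is then not a usable length.
+     */
+    if (olen < isize && olen <= osize)
+            use_compressed_data(olen);      /* safe: data actually fits */
+    else
+            send_uncompressed();            /* fall back, as the generic
+                                               layer does on failure */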
+
+Reported-by: Iain Douglas <centos@1n6.org.uk>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ppp/ppp_deflate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ppp/ppp_deflate.c
++++ b/drivers/net/ppp/ppp_deflate.c
+@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigne
+ /*
+ * See if we managed to reduce the size of the packet.
+ */
+- if (olen < isize) {
++ if (olen < isize && olen <= osize) {
+ state->stats.comp_bytes += olen;
+ state->stats.comp_packets++;
+ } else {
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 28 Jan 2015 05:47:11 -0800
+Subject: tcp: ipv4: initialize unicast_sock sk_pacing_rate
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 811230cd853d62f09ed0addd0ce9a1b9b0e13fb5 ]
+
+When I added the sk_pacing_rate field, I forgot to initialize its value
+in the per-cpu unicast_sock used in ip_send_unicast_reply().
+
+This means that for sch_fq users, RST packets, or ACK packets sent
+on behalf of TIME_WAIT sockets might be sent too slowly or even dropped
+once we reach the per-flow limit.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: 95bd09eb2750 ("tcp: TSO packets automatic sizing")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_output.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -1471,6 +1471,7 @@ static DEFINE_PER_CPU(struct inet_sock,
+ .sk_wmem_alloc = ATOMIC_INIT(1),
+ .sk_allocation = GFP_ATOMIC,
+ .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
++ .sk_pacing_rate = ~0U,
+ },
+ .pmtudisc = IP_PMTUDISC_WANT,
+ .uc_ttl = -1,
--- /dev/null
+From foo@baz Thu Feb 12 09:26:08 HKT 2015
+From: Herbert Xu <herbert@gondor.apana.org.au>
+Date: Sat, 24 Jan 2015 08:02:40 +1100
+Subject: udp_diag: Fix socket skipping within chain
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 86f3cddbc3037882414c7308973530167906b7e9 ]
+
+While working on rhashtable walking I noticed that the UDP diag
+dumping code is buggy. In particular, the socket skipping within
+a chain never happens, even though we record the number of sockets
+that should be skipped.
+
+As this code was supposedly copied from TCP, this patch does what
+TCP does and resets num before we walk a chain.
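+
+The intended skip logic, sketched (mirroring the hunk below):
+
+    for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
+            num = 0;        /* restart the count for every chain */
+            sk_nulls_for_each(sk, node, &hslot->head) {
+                    if (num < s_num)
+                            goto next;      /* dumped in a previous call */
+                    /* dump sk */
+    next:
+                    num++;
+            }
+    }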
+
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: Pavel Emelyanov <xemul@parallels.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/udp_diag.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/ipv4/udp_diag.c
++++ b/net/ipv4/udp_diag.c
+@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *t
+ s_slot = cb->args[0];
+ num = s_num = cb->args[1];
+
+- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
++ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
+ struct sock *sk;
+ struct hlist_nulls_node *node;
+ struct udp_hslot *hslot = &table->hash[slot];
+
++ num = 0;
++
+ if (hlist_nulls_empty(&hslot->head))
+ continue;
+