git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.19-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 11 Mar 2015 12:45:28 +0000 (13:45 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 11 Mar 2015 12:45:28 +0000 (13:45 +0100)
added patches:
ematch-fix-auto-loading-of-ematch-modules.patch
flowcache-fix-kernel-panic-in-flow_cache_flush_task.patch
gen_stats.c-duplicate-xstats-buffer-for-later-use.patch
ipv4-ip_check_defrag-should-correctly-check-return-value-of-skb_copy_bits.patch
ipv4-ip_check_defrag-should-not-assume-that-skb_network_offset-is-zero.patch
ipv6-addrconf-add-missing-validate_link_af-handler.patch
ipv6-fix-fragment-id-assignment-on-le-arches.patch
ipv6-fix-ipv6_cow_metrics-for-non-dst_host-case.patch
ipv6-make-__ipv6_select_ident-static.patch
macvtap-make-sure-neighbour-code-can-push-ethernet-header.patch
net-bcmgenet-fix-software-maintained-statistics.patch
net-bcmgenet-fix-throughtput-regression.patch
net-compat-ignore-msg_cmsg_compat-in-compat_sys_-send-recv-msg.patch
net-do-not-use-rcu-in-rtnl_dump_ifinfo.patch
net-phy-fix-verification-of-eee-support-in-phy_init_eee.patch
net-ping-return-eafnosupport-when-appropriate.patch
net-pktgen-disable-xmit_clone-on-virtual-devices.patch
net-reject-creation-of-netdev-names-with-colons.patch
openvswitch-fix-net-exit.patch
pktgen-fix-udp-checksum-computation.patch
revert-r8169-add-support-for-byte-queue-limits.patch
rtnetlink-call-dellink-on-failure-when-newlink-exists.patch
rtnetlink-ifla_vf_policy-fix-misuses-of-nla_binary.patch
sh_eth-fix-lost-mac-address-on-kexec.patch
sock-sock_dequeue_err_skb-needs-hard-irq-safety.patch
tcp-make-sure-skb-is-not-shared-before-using-skb_get.patch
team-don-t-traverse-port-list-using-rcu-in-team_set_mac_address.patch
team-fix-possible-null-pointer-dereference-in-team_handle_frame.patch
udp-only-allow-ufo-for-packets-from-sock_dgram-sockets.patch
usb-plusb-add-support-for-national-instruments-host-to-host-cable.patch

30 files changed:
queue-3.19/ematch-fix-auto-loading-of-ematch-modules.patch [new file with mode: 0644]
queue-3.19/flowcache-fix-kernel-panic-in-flow_cache_flush_task.patch [new file with mode: 0644]
queue-3.19/gen_stats.c-duplicate-xstats-buffer-for-later-use.patch [new file with mode: 0644]
queue-3.19/ipv4-ip_check_defrag-should-correctly-check-return-value-of-skb_copy_bits.patch [new file with mode: 0644]
queue-3.19/ipv4-ip_check_defrag-should-not-assume-that-skb_network_offset-is-zero.patch [new file with mode: 0644]
queue-3.19/ipv6-addrconf-add-missing-validate_link_af-handler.patch [new file with mode: 0644]
queue-3.19/ipv6-fix-fragment-id-assignment-on-le-arches.patch [new file with mode: 0644]
queue-3.19/ipv6-fix-ipv6_cow_metrics-for-non-dst_host-case.patch [new file with mode: 0644]
queue-3.19/ipv6-make-__ipv6_select_ident-static.patch [new file with mode: 0644]
queue-3.19/macvtap-make-sure-neighbour-code-can-push-ethernet-header.patch [new file with mode: 0644]
queue-3.19/net-bcmgenet-fix-software-maintained-statistics.patch [new file with mode: 0644]
queue-3.19/net-bcmgenet-fix-throughtput-regression.patch [new file with mode: 0644]
queue-3.19/net-compat-ignore-msg_cmsg_compat-in-compat_sys_-send-recv-msg.patch [new file with mode: 0644]
queue-3.19/net-do-not-use-rcu-in-rtnl_dump_ifinfo.patch [new file with mode: 0644]
queue-3.19/net-phy-fix-verification-of-eee-support-in-phy_init_eee.patch [new file with mode: 0644]
queue-3.19/net-ping-return-eafnosupport-when-appropriate.patch [new file with mode: 0644]
queue-3.19/net-pktgen-disable-xmit_clone-on-virtual-devices.patch [new file with mode: 0644]
queue-3.19/net-reject-creation-of-netdev-names-with-colons.patch [new file with mode: 0644]
queue-3.19/openvswitch-fix-net-exit.patch [new file with mode: 0644]
queue-3.19/pktgen-fix-udp-checksum-computation.patch [new file with mode: 0644]
queue-3.19/revert-r8169-add-support-for-byte-queue-limits.patch [new file with mode: 0644]
queue-3.19/rtnetlink-call-dellink-on-failure-when-newlink-exists.patch [new file with mode: 0644]
queue-3.19/rtnetlink-ifla_vf_policy-fix-misuses-of-nla_binary.patch [new file with mode: 0644]
queue-3.19/sh_eth-fix-lost-mac-address-on-kexec.patch [new file with mode: 0644]
queue-3.19/sock-sock_dequeue_err_skb-needs-hard-irq-safety.patch [new file with mode: 0644]
queue-3.19/tcp-make-sure-skb-is-not-shared-before-using-skb_get.patch [new file with mode: 0644]
queue-3.19/team-don-t-traverse-port-list-using-rcu-in-team_set_mac_address.patch [new file with mode: 0644]
queue-3.19/team-fix-possible-null-pointer-dereference-in-team_handle_frame.patch [new file with mode: 0644]
queue-3.19/udp-only-allow-ufo-for-packets-from-sock_dgram-sockets.patch [new file with mode: 0644]
queue-3.19/usb-plusb-add-support-for-national-instruments-host-to-host-cable.patch [new file with mode: 0644]

diff --git a/queue-3.19/ematch-fix-auto-loading-of-ematch-modules.patch b/queue-3.19/ematch-fix-auto-loading-of-ematch-modules.patch
new file mode 100644 (file)
index 0000000..9adee03
--- /dev/null
@@ -0,0 +1,35 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?=
+ <ignacy.gawedzki@green-communications.fr>
+Date: Tue, 17 Feb 2015 20:15:20 +0100
+Subject: ematch: Fix auto-loading of ematch modules.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?=
+
+[ Upstream commit 34eea79e2664b314cab6a30fc582fdfa7a1bb1df ]
+
+In tcf_em_validate(), after calling request_module() to load the
+kind-specific module, set em->ops to NULL before returning -EAGAIN, so
+that module_put() is not called again by tcf_em_tree_destroy().
+
+Signed-off-by: Ignacy Gawędzki <ignacy.gawedzki@green-communications.fr>
+Acked-by: Cong Wang <cwang@twopensource.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/ematch.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/net/sched/ematch.c
++++ b/net/sched/ematch.c
+@@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_pr
+                                * to replay the request.
+                                */
+                               module_put(em->ops->owner);
++                              em->ops = NULL;
+                               err = -EAGAIN;
+                       }
+ #endif
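
The fix above boils down to a common rule: after dropping a reference on an error path, clear the pointer so the shared teardown path (here tcf_em_tree_destroy(), which only drops the module reference when em->ops is set) cannot drop it a second time. Below is a minimal plain-C sketch of that pattern, not kernel code; fake_ops, fake_put, entry, validate and destroy are invented names.

struct fake_ops { int refcnt; };

void fake_put(struct fake_ops *ops)
{
        if (ops)                /* teardown guards on the pointer, so NULL means "nothing to drop" */
                ops->refcnt--;
}

struct entry { struct fake_ops *ops; };

int validate(struct entry *e, struct fake_ops *ops)
{
        e->ops = ops;           /* reference held while validating */
        /* ... validation decides the module must be loaded and the request replayed ... */
        fake_put(e->ops);       /* drop the reference before asking the caller to retry */
        e->ops = NULL;          /* the fix: teardown below must not drop it again */
        return -11;             /* stands in for -EAGAIN */
}

void destroy(struct entry *e)
{
        fake_put(e->ops);       /* shared teardown path; harmless now that ops is NULL */
}
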
diff --git a/queue-3.19/flowcache-fix-kernel-panic-in-flow_cache_flush_task.patch b/queue-3.19/flowcache-fix-kernel-panic-in-flow_cache_flush_task.patch
new file mode 100644 (file)
index 0000000..3beffd8
--- /dev/null
@@ -0,0 +1,47 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Miroslav Urbanek <mu@miroslavurbanek.com>
+Date: Thu, 5 Feb 2015 16:36:50 +0100
+Subject: flowcache: Fix kernel panic in flow_cache_flush_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Miroslav Urbanek <mu@miroslavurbanek.com>
+
+[ Upstream commit 233c96fc077d310772375d47522fb444ff546905 ]
+
+flow_cache_flush_task references a structure member flow_cache_gc_work
+where it should reference flow_cache_flush_task instead.
+
+Kernel panic occurs on kernels using IPsec during XFRM garbage
+collection. The garbage collection interval can be shortened using the
+following sysctl settings:
+
+net.ipv4.xfrm4_gc_thresh=4
+net.ipv6.xfrm6_gc_thresh=4
+
+With the default settings, our productions servers crash approximately
+once a week. With the settings above, they crash immediately.
+
+Fixes: ca925cf1534e ("flowcache: Make flow cache name space aware")
+Reported-by: Tomáš Charvát <tc@excello.cz>
+Tested-by: Jan Hejl <jh@excello.cz>
+Signed-off-by: Miroslav Urbanek <mu@miroslavurbanek.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/flow.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -379,7 +379,7 @@ done:
+ static void flow_cache_flush_task(struct work_struct *work)
+ {
+       struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+-                                              flow_cache_gc_work);
++                                              flow_cache_flush_work);
+       struct net *net = container_of(xfrm, struct net, xfrm);
+       flow_cache_flush(net);
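
The crash comes down to container_of() being handed the wrong member name. The standalone program below (plain userspace C; struct netns_like and struct work are invented stand-ins for struct netns_xfrm and its two embedded work items) shows why that matters: container_of() subtracts the offset of the named member, so naming the wrong one yields a pointer that does not point at the containing structure.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };

struct netns_like {                     /* stand-in for struct netns_xfrm */
        struct work gc_work;            /* ~ flow_cache_gc_work */
        struct work flush_work;         /* ~ flow_cache_flush_work */
};

int main(void)
{
        struct netns_like ns;
        struct work *w = &ns.flush_work;        /* what the work handler receives */

        struct netns_like *ok  = container_of(w, struct netns_like, flush_work);
        struct netns_like *bad = container_of(w, struct netns_like, gc_work);

        printf("right member -> %p, wrong member -> %p, actual struct at %p\n",
               (void *)ok, (void *)bad, (void *)&ns);
        return 0;
}
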
diff --git a/queue-3.19/gen_stats.c-duplicate-xstats-buffer-for-later-use.patch b/queue-3.19/gen_stats.c-duplicate-xstats-buffer-for-later-use.patch
new file mode 100644 (file)
index 0000000..376a01f
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?=
+ <ignacy.gawedzki@green-communications.fr>
+Date: Fri, 13 Feb 2015 14:47:05 -0800
+Subject: gen_stats.c: Duplicate xstats buffer for later use
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?=
+
+[ Upstream commit 1c4cff0cf55011792125b6041bc4e9713e46240f ]
+
+The gnet_stats_copy_app() function gets called, more often than not, with its
+second argument a pointer to an automatic variable in the caller's stack.
+Therefore, to avoid copying garbage afterwards when calling
+gnet_stats_finish_copy(), this data is better copied to a dynamically allocated
+memory that gets freed after use.
+
+[xiyou.wangcong@gmail.com: remove a useless kfree()]
+
+Signed-off-by: Ignacy Gawędzki <ignacy.gawedzki@green-communications.fr>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/gen_stats.c |   15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/net/core/gen_stats.c
++++ b/net/core/gen_stats.c
+@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int
+       return 0;
+ nla_put_failure:
++      kfree(d->xstats);
++      d->xstats = NULL;
++      d->xstats_len = 0;
+       spin_unlock_bh(d->lock);
+       return -1;
+ }
+@@ -305,7 +308,9 @@ int
+ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
+ {
+       if (d->compat_xstats) {
+-              d->xstats = st;
++              d->xstats = kmemdup(st, len, GFP_ATOMIC);
++              if (!d->xstats)
++                      goto err_out;
+               d->xstats_len = len;
+       }
+@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d,
+               return gnet_stats_copy(d, TCA_STATS_APP, st, len);
+       return 0;
++
++err_out:
++      d->xstats_len = 0;
++      spin_unlock_bh(d->lock);
++      return -1;
+ }
+ EXPORT_SYMBOL(gnet_stats_copy_app);
+@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump
+                       return -1;
+       }
++      kfree(d->xstats);
++      d->xstats = NULL;
++      d->xstats_len = 0;
+       spin_unlock_bh(d->lock);
+       return 0;
+ }
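
The essence of the fix above is "keep a private copy, not the caller's pointer", since the caller usually passes the address of an on-stack buffer that is gone by the time the dump is finished. A minimal userspace sketch of that pattern follows; dump_state, stash_stats and finish_dump are invented names, and the kernel code expresses the same thing with kmemdup(st, len, GFP_ATOMIC) and kfree().

#include <stdlib.h>
#include <string.h>

struct dump_state {
        void   *xstats;
        size_t  xstats_len;
};

int stash_stats(struct dump_state *d, const void *st, size_t len)
{
        d->xstats = malloc(len);        /* kernel: kmemdup(st, len, GFP_ATOMIC) */
        if (!d->xstats) {
                d->xstats_len = 0;
                return -1;
        }
        memcpy(d->xstats, st, len);
        d->xstats_len = len;
        return 0;
}

void finish_dump(struct dump_state *d)
{
        /* ... the copy is still valid here even though the caller's stack
         * frame that held the original buffer may already be gone ... */
        free(d->xstats);
        d->xstats = NULL;
        d->xstats_len = 0;
}
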
diff --git a/queue-3.19/ipv4-ip_check_defrag-should-correctly-check-return-value-of-skb_copy_bits.patch b/queue-3.19/ipv4-ip_check_defrag-should-correctly-check-return-value-of-skb_copy_bits.patch
new file mode 100644 (file)
index 0000000..3ff3d9c
--- /dev/null
@@ -0,0 +1,32 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Alexander Drozdov <al.drozdov@gmail.com>
+Date: Tue, 17 Feb 2015 13:33:46 +0300
+Subject: ipv4: ip_check_defrag should correctly check return value of skb_copy_bits
+
+From: Alexander Drozdov <al.drozdov@gmail.com>
+
+[ Upstream commit fba04a9e0c869498889b6445fd06cbe7da9bb834 ]
+
+skb_copy_bits() returns zero on success and negative value on error,
+so it is needed to invert the condition in ip_check_defrag().
+
+Fixes: 1bf3751ec90c ("ipv4: ip_check_defrag must not modify skb before unsharing")
+Signed-off-by: Alexander Drozdov <al.drozdov@gmail.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -664,7 +664,7 @@ struct sk_buff *ip_check_defrag(struct s
+       if (skb->protocol != htons(ETH_P_IP))
+               return skb;
+-      if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
++      if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
+               return skb;
+       if (iph.ihl < 5 || iph.version != 4)
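
The one-liner above hinges on skb_copy_bits() returning 0 on success and a negative value on error. A tiny sketch of that convention (copy_bits() and peek_first_byte() below are invented stand-ins, not the real helpers):

#include <string.h>

int copy_bits(const void *src, size_t avail, void *dst, size_t len)
{
        if (len > avail)
                return -1;      /* error: not enough data to copy */
        memcpy(dst, src, len);
        return 0;               /* success */
}

int peek_first_byte(const void *pkt, size_t avail, unsigned char *out)
{
        /* Correct check: only a negative return is a failure. */
        if (copy_bits(pkt, avail, out, sizeof(*out)) < 0)
                return -1;
        /* The buggy form, "if (!copy_bits(...)) return ...;", bailed out on
         * success and kept going on failure. */
        return 0;
}
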
diff --git a/queue-3.19/ipv4-ip_check_defrag-should-not-assume-that-skb_network_offset-is-zero.patch b/queue-3.19/ipv4-ip_check_defrag-should-not-assume-that-skb_network_offset-is-zero.patch
new file mode 100644 (file)
index 0000000..42da937
--- /dev/null
@@ -0,0 +1,56 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Alexander Drozdov <al.drozdov@gmail.com>
+Date: Thu, 5 Mar 2015 10:29:39 +0300
+Subject: ipv4: ip_check_defrag should not assume that skb_network_offset is zero
+
+From: Alexander Drozdov <al.drozdov@gmail.com>
+
+[ Upstream commit 3e32e733d1bbb3f227259dc782ef01d5706bdae0 ]
+
+ip_check_defrag() may be used by af_packet to defragment outgoing packets.
+skb_network_offset() of af_packet's outgoing packets is not zero.
+
+Signed-off-by: Alexander Drozdov <al.drozdov@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_fragment.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -659,27 +659,30 @@ EXPORT_SYMBOL(ip_defrag);
+ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+ {
+       struct iphdr iph;
++      int netoff;
+       u32 len;
+       if (skb->protocol != htons(ETH_P_IP))
+               return skb;
+-      if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
++      netoff = skb_network_offset(skb);
++
++      if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
+               return skb;
+       if (iph.ihl < 5 || iph.version != 4)
+               return skb;
+       len = ntohs(iph.tot_len);
+-      if (skb->len < len || len < (iph.ihl * 4))
++      if (skb->len < netoff + len || len < (iph.ihl * 4))
+               return skb;
+       if (ip_is_fragment(&iph)) {
+               skb = skb_share_check(skb, GFP_ATOMIC);
+               if (skb) {
+-                      if (!pskb_may_pull(skb, iph.ihl*4))
++                      if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
+                               return skb;
+-                      if (pskb_trim_rcsum(skb, len))
++                      if (pskb_trim_rcsum(skb, netoff + len))
+                               return skb;
+                       memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+                       if (ip_defrag(skb, user))
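
As a side note on the change above: every bound derived from the IP header has to be shifted by the network-header offset once that offset can be non-zero. A small sketch of the length check (struct pktbuf and the function name are invented for illustration):

struct pktbuf {
        unsigned int len;       /* total bytes in the buffer */
        unsigned int net_off;   /* where the IP header starts; non-zero for af_packet output */
};

/* iph.tot_len counts from the start of the IP header, so the buffer must hold
 * net_off + tot_len bytes -- exactly the "skb->len < netoff + len" test above. */
int holds_whole_packet(const struct pktbuf *b, unsigned int tot_len)
{
        return b->len >= b->net_off + tot_len;
}
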
diff --git a/queue-3.19/ipv6-addrconf-add-missing-validate_link_af-handler.patch b/queue-3.19/ipv6-addrconf-add-missing-validate_link_af-handler.patch
new file mode 100644 (file)
index 0000000..4ea0a01
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 5 Feb 2015 14:39:11 +0100
+Subject: ipv6: addrconf: add missing validate_link_af handler
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 11b1f8288d4341af5d755281c871bff6c3e270dd ]
+
+We still need a validate_link_af() handler with an appropriate nla policy,
+similarly as we have in IPv4 case, otherwise size validations are not being
+done properly in that case.
+
+Fixes: f53adae4eae5 ("net: ipv6: add tokenized interface identifier support")
+Fixes: bc91b0f07ada ("ipv6: addrconf: implement address generation modes")
+Cc: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/addrconf.c |   17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -4572,6 +4572,22 @@ static int inet6_set_iftoken(struct inet
+       return 0;
+ }
++static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
++      [IFLA_INET6_ADDR_GEN_MODE]      = { .type = NLA_U8 },
++      [IFLA_INET6_TOKEN]              = { .len = sizeof(struct in6_addr) },
++};
++
++static int inet6_validate_link_af(const struct net_device *dev,
++                                const struct nlattr *nla)
++{
++      struct nlattr *tb[IFLA_INET6_MAX + 1];
++
++      if (dev && !__in6_dev_get(dev))
++              return -EAFNOSUPPORT;
++
++      return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
++}
++
+ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
+ {
+       int err = -EINVAL;
+@@ -5393,6 +5409,7 @@ static struct rtnl_af_ops inet6_ops = {
+       .family           = AF_INET6,
+       .fill_link_af     = inet6_fill_link_af,
+       .get_link_af_size = inet6_get_link_af_size,
++      .validate_link_af = inet6_validate_link_af,
+       .set_link_af      = inet6_set_link_af,
+ };
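
The point of the added handler above is that every nested attribute gets checked against a per-attribute policy before anything trusts its payload. A simplified userspace rendering of that idea follows; the attribute names, struct rule and validate() are invented, the kernel expresses the same thing with struct nla_policy and nla_parse_nested(), and its length rules are richer than the exact-length check used here.

#include <stddef.h>

enum { ATTR_GEN_MODE, ATTR_TOKEN, ATTR_MAX };

struct attr { int type; size_t len; const void *data; };
struct rule { size_t exact_len; };              /* 0 means "no length constraint" */

static const struct rule policy[ATTR_MAX] = {
        [ATTR_GEN_MODE] = { .exact_len = 1  },  /* like { .type = NLA_U8 } */
        [ATTR_TOKEN]    = { .exact_len = 16 },  /* like { .len = sizeof(struct in6_addr) } */
};

int validate(const struct attr *attrs, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                int t = attrs[i].type;

                if (t < 0 || t >= ATTR_MAX)
                        return -1;              /* unknown attribute type */
                if (policy[t].exact_len && attrs[i].len != policy[t].exact_len)
                        return -1;              /* payload has the wrong size */
        }
        return 0;
}
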
diff --git a/queue-3.19/ipv6-fix-fragment-id-assignment-on-le-arches.patch b/queue-3.19/ipv6-fix-fragment-id-assignment-on-le-arches.patch
new file mode 100644 (file)
index 0000000..c3598c8
--- /dev/null
@@ -0,0 +1,46 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Vlad Yasevich <vyasevich@gmail.com>
+Date: Mon, 9 Feb 2015 09:38:20 -0500
+Subject: ipv6: Fix fragment id assignment on LE arches.
+
+From: Vlad Yasevich <vyasevich@gmail.com>
+
+[ Upstream commit 51f30770e50eb787200f30a79105e2615b379334 ]
+
+Recent commit:
+0508c07f5e0c94f38afd5434e8b2a55b84553077
+Author: Vlad Yasevich <vyasevich@gmail.com>
+Date:   Tue Feb 3 16:36:15 2015 -0500
+
+    ipv6: Select fragment id during UFO segmentation if not set.
+
+Introduced a bug on LE in how ipv6 fragment id is assigned.
+This was cought by nightly sparce check:
+
+Resolve the following sparce error:
+ net/ipv6/output_core.c:57:38: sparse: incorrect type in assignment
+ (different base types)
+   net/ipv6/output_core.c:57:38:    expected restricted __be32
+[usertype] ip6_frag_id
+   net/ipv6/output_core.c:57:38:    got unsigned int [unsigned]
+[assigned] [usertype] id
+
+Fixes: 0508c07f5e0c9 (ipv6: Select fragment id during UFO segmentation if not set.)
+Signed-off-by: Vladislav Yasevich <vyasevic@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/output_core.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -54,7 +54,7 @@ void ipv6_proxy_select_ident(struct sk_b
+       id = __ipv6_select_ident(ip6_proxy_idents_hashrnd,
+                                &addrs[1], &addrs[0]);
+-      skb_shinfo(skb)->ip6_frag_id = id;
++      skb_shinfo(skb)->ip6_frag_id = htonl(id);
+ }
+ EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
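
The one-line change above is an endianness fix: a host-order u32 was stored into a big-endian (__be32) field, which only happens to be correct on big-endian machines. The small program below shows the difference htonl() makes on a little-endian host:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t id   = 0x12345678u;    /* host-order value, as computed */
        uint32_t wire = htonl(id);      /* what belongs in a __be32 field */

        printf("host order: %08x, network order as stored: %08x\n",
               (unsigned int)id, (unsigned int)wire);
        /* On little-endian machines the two differ, so storing "id" directly
         * put the wrong byte order on the wire; on big-endian machines
         * htonl() is a no-op, which is why the bug was invisible there. */
        return 0;
}
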
diff --git a/queue-3.19/ipv6-fix-ipv6_cow_metrics-for-non-dst_host-case.patch b/queue-3.19/ipv6-fix-ipv6_cow_metrics-for-non-dst_host-case.patch
new file mode 100644 (file)
index 0000000..da9df42
--- /dev/null
@@ -0,0 +1,64 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Martin KaFai Lau <kafai@fb.com>
+Date: Thu, 12 Feb 2015 16:14:08 -0800
+Subject: ipv6: fix ipv6_cow_metrics for non DST_HOST case
+
+From: Martin KaFai Lau <kafai@fb.com>
+
+[ Upstream commit 3b4711757d7903ab6fa88a9e7ab8901b8227da60 ]
+
+ipv6_cow_metrics() currently assumes only DST_HOST routes require
+dynamic metrics allocation from inetpeer.  The assumption breaks
+when ndisc discovered router with RTAX_MTU and RTAX_HOPLIMIT metric.
+Refer to ndisc_router_discovery() in ndisc.c and note that dst_metric_set()
+is called after the route is created.
+
+This patch creates the metrics array (by calling dst_cow_metrics_generic) in
+ipv6_cow_metrics().
+
+Test:
+radvd.conf:
+interface qemubr0
+{
+       AdvLinkMTU 1300;
+       AdvCurHopLimit 30;
+
+       prefix fd00:face:face:face::/64
+       {
+               AdvOnLink on;
+               AdvAutonomous on;
+               AdvRouterAddr off;
+       };
+};
+
+Before:
+[root@qemu1 ~]# ip -6 r show | egrep -v unreachable
+fd00:face:face:face::/64 dev eth0  proto kernel  metric 256  expires 27sec
+fe80::/64 dev eth0  proto kernel  metric 256
+default via fe80::74df:d0ff:fe23:8ef2 dev eth0  proto ra  metric 1024  expires 27sec
+
+After:
+[root@qemu1 ~]# ip -6 r show | egrep -v unreachable
+fd00:face:face:face::/64 dev eth0  proto kernel  metric 256  expires 27sec mtu 1300
+fe80::/64 dev eth0  proto kernel  metric 256  mtu 1300
+default via fe80::74df:d0ff:fe23:8ef2 dev eth0  proto ra  metric 1024  expires 27sec mtu 1300 hoplimit 30
+
+Fixes: 8e2ec639173f325 (ipv6: don't use inetpeer to store metrics for routes.)
+Signed-off-by: Martin KaFai Lau <kafai@fb.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -141,7 +141,7 @@ static u32 *ipv6_cow_metrics(struct dst_
+       u32 *p = NULL;
+       if (!(rt->dst.flags & DST_HOST))
+-              return NULL;
++              return dst_cow_metrics_generic(dst, old);
+       peer = rt6_get_peer_create(rt);
+       if (peer) {
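
For background, dst_cow_metrics_generic() implements a copy-on-write scheme: a route starts out pointing at shared, read-only default metrics, and the first attempt to write a metric allocates a private copy. A rough userspace sketch of that idea (struct route, route_init and cow_metrics are invented names, and error handling is reduced to returning NULL):

#include <stdlib.h>
#include <string.h>

#define N_METRICS 16

static const unsigned int default_metrics[N_METRICS];  /* shared, never written */

struct route {
        unsigned int *metrics;  /* points at the shared defaults until first write */
};

void route_init(struct route *rt)
{
        rt->metrics = (unsigned int *)default_metrics; /* cast is safe: never written while shared */
}

unsigned int *cow_metrics(struct route *rt)
{
        unsigned int *p;

        if (rt->metrics != default_metrics)
                return rt->metrics;             /* already has a private copy */

        p = malloc(sizeof(default_metrics));
        if (!p)
                return NULL;
        memcpy(p, default_metrics, sizeof(default_metrics));
        rt->metrics = p;                        /* from now on, writes go to the copy */
        return p;
}
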
diff --git a/queue-3.19/ipv6-make-__ipv6_select_ident-static.patch b/queue-3.19/ipv6-make-__ipv6_select_ident-static.patch
new file mode 100644 (file)
index 0000000..0e71253
--- /dev/null
@@ -0,0 +1,44 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Vlad Yasevich <vyasevich@gmail.com>
+Date: Mon, 9 Feb 2015 09:38:21 -0500
+Subject: ipv6: Make __ipv6_select_ident static
+
+From: Vlad Yasevich <vyasevich@gmail.com>
+
+[ Upstream commit 8381eacf5c3b35cf7755f4bc521c4d56d24c1cd9 ]
+
+Make __ipv6_select_ident() static as it isn't used outside
+the file.
+
+Fixes: 0508c07f5e0c9 (ipv6: Select fragment id during UFO segmentation if not set.)
+Signed-off-by: Vladislav Yasevich <vyasevic@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ipv6.h     |    2 --
+ net/ipv6/output_core.c |    3 ++-
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -671,8 +671,6 @@ static inline int ipv6_addr_diff(const s
+       return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
+ }
+-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
+-                      struct in6_addr *src);
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+ void ipv6_proxy_select_ident(struct sk_buff *skb);
+--- a/net/ipv6/output_core.c
++++ b/net/ipv6/output_core.c
+@@ -9,7 +9,8 @@
+ #include <net/addrconf.h>
+ #include <net/secure_seq.h>
+-u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst, struct in6_addr *src)
++static u32 __ipv6_select_ident(u32 hashrnd, struct in6_addr *dst,
++                             struct in6_addr *src)
+ {
+       u32 hash, id;
diff --git a/queue-3.19/macvtap-make-sure-neighbour-code-can-push-ethernet-header.patch b/queue-3.19/macvtap-make-sure-neighbour-code-can-push-ethernet-header.patch
new file mode 100644 (file)
index 0000000..67df203
--- /dev/null
@@ -0,0 +1,63 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 27 Feb 2015 18:35:35 -0800
+Subject: macvtap: make sure neighbour code can push ethernet header
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2f1d8b9e8afa5a833d96afcd23abcb8cdf8d83ab ]
+
+Brian reported crashes using IPv6 traffic with macvtap/veth combo.
+
+I tracked the crashes in neigh_hh_output()
+
+-> memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
+
+Neighbour code assumes headroom to push Ethernet header is
+at least 16 bytes.
+
+It appears macvtap has only 14 bytes available on arches
+where NET_IP_ALIGN is 0 (like x86)
+
+Effect is a corruption of 2 bytes right before skb->head,
+and possible crashes if accessing non existing memory.
+
+This fix should also increase IPv4 performance, as paranoid code
+in ip_finish_output2() wont have to call skb_realloc_headroom()
+
+Reported-by: Brian Rak <brak@vultr.com>
+Tested-by: Brian Rak <brak@vultr.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/macvtap.c |    7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/macvtap.c
++++ b/drivers/net/macvtap.c
+@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(stru
+       } /* else everything is zero */
+ }
++/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
++#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)
++
+ /* Get packet from user space buffer */
+ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
+                               struct iov_iter *from, int noblock)
+ {
+-      int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
++      int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
+       struct sk_buff *skb;
+       struct macvlan_dev *vlan;
+       unsigned long total_len = iov_iter_count(from);
+@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct m
+                       linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
+       }
+-      skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
++      skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
+                               linear, noblock, &err);
+       if (!skb)
+               goto err;
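
The arithmetic behind the fix above, spelled out: the neighbour fast path copies a 16-byte (HH_DATA_MOD) block that ends at the network header, so at least 16 bytes of headroom must precede it. Reserving only NET_IP_ALIGN (0 on x86) leaves just the 14-byte Ethernet header in front, 2 bytes short. The constants below mirror that reasoning; the HH_DATA_OFF computation is simplified to the Ethernet case.

#include <stdio.h>

int main(void)
{
        const int HH_DATA_MOD  = 16;    /* size of the block the neighbour code copies */
        const int ETH_HLEN     = 14;    /* Ethernet header pushed in front of the payload */
        const int NET_IP_ALIGN = 0;     /* typical value on x86 */

        int old_headroom = NET_IP_ALIGN + ETH_HLEN;     /* 14: two bytes short */
        int new_reserve  = HH_DATA_MOD - ETH_HLEN;      /* 2: what HH_DATA_OFF(ETH_HLEN) gives here */
        int new_headroom = new_reserve + ETH_HLEN;      /* 16: enough for the 16-byte copy */

        printf("old headroom %d, required %d, new headroom %d\n",
               old_headroom, HH_DATA_MOD, new_headroom);
        return 0;
}
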
diff --git a/queue-3.19/net-bcmgenet-fix-software-maintained-statistics.patch b/queue-3.19/net-bcmgenet-fix-software-maintained-statistics.patch
new file mode 100644 (file)
index 0000000..f44975d
--- /dev/null
@@ -0,0 +1,66 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Florian Fainelli <f.fainelli@gmail.com>
+Date: Sat, 28 Feb 2015 18:09:16 -0800
+Subject: net: bcmgenet: fix software maintained statistics
+
+From: Florian Fainelli <f.fainelli@gmail.com>
+
+[ Upstream commit f62ba9c14b85a682b64a4c421f91de0bd2aa8538 ]
+
+Commit 44c8bc3ce39f ("net: bcmgenet: log RX buffer allocation and RX/TX dma
+failures") added a few software maintained statistics using
+BCMGENET_STAT_MIB_RX and BCMGENET_STAT_MIB_TX. These statistics are read from
+the hardware MIB counters, such that bcmgenet_update_mib_counters() was trying
+to read from a non-existing MIB offset for these counters.
+
+Fix this by introducing a special type: BCMGENET_STAT_SOFT, similar to
+BCMGENET_STAT_NETDEV, such that bcmgenet_get_ethtool_stats will read from the
+software mib.
+
+Fixes: 44c8bc3ce39f ("net: bcmgenet: log RX buffer allocation and RX/TX dma failures")
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
+       BCMGENET_STAT_MIB_TX,
+       BCMGENET_STAT_RUNT,
+       BCMGENET_STAT_MISC,
++      BCMGENET_STAT_SOFT,
+ };
+ struct bcmgenet_stats {
+@@ -515,6 +516,7 @@ struct bcmgenet_stats {
+ #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+ #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+ #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
++#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
+ #define STAT_GENET_MISC(str, m, offset) { \
+       .stat_string = str, \
+@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmge
+                       UMAC_RBUF_OVFL_CNT),
+       STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+       STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+-      STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+-      STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
+-      STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
++      STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
++      STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
++      STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
+ };
+ #define BCMGENET_STATS_LEN    ARRAY_SIZE(bcmgenet_gstrings_stats)
+@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters
+               s = &bcmgenet_gstrings_stats[i];
+               switch (s->type) {
+               case BCMGENET_STAT_NETDEV:
++              case BCMGENET_STAT_SOFT:
+                       continue;
+               case BCMGENET_STAT_MIB_RX:
+               case BCMGENET_STAT_MIB_TX:
diff --git a/queue-3.19/net-bcmgenet-fix-throughtput-regression.patch b/queue-3.19/net-bcmgenet-fix-throughtput-regression.patch
new file mode 100644 (file)
index 0000000..edbc9bb
--- /dev/null
@@ -0,0 +1,296 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Jaedon Shin <jaedon.shin@gmail.com>
+Date: Sat, 28 Feb 2015 11:48:26 +0900
+Subject: net: bcmgenet: fix throughtput regression
+
+From: Jaedon Shin <jaedon.shin@gmail.com>
+
+[ Upstream commit 4092e6acf5cb16f56154e2dd22d647023dc3d646 ]
+
+This patch adds bcmgenet_tx_poll for the tx_rings. This can reduce the
+interrupt load and send xmit in network stack on time. This also
+separated for the completion of tx_ring16 from bcmgenet_poll.
+
+The bcmgenet_tx_reclaim of tx_ring[{0,1,2,3}] operative by an interrupt
+is to be not more than a certain number TxBDs. It is caused by too
+slowly reclaiming the transmitted skb. Therefore, performance
+degradation of xmit after 605ad7f ("tcp: refine TSO autosizing").
+
+Signed-off-by: Jaedon Shin <jaedon.shin@gmail.com>
+Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/genet/bcmgenet.c |  113 +++++++++++++++++++------
+ drivers/net/ethernet/broadcom/genet/bcmgenet.h |    2 
+ 2 files changed, 88 insertions(+), 27 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+@@ -971,13 +971,14 @@ static inline void bcmgenet_tx_ring_int_
+ }
+ /* Unlocked version of the reclaim routine */
+-static void __bcmgenet_tx_reclaim(struct net_device *dev,
+-                                struct bcmgenet_tx_ring *ring)
++static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
++                                        struct bcmgenet_tx_ring *ring)
+ {
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int last_tx_cn, last_c_index, num_tx_bds;
+       struct enet_cb *tx_cb_ptr;
+       struct netdev_queue *txq;
++      unsigned int pkts_compl = 0;
+       unsigned int bds_compl;
+       unsigned int c_index;
+@@ -1005,6 +1006,7 @@ static void __bcmgenet_tx_reclaim(struct
+               tx_cb_ptr = ring->cbs + last_c_index;
+               bds_compl = 0;
+               if (tx_cb_ptr->skb) {
++                      pkts_compl++;
+                       bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
+                       dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+                       dma_unmap_single(&dev->dev,
+@@ -1028,23 +1030,45 @@ static void __bcmgenet_tx_reclaim(struct
+               last_c_index &= (num_tx_bds - 1);
+       }
+-      if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+-              ring->int_disable(priv, ring);
+-
+-      if (netif_tx_queue_stopped(txq))
+-              netif_tx_wake_queue(txq);
++      if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
++              if (netif_tx_queue_stopped(txq))
++                      netif_tx_wake_queue(txq);
++      }
+       ring->c_index = c_index;
++
++      return pkts_compl;
+ }
+-static void bcmgenet_tx_reclaim(struct net_device *dev,
++static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+                               struct bcmgenet_tx_ring *ring)
+ {
++      unsigned int released;
+       unsigned long flags;
+       spin_lock_irqsave(&ring->lock, flags);
+-      __bcmgenet_tx_reclaim(dev, ring);
++      released = __bcmgenet_tx_reclaim(dev, ring);
+       spin_unlock_irqrestore(&ring->lock, flags);
++
++      return released;
++}
++
++static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
++{
++      struct bcmgenet_tx_ring *ring =
++              container_of(napi, struct bcmgenet_tx_ring, napi);
++      unsigned int work_done = 0;
++
++      work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
++
++      if (work_done == 0) {
++              napi_complete(napi);
++              ring->int_enable(ring->priv, ring);
++
++              return 0;
++      }
++
++      return budget;
+ }
+ static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+@@ -1302,10 +1326,8 @@ static netdev_tx_t bcmgenet_xmit(struct
+       bcmgenet_tdma_ring_writel(priv, ring->index,
+                                 ring->prod_index, TDMA_PROD_INDEX);
+-      if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
++      if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
+               netif_tx_stop_queue(txq);
+-              ring->int_enable(priv, ring);
+-      }
+ out:
+       spin_unlock_irqrestore(&ring->lock, flags);
+@@ -1621,6 +1643,7 @@ static int init_umac(struct bcmgenet_pri
+       struct device *kdev = &priv->pdev->dev;
+       int ret;
+       u32 reg, cpu_mask_clear;
++      int index;
+       dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+@@ -1647,7 +1670,7 @@ static int init_umac(struct bcmgenet_pri
+       bcmgenet_intr_disable(priv);
+-      cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
++      cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
+       dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+@@ -1674,6 +1697,10 @@ static int init_umac(struct bcmgenet_pri
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
++      for (index = 0; index < priv->hw_params->tx_queues; index++)
++              bcmgenet_intrl2_1_writel(priv, (1 << index),
++                                       INTRL2_CPU_MASK_CLEAR);
++
+       /* Enable rx/tx engine.*/
+       dev_dbg(kdev, "done init umac\n");
+@@ -1693,6 +1720,8 @@ static void bcmgenet_init_tx_ring(struct
+       unsigned int first_bd;
+       spin_lock_init(&ring->lock);
++      ring->priv = priv;
++      netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
+       ring->index = index;
+       if (index == DESC_INDEX) {
+               ring->queue = 0;
+@@ -1738,6 +1767,17 @@ static void bcmgenet_init_tx_ring(struct
+                                 TDMA_WRITE_PTR);
+       bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+                                 DMA_END_ADDR);
++
++      napi_enable(&ring->napi);
++}
++
++static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
++                                unsigned int index)
++{
++      struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
++
++      napi_disable(&ring->napi);
++      netif_napi_del(&ring->napi);
+ }
+ /* Initialize a RDMA ring */
+@@ -1907,7 +1947,7 @@ static int bcmgenet_dma_teardown(struct
+       return ret;
+ }
+-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+ {
+       int i;
+@@ -1926,6 +1966,18 @@ static void bcmgenet_fini_dma(struct bcm
+       kfree(priv->tx_cbs);
+ }
++static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
++{
++      int i;
++
++      bcmgenet_fini_tx_ring(priv, DESC_INDEX);
++
++      for (i = 0; i < priv->hw_params->tx_queues; i++)
++              bcmgenet_fini_tx_ring(priv, i);
++
++      __bcmgenet_fini_dma(priv);
++}
++
+ /* init_edma: Initialize DMA control register */
+ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+ {
+@@ -1952,7 +2004,7 @@ static int bcmgenet_init_dma(struct bcmg
+       priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+                              GFP_KERNEL);
+       if (!priv->tx_cbs) {
+-              bcmgenet_fini_dma(priv);
++              __bcmgenet_fini_dma(priv);
+               return -ENOMEM;
+       }
+@@ -1975,9 +2027,6 @@ static int bcmgenet_poll(struct napi_str
+                       struct bcmgenet_priv, napi);
+       unsigned int work_done;
+-      /* tx reclaim */
+-      bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+-
+       work_done = bcmgenet_desc_rx(priv, budget);
+       /* Advancing our consumer index*/
+@@ -2022,28 +2071,34 @@ static void bcmgenet_irq_task(struct wor
+ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+ {
+       struct bcmgenet_priv *priv = dev_id;
++      struct bcmgenet_tx_ring *ring;
+       unsigned int index;
+       /* Save irq status for bottom-half processing. */
+       priv->irq1_stat =
+               bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+-              ~priv->int1_mask;
++              ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+       /* clear interrupts */
+       bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+       netif_dbg(priv, intr, priv->dev,
+                 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
++
+       /* Check the MBDONE interrupts.
+        * packet is done, reclaim descriptors
+        */
+-      if (priv->irq1_stat & 0x0000ffff) {
+-              index = 0;
+-              for (index = 0; index < 16; index++) {
+-                      if (priv->irq1_stat & (1 << index))
+-                              bcmgenet_tx_reclaim(priv->dev,
+-                                                  &priv->tx_rings[index]);
++      for (index = 0; index < priv->hw_params->tx_queues; index++) {
++              if (!(priv->irq1_stat & BIT(index)))
++                      continue;
++
++              ring = &priv->tx_rings[index];
++
++              if (likely(napi_schedule_prep(&ring->napi))) {
++                      ring->int_disable(priv, ring);
++                      __napi_schedule(&ring->napi);
+               }
+       }
++
+       return IRQ_HANDLED;
+ }
+@@ -2075,8 +2130,12 @@ static irqreturn_t bcmgenet_isr0(int irq
+       }
+       if (priv->irq0_stat &
+                       (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+-              /* Tx reclaim */
+-              bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
++              struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
++
++              if (likely(napi_schedule_prep(&ring->napi))) {
++                      ring->int_disable(priv, ring);
++                      __napi_schedule(&ring->napi);
++              }
+       }
+       if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+                               UMAC_IRQ_PHY_DET_F |
+--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
++++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
+ struct bcmgenet_tx_ring {
+       spinlock_t      lock;           /* ring lock */
++      struct napi_struct napi;        /* NAPI per tx queue */
+       unsigned int    index;          /* ring index */
+       unsigned int    queue;          /* queue index */
+       struct enet_cb  *cbs;           /* tx ring buffer control block*/
+@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
+                          struct bcmgenet_tx_ring *);
+       void (*int_disable)(struct bcmgenet_priv *priv,
+                           struct bcmgenet_tx_ring *);
++      struct bcmgenet_priv *priv;
+ };
+ /* device context */
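
The structural change above is the classic split between a hard interrupt handler and a NAPI poller: the interrupt only masks itself and schedules the poller, and the poller reclaims completed TX descriptors, re-enabling the interrupt once there is nothing left to do. A self-contained sketch of that shape follows; struct ring, struct poller and the helper names are invented, while the driver itself uses napi_schedule_prep(), __napi_schedule() and napi_complete().

#include <stdbool.h>

struct poller { bool scheduled; };

struct ring {
        struct poller poller;
        bool          irq_enabled;
        int           pending;          /* completed-but-unreclaimed TX work */
};

static void ring_irq_disable(struct ring *r) { r->irq_enabled = false; }
static void ring_irq_enable(struct ring *r)  { r->irq_enabled = true;  }
static void poller_schedule(struct poller *p){ p->scheduled = true;    }
static void poller_complete(struct poller *p){ p->scheduled = false;   }

static int reclaim_completed(struct ring *r)
{
        int done = r->pending;          /* stand-in for freeing finished skbs */
        r->pending = 0;
        return done;
}

/* Hard IRQ: mask this ring's interrupt and defer the work, as bcmgenet_isr1()
 * now does with ring->int_disable() followed by __napi_schedule(). */
void tx_irq(struct ring *r)
{
        ring_irq_disable(r);
        poller_schedule(&r->poller);
}

/* Poll: reclaim finished work; only when nothing was pending, stop polling
 * and unmask the interrupt again, as bcmgenet_tx_poll() does. */
int tx_poll(struct ring *r, int budget)
{
        if (reclaim_completed(r) == 0) {
                poller_complete(&r->poller);
                ring_irq_enable(r);
                return 0;
        }
        return budget;                  /* more work may arrive; keep polling */
}
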
diff --git a/queue-3.19/net-compat-ignore-msg_cmsg_compat-in-compat_sys_-send-recv-msg.patch b/queue-3.19/net-compat-ignore-msg_cmsg_compat-in-compat_sys_-send-recv-msg.patch
new file mode 100644 (file)
index 0000000..3b463ab
--- /dev/null
@@ -0,0 +1,75 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Mon, 23 Feb 2015 18:12:56 +0000
+Subject: net: compat: Ignore MSG_CMSG_COMPAT in compat_sys_{send, recv}msg
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+[ Upstream commit d720d8cec563ce4e4fa44a613d4f2dcb1caf2998 ]
+
+With commit a7526eb5d06b (net: Unbreak compat_sys_{send,recv}msg), the
+MSG_CMSG_COMPAT flag is blocked at the compat syscall entry points,
+changing the kernel compat behaviour from the one before the commit it
+was trying to fix (1be374a0518a, net: Block MSG_CMSG_COMPAT in
+send(m)msg and recv(m)msg).
+
+On 32-bit kernels (!CONFIG_COMPAT), MSG_CMSG_COMPAT is 0 and the native
+32-bit sys_sendmsg() allows flag 0x80000000 to be set (it is ignored by
+the kernel). However, on a 64-bit kernel, the compat ABI is different
+with commit a7526eb5d06b.
+
+This patch changes the compat_sys_{send,recv}msg behaviour to the one
+prior to commit 1be374a0518a.
+
+The problem was found running 32-bit LTP (sendmsg01) binary on an arm64
+kernel. Arguably, LTP should not pass 0xffffffff as flags to sendmsg()
+but the general rule is not to break user ABI (even when the user
+behaviour is not entirely sane).
+
+Fixes: a7526eb5d06b (net: Unbreak compat_sys_{send,recv}msg)
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: David S. Miller <davem@davemloft.net>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/compat.c |    9 ---------
+ 1 file changed, 9 deletions(-)
+
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
+ COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+-      if (flags & MSG_CMSG_COMPAT)
+-              return -EINVAL;
+       return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+ COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
+                      unsigned int, vlen, unsigned int, flags)
+ {
+-      if (flags & MSG_CMSG_COMPAT)
+-              return -EINVAL;
+       return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+                             flags | MSG_CMSG_COMPAT);
+ }
+ COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
+ {
+-      if (flags & MSG_CMSG_COMPAT)
+-              return -EINVAL;
+       return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
+ }
+@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd
+       int datagrams;
+       struct timespec ktspec;
+-      if (flags & MSG_CMSG_COMPAT)
+-              return -EINVAL;
+-
+       if (timeout == NULL)
+               return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
+                                     flags | MSG_CMSG_COMPAT, NULL);
diff --git a/queue-3.19/net-do-not-use-rcu-in-rtnl_dump_ifinfo.patch b/queue-3.19/net-do-not-use-rcu-in-rtnl_dump_ifinfo.patch
new file mode 100644 (file)
index 0000000..dbe998d
--- /dev/null
@@ -0,0 +1,105 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 27 Feb 2015 09:42:50 -0800
+Subject: net: do not use rcu in rtnl_dump_ifinfo()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cac5e65e8a7ea074f2626d2eaa53aa308452dec4 ]
+
+We did a failed attempt in the past to only use rcu in rtnl dump
+operations (commit e67f88dd12f6 "net: dont hold rtnl mutex during
+netlink dump callbacks")
+
+Now that dumps are holding RTNL anyway, there is no need to also
+use rcu locking, as it forbids any scheduling ability, like
+GFP_KERNEL allocations that controlling path should use instead
+of GFP_ATOMIC whenever possible.
+
+This should fix following splat Cong Wang reported :
+
+ [ INFO: suspicious RCU usage. ]
+ 3.19.0+ #805 Tainted: G        W
+
+ include/linux/rcupdate.h:538 Illegal context switch in RCU read-side critical section!
+
+ other info that might help us debug this:
+
+ rcu_scheduler_active = 1, debug_locks = 0
+ 2 locks held by ip/771:
+  #0:  (rtnl_mutex){+.+.+.}, at: [<ffffffff8182b8f4>] netlink_dump+0x21/0x26c
+  #1:  (rcu_read_lock){......}, at: [<ffffffff817d785b>] rcu_read_lock+0x0/0x6e
+
+ stack backtrace:
+ CPU: 3 PID: 771 Comm: ip Tainted: G        W       3.19.0+ #805
+ Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+  0000000000000001 ffff8800d51e7718 ffffffff81a27457 0000000029e729e6
+  ffff8800d6108000 ffff8800d51e7748 ffffffff810b539b ffffffff820013dd
+  00000000000001c8 0000000000000000 ffff8800d7448088 ffff8800d51e7758
+ Call Trace:
+  [<ffffffff81a27457>] dump_stack+0x4c/0x65
+  [<ffffffff810b539b>] lockdep_rcu_suspicious+0x107/0x110
+  [<ffffffff8109796f>] rcu_preempt_sleep_check+0x45/0x47
+  [<ffffffff8109e457>] ___might_sleep+0x1d/0x1cb
+  [<ffffffff8109e67d>] __might_sleep+0x78/0x80
+  [<ffffffff814b9b1f>] idr_alloc+0x45/0xd1
+  [<ffffffff810cb7ab>] ? rcu_read_lock_held+0x3b/0x3d
+  [<ffffffff814b9f9d>] ? idr_for_each+0x53/0x101
+  [<ffffffff817c1383>] alloc_netid+0x61/0x69
+  [<ffffffff817c14c3>] __peernet2id+0x79/0x8d
+  [<ffffffff817c1ab7>] peernet2id+0x13/0x1f
+  [<ffffffff817d8673>] rtnl_fill_ifinfo+0xa8d/0xc20
+  [<ffffffff810b17d9>] ? __lock_is_held+0x39/0x52
+  [<ffffffff817d894f>] rtnl_dump_ifinfo+0x149/0x213
+  [<ffffffff8182b9c2>] netlink_dump+0xef/0x26c
+  [<ffffffff8182bcba>] netlink_recvmsg+0x17b/0x2c5
+  [<ffffffff817b0adc>] __sock_recvmsg+0x4e/0x59
+  [<ffffffff817b1b40>] sock_recvmsg+0x3f/0x51
+  [<ffffffff817b1f9a>] ___sys_recvmsg+0xf6/0x1d9
+  [<ffffffff8115dc67>] ? handle_pte_fault+0x6e1/0xd3d
+  [<ffffffff8100a3a0>] ? native_sched_clock+0x35/0x37
+  [<ffffffff8109f45b>] ? sched_clock_local+0x12/0x72
+  [<ffffffff8109f6ac>] ? sched_clock_cpu+0x9e/0xb7
+  [<ffffffff810cb7ab>] ? rcu_read_lock_held+0x3b/0x3d
+  [<ffffffff811abde8>] ? __fcheck_files+0x4c/0x58
+  [<ffffffff811ac556>] ? __fget_light+0x2d/0x52
+  [<ffffffff817b376f>] __sys_recvmsg+0x42/0x60
+  [<ffffffff817b379f>] SyS_recvmsg+0x12/0x1c
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: 0c7aecd4bde4b7302 ("netns: add rtnl cmd to add and get peer netns ids")
+Cc: Nicolas Dichtel <nicolas.dichtel@6wind.com>
+Reported-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1274,7 +1274,6 @@ static int rtnl_dump_ifinfo(struct sk_bu
+       s_h = cb->args[0];
+       s_idx = cb->args[1];
+-      rcu_read_lock();
+       cb->seq = net->dev_base_seq;
+       /* A hack to preserve kernel<->userspace interface.
+@@ -1296,7 +1295,7 @@ static int rtnl_dump_ifinfo(struct sk_bu
+       for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+               idx = 0;
+               head = &net->dev_index_head[h];
+-              hlist_for_each_entry_rcu(dev, head, index_hlist) {
++              hlist_for_each_entry(dev, head, index_hlist) {
+                       if (idx < s_idx)
+                               goto cont;
+                       err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+@@ -1318,7 +1317,6 @@ cont:
+               }
+       }
+ out:
+-      rcu_read_unlock();
+       cb->args[1] = idx;
+       cb->args[0] = h;
diff --git a/queue-3.19/net-phy-fix-verification-of-eee-support-in-phy_init_eee.patch b/queue-3.19/net-phy-fix-verification-of-eee-support-in-phy_init_eee.patch
new file mode 100644 (file)
index 0000000..8fdafdc
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Guenter Roeck <linux@roeck-us.net>
+Date: Tue, 17 Feb 2015 09:36:22 -0800
+Subject: net: phy: Fix verification of EEE support in phy_init_eee
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 54da5a8be3c1e924c35480eb44c6e9b275f6444e ]
+
+phy_init_eee uses phy_find_setting(phydev->speed, phydev->duplex)
+to find a valid entry in the settings array for the given speed
+and duplex value. For full duplex 1000baseT, this will return
+the first matching entry, which is the entry for 1000baseKX_Full.
+
+If the phy eee does not support 1000baseKX_Full, this entry will not
+match, causing phy_init_eee to fail for no good reason.
+
+Fixes: 9a9c56cb34e6 ("net: phy: fix a bug when verify the EEE support")
+Fixes: 3e7077067e80c ("phy: Expand phy speed/duplex settings array")
+Cc: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy.c |   23 ++++++++++++++++++++---
+ 1 file changed, 20 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/phy/phy.c
++++ b/drivers/net/phy/phy.c
+@@ -236,6 +236,25 @@ static inline unsigned int phy_find_vali
+ }
+ /**
++ * phy_check_valid - check if there is a valid PHY setting which matches
++ *                 speed, duplex, and feature mask
++ * @speed: speed to match
++ * @duplex: duplex to match
++ * @features: A mask of the valid settings
++ *
++ * Description: Returns true if there is a valid setting, false otherwise.
++ */
++static inline bool phy_check_valid(int speed, int duplex, u32 features)
++{
++      unsigned int idx;
++
++      idx = phy_find_valid(phy_find_setting(speed, duplex), features);
++
++      return settings[idx].speed == speed && settings[idx].duplex == duplex &&
++              (settings[idx].setting & features);
++}
++
++/**
+  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
+  * @phydev: the target phy_device struct
+  *
+@@ -1042,7 +1061,6 @@ int phy_init_eee(struct phy_device *phyd
+               int eee_lp, eee_cap, eee_adv;
+               u32 lp, cap, adv;
+               int status;
+-              unsigned int idx;
+               /* Read phy status to properly get the right settings */
+               status = phy_read_status(phydev);
+@@ -1074,8 +1092,7 @@ int phy_init_eee(struct phy_device *phyd
+               adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+               lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
+-              idx = phy_find_setting(phydev->speed, phydev->duplex);
+-              if (!(lp & adv & settings[idx].setting))
++              if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
+                       goto eee_exit_err;
+               if (clk_stop_enable) {
diff --git a/queue-3.19/net-ping-return-eafnosupport-when-appropriate.patch b/queue-3.19/net-ping-return-eafnosupport-when-appropriate.patch
new file mode 100644 (file)
index 0000000..041a933
--- /dev/null
@@ -0,0 +1,101 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Lorenzo Colitti <lorenzo@google.com>
+Date: Tue, 3 Mar 2015 23:16:16 +0900
+Subject: net: ping: Return EAFNOSUPPORT when appropriate.
+
+From: Lorenzo Colitti <lorenzo@google.com>
+
+[ Upstream commit 9145736d4862145684009d6a72a6e61324a9439e ]
+
+1. For an IPv4 ping socket, ping_check_bind_addr does not check
+   the family of the socket address that's passed in. Instead,
+   make it behave like inet_bind, which enforces either that the
+   address family is AF_INET, or that the family is AF_UNSPEC and
+   the address is 0.0.0.0.
+2. For an IPv6 ping socket, ping_check_bind_addr returns EINVAL
+   if the socket family is not AF_INET6. Return EAFNOSUPPORT
+   instead, for consistency with inet6_bind.
+3. Make ping_v4_sendmsg and ping_v6_sendmsg return EAFNOSUPPORT
+   instead of EINVAL if an incorrect socket address structure is
+   passed in.
+4. Make IPv6 ping sockets be IPv6-only. The code does not support
+   IPv4, and it cannot easily be made to support IPv4 because
+   the protocol numbers for ICMP and ICMPv6 are different. This
+   makes connect(::ffff:192.0.2.1) fail with EAFNOSUPPORT instead
+   of making the socket unusable.
+
+Among other things, this fixes an oops that can be triggered by:
+
+    int s = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP);
+    struct sockaddr_in6 sin6 = {
+        .sin6_family = AF_INET6,
+        .sin6_addr = in6addr_any,
+    };
+    bind(s, (struct sockaddr *) &sin6, sizeof(sin6));
+
+Change-Id: If06ca86d9f1e4593c0d6df174caca3487c57a241
+Signed-off-by: Lorenzo Colitti <lorenzo@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ping.c |   12 ++++++++++--
+ net/ipv6/ping.c |    5 +++--
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/ping.c
++++ b/net/ipv4/ping.c
+@@ -259,6 +259,9 @@ int ping_init_sock(struct sock *sk)
+       kgid_t low, high;
+       int ret = 0;
++      if (sk->sk_family == AF_INET6)
++              sk->sk_ipv6only = 1;
++
+       inet_get_ping_group_range_net(net, &low, &high);
+       if (gid_lte(low, group) && gid_lte(group, high))
+               return 0;
+@@ -305,6 +308,11 @@ static int ping_check_bind_addr(struct s
+               if (addr_len < sizeof(*addr))
+                       return -EINVAL;
++              if (addr->sin_family != AF_INET &&
++                  !(addr->sin_family == AF_UNSPEC &&
++                    addr->sin_addr.s_addr == htonl(INADDR_ANY)))
++                      return -EAFNOSUPPORT;
++
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI4,port=%d)\n",
+                        sk, &addr->sin_addr.s_addr, ntohs(addr->sin_port));
+@@ -330,7 +338,7 @@ static int ping_check_bind_addr(struct s
+                       return -EINVAL;
+               if (addr->sin6_family != AF_INET6)
+-                      return -EINVAL;
++                      return -EAFNOSUPPORT;
+               pr_debug("ping_check_bind_addr(sk=%p,addr=%pI6c,port=%d)\n",
+                        sk, addr->sin6_addr.s6_addr, ntohs(addr->sin6_port));
+@@ -716,7 +724,7 @@ static int ping_v4_sendmsg(struct kiocb
+               if (msg->msg_namelen < sizeof(*usin))
+                       return -EINVAL;
+               if (usin->sin_family != AF_INET)
+-                      return -EINVAL;
++                      return -EAFNOSUPPORT;
+               daddr = usin->sin_addr.s_addr;
+               /* no remote port */
+       } else {
+--- a/net/ipv6/ping.c
++++ b/net/ipv6/ping.c
+@@ -102,9 +102,10 @@ int ping_v6_sendmsg(struct kiocb *iocb,
+       if (msg->msg_name) {
+               DECLARE_SOCKADDR(struct sockaddr_in6 *, u, msg->msg_name);
+-              if (msg->msg_namelen < sizeof(struct sockaddr_in6) ||
+-                  u->sin6_family != AF_INET6) {
++              if (msg->msg_namelen < sizeof(*u))
+                       return -EINVAL;
++              if (u->sin6_family != AF_INET6) {
++                      return -EAFNOSUPPORT;
+               }
+               if (sk->sk_bound_dev_if &&
+                   sk->sk_bound_dev_if != u->sin6_scope_id) {
diff --git a/queue-3.19/net-pktgen-disable-xmit_clone-on-virtual-devices.patch b/queue-3.19/net-pktgen-disable-xmit_clone-on-virtual-devices.patch
new file mode 100644 (file)
index 0000000..febebe6
--- /dev/null
@@ -0,0 +1,37 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 22 Feb 2015 17:03:41 -0800
+Subject: net: pktgen: disable xmit_clone on virtual devices
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 52d6c8c6ca125872459054daa70f2f1c698c8e75 ]
+
+Trying to use burst capability (aka xmit_more) on a virtual device
+like bonding is not supported.
+
+For example, skb might be queued multiple times on a qdisc, with
+various list corruptions.
+
+Fixes: 38b2cf2982dc ("net: pktgen: packet bursting via skb->xmit_more")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Alexei Starovoitov <ast@plumgrid.com>
+Acked-by: Alexei Starovoitov <ast@plumgrid.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/pktgen.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct fi
+                       return len;
+               i += len;
++              if ((value > 1) &&
++                  (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
++                      return -ENOTSUPP;
+               pkt_dev->burst = value < 1 ? 1 : value;
+               sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
+               return count;
diff --git a/queue-3.19/net-reject-creation-of-netdev-names-with-colons.patch b/queue-3.19/net-reject-creation-of-netdev-names-with-colons.patch
new file mode 100644 (file)
index 0000000..364993a
--- /dev/null
@@ -0,0 +1,31 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Matthew Thode <mthode@mthode.org>
+Date: Tue, 17 Feb 2015 18:31:57 -0600
+Subject: net: reject creation of netdev names with colons
+
+From: Matthew Thode <mthode@mthode.org>
+
+[ Upstream commit a4176a9391868bfa87705bcd2e3b49e9b9dd2996 ]
+
+Colons are used as a separator in netdev device lookup in dev_ioctl.c.
+
+Specific functions affected are SIOCGIFTXQLEN, SIOCETHTOOL and SIOCSIFNAME.
+
+Signed-off-by: Matthew Thode <mthode@mthode.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -945,7 +945,7 @@ bool dev_valid_name(const char *name)
+               return false;
+       while (*name) {
+-              if (*name == '/' || isspace(*name))
++              if (*name == '/' || *name == ':' || isspace(*name))
+                       return false;
+               name++;
+       }
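
For context, a standalone sketch approximating the complete validation
rule after this change (illustrative only; the authoritative version is
dev_valid_name() in net/core/dev.c):

#include <ctype.h>
#include <stdbool.h>
#include <string.h>

#define IFNAMSIZ 16

static bool valid_netdev_name(const char *name)
{
        if (*name == '\0' || strnlen(name, IFNAMSIZ) == IFNAMSIZ)
                return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
                return false;
        while (*name) {
                /* '/' and whitespace were already rejected; ':' now is too */
                if (*name == '/' || *name == ':' || isspace((unsigned char)*name))
                        return false;
                name++;
        }
        return true;
}
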
diff --git a/queue-3.19/openvswitch-fix-net-exit.patch b/queue-3.19/openvswitch-fix-net-exit.patch
new file mode 100644 (file)
index 0000000..7e33d54
--- /dev/null
@@ -0,0 +1,123 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Pravin B Shelar <pshelar@nicira.com>
+Date: Tue, 17 Feb 2015 11:23:10 -0800
+Subject: openvswitch: Fix net exit.
+
+From: Pravin B Shelar <pshelar@nicira.com>
+
+[ Upstream commit 7b4577a9da3702049650f7095506e9afd9f68849 ]
+
+Open vSwitch allows moving an internal vport to a different namespace
+while it is still connected to the bridge. But when that namespace is
+deleted, OVS does not detach these vports, which leaves a dangling
+pointer to the netdevice and causes a kernel panic as follows.
+This issue is fixed by detaching all OVS ports from the deleted
+namespace at net-exit.
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000028
+IP: [<ffffffffa0aadaa5>] ovs_vport_locate+0x35/0x80 [openvswitch]
+Oops: 0000 [#1] SMP
+Call Trace:
+ [<ffffffffa0aa6391>] lookup_vport+0x21/0xd0 [openvswitch]
+ [<ffffffffa0aa65f9>] ovs_vport_cmd_get+0x59/0xf0 [openvswitch]
+ [<ffffffff8167e07c>] genl_family_rcv_msg+0x1bc/0x3e0
+ [<ffffffff8167e319>] genl_rcv_msg+0x79/0xc0
+ [<ffffffff8167d919>] netlink_rcv_skb+0xb9/0xe0
+ [<ffffffff8167deac>] genl_rcv+0x2c/0x40
+ [<ffffffff8167cffd>] netlink_unicast+0x12d/0x1c0
+ [<ffffffff8167d3da>] netlink_sendmsg+0x34a/0x6b0
+ [<ffffffff8162e140>] sock_sendmsg+0xa0/0xe0
+ [<ffffffff8162e5e8>] ___sys_sendmsg+0x408/0x420
+ [<ffffffff8162f541>] __sys_sendmsg+0x51/0x90
+ [<ffffffff8162f592>] SyS_sendmsg+0x12/0x20
+ [<ffffffff81764ee9>] system_call_fastpath+0x12/0x17
+
+Reported-by: Assaf Muller <amuller@redhat.com>
+Fixes: 46df7b81454("openvswitch: Add support for network namespaces.")
+Signed-off-by: Pravin B Shelar <pshelar@nicira.com>
+Reviewed-by: Thomas Graf <tgraf@noironetworks.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/openvswitch/datapath.c |   45 +++++++++++++++++++++++++++++++++++++++++++--
+ net/openvswitch/vport.h    |    2 ++
+ 2 files changed, 45 insertions(+), 2 deletions(-)
+
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -2113,14 +2113,55 @@ static int __net_init ovs_init_net(struc
+       return 0;
+ }
+-static void __net_exit ovs_exit_net(struct net *net)
++static void __net_exit list_vports_from_net(struct net *net, struct net *dnet,
++                                          struct list_head *head)
+ {
+-      struct datapath *dp, *dp_next;
+       struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
++      struct datapath *dp;
++
++      list_for_each_entry(dp, &ovs_net->dps, list_node) {
++              int i;
++
++              for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
++                      struct vport *vport;
++
++                      hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) {
++                              struct netdev_vport *netdev_vport;
++
++                              if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL)
++                                      continue;
++
++                              netdev_vport = netdev_vport_priv(vport);
++                              if (dev_net(netdev_vport->dev) == dnet)
++                                      list_add(&vport->detach_list, head);
++                      }
++              }
++      }
++}
++
++static void __net_exit ovs_exit_net(struct net *dnet)
++{
++      struct datapath *dp, *dp_next;
++      struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id);
++      struct vport *vport, *vport_next;
++      struct net *net;
++      LIST_HEAD(head);
+       ovs_lock();
+       list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
+               __dp_destroy(dp);
++
++      rtnl_lock();
++      for_each_net(net)
++              list_vports_from_net(net, dnet, &head);
++      rtnl_unlock();
++
++      /* Detach all vports from given namespace. */
++      list_for_each_entry_safe(vport, vport_next, &head, detach_list) {
++              list_del(&vport->detach_list);
++              ovs_dp_detach_port(vport);
++      }
++
+       ovs_unlock();
+       cancel_work_sync(&ovs_net->dp_notify_work);
+--- a/net/openvswitch/vport.h
++++ b/net/openvswitch/vport.h
+@@ -103,6 +103,7 @@ struct vport_portids {
+  * @ops: Class structure.
+  * @percpu_stats: Points to per-CPU statistics used and maintained by vport
+  * @err_stats: Points to error statistics used and maintained by vport
++ * @detach_list: list used for detaching vport in net-exit call.
+  */
+ struct vport {
+       struct rcu_head rcu;
+@@ -117,6 +118,7 @@ struct vport {
+       struct pcpu_sw_netstats __percpu *percpu_stats;
+       struct vport_err_stats err_stats;
++      struct list_head detach_list;
+ };
+ /**
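
For orientation, ovs_exit_net() above is a per-namespace exit hook; a
simplified sketch of how such a hook is typically wired up through
pernet_operations (the struct name below is made up, the field values
mirror the OVS ones):

static struct pernet_operations ovs_net_ops_sketch = {
        .init = ovs_init_net,           /* called when a netns is created */
        .exit = ovs_exit_net,           /* called when a netns is torn down */
        .id   = &ovs_net_id,
        .size = sizeof(struct ovs_net),
};

/* registered once from the module init path, e.g.:
 *      err = register_pernet_device(&ovs_net_ops_sketch);
 */
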
diff --git a/queue-3.19/pktgen-fix-udp-checksum-computation.patch b/queue-3.19/pktgen-fix-udp-checksum-computation.patch
new file mode 100644 (file)
index 0000000..2f3d64f
--- /dev/null
@@ -0,0 +1,87 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Sabrina Dubroca <sd@queasysnail.net>
+Date: Wed, 4 Feb 2015 23:08:50 +0100
+Subject: pktgen: fix UDP checksum computation
+
+From: Sabrina Dubroca <sd@queasysnail.net>
+
+[ Upstream commit 7744b5f3693cc06695cb9d6667671c790282730f ]
+
+This patch fixes two issues in UDP checksum computation in pktgen.
+
+First, the pseudo-header must use the source and destination IP
+addresses; currently, the UDP ports are used instead for IPv4.
+
+Second, the UDP checksum covers both header and data.  So we need to
+generate the data earlier (move pktgen_finalize_skb up), and compute
+the checksum for UDP header + data.
+
+Fixes: c26bf4a51308c ("pktgen: Add UDPCSUM flag to support UDP checksums")
+Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/pktgen.c |   16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -2842,25 +2842,25 @@ static struct sk_buff *fill_packet_ipv4(
+       skb->dev = odev;
+       skb->pkt_type = PACKET_HOST;
++      pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+       if (!(pkt_dev->flags & F_UDPCSUM)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else if (odev->features & NETIF_F_V4_CSUM) {
+               skb->ip_summed = CHECKSUM_PARTIAL;
+               skb->csum = 0;
+-              udp4_hwcsum(skb, udph->source, udph->dest);
++              udp4_hwcsum(skb, iph->saddr, iph->daddr);
+       } else {
+-              __wsum csum = udp_csum(skb);
++              __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);
+               /* add protocol-dependent pseudo-header */
+-              udph->check = csum_tcpudp_magic(udph->source, udph->dest,
++              udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+                                               datalen + 8, IPPROTO_UDP, csum);
+               if (udph->check == 0)
+                       udph->check = CSUM_MANGLED_0;
+       }
+-      pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+ #ifdef CONFIG_XFRM
+       if (!process_ipsec(pkt_dev, skb, protocol))
+               return NULL;
+@@ -2976,6 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(
+       skb->dev = odev;
+       skb->pkt_type = PACKET_HOST;
++      pktgen_finalize_skb(pkt_dev, skb, datalen);
++
+       if (!(pkt_dev->flags & F_UDPCSUM)) {
+               skb->ip_summed = CHECKSUM_NONE;
+       } else if (odev->features & NETIF_F_V6_CSUM) {
+@@ -2984,7 +2986,7 @@ static struct sk_buff *fill_packet_ipv6(
+               skb->csum_offset = offsetof(struct udphdr, check);
+               udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+       } else {
+-              __wsum csum = udp_csum(skb);
++              __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);
+               /* add protocol-dependent pseudo-header */
+               udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+@@ -2993,8 +2995,6 @@ static struct sk_buff *fill_packet_ipv6(
+                       udph->check = CSUM_MANGLED_0;
+       }
+-      pktgen_finalize_skb(pkt_dev, skb, datalen);
+-
+       return skb;
+ }
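
To make the pseudo-header point concrete, a small userspace sketch of the
IPv4 UDP checksum (illustrative only, not the kernel helpers used above):
the pseudo-header covers saddr, daddr, protocol and UDP length, and the
sum then covers the UDP header plus data.

#include <netinet/in.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* one's-complement sum over big-endian 16-bit words */
static uint32_t sum16(uint32_t sum, const uint8_t *p, size_t len)
{
        size_t i;

        for (i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)((p[i] << 8) | p[i + 1]);
        if (len & 1)
                sum += (uint32_t)(p[len - 1] << 8);
        return sum;
}

/* saddr/daddr in network byte order; udp points at the UDP header
 * (check field zeroed) followed by the payload, udplen bytes in total */
static uint16_t udp4_checksum(uint32_t saddr, uint32_t daddr,
                              const uint8_t *udp, size_t udplen)
{
        uint8_t pseudo[12];
        uint16_t len_be = htons((uint16_t)udplen);
        uint32_t sum;

        memcpy(pseudo, &saddr, 4);      /* pseudo-header uses the IP addresses, */
        memcpy(pseudo + 4, &daddr, 4);  /* not the UDP ports                    */
        pseudo[8] = 0;
        pseudo[9] = IPPROTO_UDP;
        memcpy(pseudo + 10, &len_be, 2);

        sum = sum16(0, pseudo, sizeof(pseudo));
        sum = sum16(sum, udp, udplen);  /* header + data, as the commit notes */

        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        sum = ~sum & 0xffff;
        return sum ? (uint16_t)sum : 0xffff;    /* 0 means "no checksum" */
}
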
diff --git a/queue-3.19/revert-r8169-add-support-for-byte-queue-limits.patch b/queue-3.19/revert-r8169-add-support-for-byte-queue-limits.patch
new file mode 100644 (file)
index 0000000..15f0ff3
--- /dev/null
@@ -0,0 +1,74 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: "David S. Miller" <davem@davemloft.net>
+Date: Tue, 10 Mar 2015 18:47:33 -0400
+Subject: Revert "r8169: add support for Byte Queue Limits"
+
+From: "David S. Miller" <davem@davemloft.net>
+
+This reverts commit 1e918876853aa85435e0f17fd8b4a92dcfff53d6.
+
+Revert BQL support in r8169 driver as several regressions
+point to this commit and we cannot figure out the real
+cause yet.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c |   18 ++++--------------
+ 1 file changed, 4 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_
+       RTL_W8(ChipCmd, CmdReset);
+       rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
+-
+-      netdev_reset_queue(tp->dev);
+ }
+ static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
+@@ -7089,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(st
+       txd->opts2 = cpu_to_le32(opts[1]);
+-      netdev_sent_queue(dev, skb->len);
+-
+       skb_tx_timestamp(skb);
+       /* Force memory writes to complete before releasing descriptor */
+@@ -7192,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(str
+ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ {
+       unsigned int dirty_tx, tx_left;
+-      unsigned int bytes_compl = 0, pkts_compl = 0;
+       dirty_tx = tp->dirty_tx;
+       smp_rmb();
+@@ -7216,8 +7211,10 @@ static void rtl_tx(struct net_device *de
+               rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+                                    tp->TxDescArray + entry);
+               if (status & LastFrag) {
+-                      pkts_compl++;
+-                      bytes_compl += tx_skb->skb->len;
++                      u64_stats_update_begin(&tp->tx_stats.syncp);
++                      tp->tx_stats.packets++;
++                      tp->tx_stats.bytes += tx_skb->skb->len;
++                      u64_stats_update_end(&tp->tx_stats.syncp);
+                       dev_kfree_skb_any(tx_skb->skb);
+                       tx_skb->skb = NULL;
+               }
+@@ -7226,13 +7223,6 @@ static void rtl_tx(struct net_device *de
+       }
+       if (tp->dirty_tx != dirty_tx) {
+-              netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+-
+-              u64_stats_update_begin(&tp->tx_stats.syncp);
+-              tp->tx_stats.packets += pkts_compl;
+-              tp->tx_stats.bytes += bytes_compl;
+-              u64_stats_update_end(&tp->tx_stats.syncp);
+-
+               tp->dirty_tx = dirty_tx;
+               /* Sync with rtl8169_start_xmit:
+                * - publish dirty_tx ring index (write barrier)
diff --git a/queue-3.19/rtnetlink-call-dellink-on-failure-when-newlink-exists.patch b/queue-3.19/rtnetlink-call-dellink-on-failure-when-newlink-exists.patch
new file mode 100644 (file)
index 0000000..037f9ed
--- /dev/null
@@ -0,0 +1,52 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: WANG Cong <xiyou.wangcong@gmail.com>
+Date: Fri, 13 Feb 2015 13:56:53 -0800
+Subject: rtnetlink: call ->dellink on failure when ->newlink exists
+
+From: WANG Cong <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 7afb8886a05be68e376655539a064ec672de8a8e ]
+
+Ignacy reported that when eth0 is down and a vlan device is added
+on top of it like:
+
+  ip link add link eth0 name eth0.1 up type vlan id 1
+
+We will get a refcount leak:
+
+  unregister_netdevice: waiting for eth0.1 to become free. Usage count = 2
+
+The problem is that when rtnl_configure_link() fails in rtnl_newlink(),
+we simply call unregister_netdevice(), but for a stacked device like
+vlan we almost do nothing when we unregister the upper device; more
+work is done when we unregister the lower device, so call its ->dellink().
+
+Reported-by: Ignacy Gawedzki <ignacy.gawedzki@green-communications.fr>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c |   12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2124,8 +2124,16 @@ replay:
+                       }
+               }
+               err = rtnl_configure_link(dev, ifm);
+-              if (err < 0)
+-                      unregister_netdevice(dev);
++              if (err < 0) {
++                      if (ops->newlink) {
++                              LIST_HEAD(list_kill);
++
++                              ops->dellink(dev, &list_kill);
++                              unregister_netdevice_many(&list_kill);
++                      } else {
++                              unregister_netdevice(dev);
++                      }
++              }
+ out:
+               put_net(dest_net);
+               return err;
diff --git a/queue-3.19/rtnetlink-ifla_vf_policy-fix-misuses-of-nla_binary.patch b/queue-3.19/rtnetlink-ifla_vf_policy-fix-misuses-of-nla_binary.patch
new file mode 100644 (file)
index 0000000..588bf59
--- /dev/null
@@ -0,0 +1,58 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Daniel Borkmann <daniel@iogearbox.net>
+Date: Thu, 5 Feb 2015 18:44:04 +0100
+Subject: rtnetlink: ifla_vf_policy: fix misuses of NLA_BINARY
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit 364d5716a7adb91b731a35765d369602d68d2881 ]
+
+ifla_vf_policy[] is wrong in advertising its individual member types as
+NLA_BINARY since .type = NLA_BINARY in combination with .len declares the
+len member as *max* attribute length [0, len].
+
+The issue is that when do_setvfinfo() is being called to set up a VF
+through the ndo handler, we could set corrupted data if the attribute length
+is less than the size of the related structure itself.
+
+The intent is exactly the opposite, namely to make sure that at least
+len bytes of data are passed.
+
+Fixes: ebc08a6f47ee ("rtnetlink: Add VF config code to rtnetlink")
+Cc: Mitch Williams <mitch.a.williams@intel.com>
+Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c |   18 ++++++------------
+ 1 file changed, 6 insertions(+), 12 deletions(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -1237,18 +1237,12 @@ static const struct nla_policy ifla_vfin
+ };
+ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
+-      [IFLA_VF_MAC]           = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_mac) },
+-      [IFLA_VF_VLAN]          = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_vlan) },
+-      [IFLA_VF_TX_RATE]       = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_tx_rate) },
+-      [IFLA_VF_SPOOFCHK]      = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_spoofchk) },
+-      [IFLA_VF_RATE]          = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_rate) },
+-      [IFLA_VF_LINK_STATE]    = { .type = NLA_BINARY,
+-                                  .len = sizeof(struct ifla_vf_link_state) },
++      [IFLA_VF_MAC]           = { .len = sizeof(struct ifla_vf_mac) },
++      [IFLA_VF_VLAN]          = { .len = sizeof(struct ifla_vf_vlan) },
++      [IFLA_VF_TX_RATE]       = { .len = sizeof(struct ifla_vf_tx_rate) },
++      [IFLA_VF_SPOOFCHK]      = { .len = sizeof(struct ifla_vf_spoofchk) },
++      [IFLA_VF_RATE]          = { .len = sizeof(struct ifla_vf_rate) },
++      [IFLA_VF_LINK_STATE]    = { .len = sizeof(struct ifla_vf_link_state) },
+ };
+ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
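
The semantic difference driving this fix, as a tiny hedged sketch (the
array and its entries are made up for illustration): with
.type = NLA_BINARY the .len field is a *maximum* accepted payload size,
so undersized attributes pass validation, whereas a bare .len acts as a
minimum length requirement.

static const struct nla_policy demo_policy[] = {
        /* accepts any payload of 0..16 bytes - too-short data slips through */
        [0] = { .type = NLA_BINARY, .len = 16 },
        /* requires at least 16 bytes, which is what the VF attributes need */
        [1] = { .len = 16 },
};
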
diff --git a/queue-3.19/sh_eth-fix-lost-mac-address-on-kexec.patch b/queue-3.19/sh_eth-fix-lost-mac-address-on-kexec.patch
new file mode 100644 (file)
index 0000000..6329bae
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+Date: Fri, 27 Feb 2015 17:16:26 +0100
+Subject: sh_eth: Fix lost MAC address on kexec
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit a14c7d15ca91b444e77df08b916befdce77562ab ]
+
+Commit 740c7f31c094703c ("sh_eth: Ensure DMA engines are stopped before
+freeing buffers") added a call to sh_eth_reset() to the
+sh_eth_set_ringparam() and sh_eth_close() paths.
+
+However, setting the software reset bit(s) in the EDMR register resets
+the MAC Address Registers to zero. Hence after kexec, the new kernel
+doesn't detect a valid MAC address and assigns a random MAC address,
+breaking DHCP.
+
+Set the MAC address again after the reset in sh_eth_dev_exit() to fix
+this.
+
+Tested on r8a7740/armadillo (GETHER) and r8a7791/koelsch (FAST_RCAR).
+
+Fixes: 740c7f31c094703c ("sh_eth: Ensure DMA engines are stopped before freeing buffers")
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/renesas/sh_eth.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1392,6 +1392,9 @@ static void sh_eth_dev_exit(struct net_d
+       msleep(2); /* max frame time at 10 Mbps < 1250 us */
+       sh_eth_get_stats(ndev);
+       sh_eth_reset(ndev);
++
++      /* Set MAC address again */
++      update_mac_address(ndev);
+ }
+ /* free Tx skb function */
diff --git a/queue-3.19/sock-sock_dequeue_err_skb-needs-hard-irq-safety.patch b/queue-3.19/sock-sock_dequeue_err_skb-needs-hard-irq-safety.patch
new file mode 100644 (file)
index 0000000..f4f1c2c
--- /dev/null
@@ -0,0 +1,43 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 18 Feb 2015 05:47:55 -0800
+Subject: sock: sock_dequeue_err_skb() needs hard irq safety
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 997d5c3f4427f38562cbe207ce05bb25fdcb993b ]
+
+Non-NAPI drivers can call skb_tstamp_tx() and then sock_queue_err_skb()
+from hard IRQ context.
+
+Therefore, sock_dequeue_err_skb() needs to block hard irqs, or
+corruption or hangs can happen.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Fixes: 364a9e93243d1 ("sock: deduplicate errqueue dequeue")
+Fixes: cb820f8e4b7f7 ("net: Provide a generic socket error queue delivery method for Tx time stamps.")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/skbuff.c |    5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -3623,13 +3623,14 @@ struct sk_buff *sock_dequeue_err_skb(str
+ {
+       struct sk_buff_head *q = &sk->sk_error_queue;
+       struct sk_buff *skb, *skb_next;
++      unsigned long flags;
+       int err = 0;
+-      spin_lock_bh(&q->lock);
++      spin_lock_irqsave(&q->lock, flags);
+       skb = __skb_dequeue(q);
+       if (skb && (skb_next = skb_peek(q)))
+               err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
+-      spin_unlock_bh(&q->lock);
++      spin_unlock_irqrestore(&q->lock, flags);
+       sk->sk_err = err;
+       if (err)
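
The locking rule behind this change, as a generic sketch (names are
illustrative, not taken from the patch): _bh locking only masks softirqs,
so data that a hard-IRQ path can also touch must use the irqsave variant.

static DEFINE_SPINLOCK(example_lock);

/* may run in hard IRQ context, like a driver calling skb_tstamp_tx() */
static void producer(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... enqueue ... */
        spin_unlock_irqrestore(&example_lock, flags);
}

/* process-context consumer: spin_lock_bh() here would let the hard IRQ
 * above interrupt the critical section and deadlock on the same lock */
static void consumer(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);
        /* ... dequeue ... */
        spin_unlock_irqrestore(&example_lock, flags);
}
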
diff --git a/queue-3.19/tcp-make-sure-skb-is-not-shared-before-using-skb_get.patch b/queue-3.19/tcp-make-sure-skb-is-not-shared-before-using-skb_get.patch
new file mode 100644 (file)
index 0000000..986a53e
--- /dev/null
@@ -0,0 +1,80 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 13 Feb 2015 04:47:12 -0800
+Subject: tcp: make sure skb is not shared before using skb_get()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ba34e6d9d346fe4e05d7e417b9edf5140772d34c ]
+
+IPv6 can keep a copy of the SYN message using skb_get() in
+tcp_v6_conn_request() so that the caller won't free the skb when calling
+kfree_skb() later.
+
+Therefore TCP fast open has to clone the skb it is queuing in
+child->sk_receive_queue, as all skbs consumed from receive_queue are
+freed using __kfree_skb() (i.e. assuming skb->users == 1).
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Fixes: 5b7ed0892f2af ("tcp: move fastopen functions to tcp_fastopen.c")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_fastopen.c |   32 ++++++++++++++++++++++++--------
+ 1 file changed, 24 insertions(+), 8 deletions(-)
+
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -134,6 +134,7 @@ static bool tcp_fastopen_create_child(st
+       struct tcp_sock *tp;
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+       struct sock *child;
++      u32 end_seq;
+       req->num_retrans = 0;
+       req->num_timeout = 0;
+@@ -185,20 +186,35 @@ static bool tcp_fastopen_create_child(st
+       /* Queue the data carried in the SYN packet. We need to first
+        * bump skb's refcnt because the caller will attempt to free it.
++       * Note that IPv6 might also have used skb_get() trick
++       * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts)
++       * So we need to eventually get a clone of the packet,
++       * before inserting it in sk_receive_queue.
+        *
+        * XXX (TFO) - we honor a zero-payload TFO request for now,
+        * (any reason not to?) but no need to queue the skb since
+        * there is no data. How about SYN+FIN?
+        */
+-      if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) {
+-              skb = skb_get(skb);
+-              skb_dst_drop(skb);
+-              __skb_pull(skb, tcp_hdr(skb)->doff * 4);
+-              skb_set_owner_r(skb, child);
+-              __skb_queue_tail(&child->sk_receive_queue, skb);
+-              tp->syn_data_acked = 1;
++      end_seq = TCP_SKB_CB(skb)->end_seq;
++      if (end_seq != TCP_SKB_CB(skb)->seq + 1) {
++              struct sk_buff *skb2;
++
++              if (unlikely(skb_shared(skb)))
++                      skb2 = skb_clone(skb, GFP_ATOMIC);
++              else
++                      skb2 = skb_get(skb);
++
++              if (likely(skb2)) {
++                      skb_dst_drop(skb2);
++                      __skb_pull(skb2, tcp_hdrlen(skb));
++                      skb_set_owner_r(skb2, child);
++                      __skb_queue_tail(&child->sk_receive_queue, skb2);
++                      tp->syn_data_acked = 1;
++              } else {
++                      end_seq = TCP_SKB_CB(skb)->seq + 1;
++              }
+       }
+-      tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
++      tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq;
+       sk->sk_data_ready(sk);
+       bh_unlock_sock(child);
+       sock_put(child);
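
The refcounting rule at work here, restated as a small hedged sketch (the
helper name is made up): skb_get() only bumps skb->users, which is not
enough when the queue's consumer frees with __kfree_skb() and therefore
assumes sole ownership; a shared skb needs a private clone instead.

/* illustrative helper, not part of the patch */
static struct sk_buff *get_queueable_copy(struct sk_buff *skb)
{
        if (unlikely(skb_shared(skb)))                  /* users > 1 */
                return skb_clone(skb, GFP_ATOMIC);      /* private copy we may free */
        return skb_get(skb);                            /* take our own reference */
}
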
diff --git a/queue-3.19/team-don-t-traverse-port-list-using-rcu-in-team_set_mac_address.patch b/queue-3.19/team-don-t-traverse-port-list-using-rcu-in-team_set_mac_address.patch
new file mode 100644 (file)
index 0000000..46431d5
--- /dev/null
@@ -0,0 +1,39 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Wed, 4 Mar 2015 08:36:31 +0100
+Subject: team: don't traverse port list using rcu in team_set_mac_address
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 9215f437b85da339a7dfe3db6e288637406f88b2 ]
+
+Currently the list is traversed using the rcu variant. That is not
+correct since dev_set_mac_address can be called, which eventually calls
+rtmsg_ifinfo_build_skb, where skb allocation can sleep. So fix this
+by removing the rcu usage here.
+
+Fixes: 3d249d4ca7 "net: introduce ethernet teaming device"
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -1736,11 +1736,11 @@ static int team_set_mac_address(struct n
+       if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
+               return -EADDRNOTAVAIL;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+-      rcu_read_lock();
+-      list_for_each_entry_rcu(port, &team->port_list, list)
++      mutex_lock(&team->lock);
++      list_for_each_entry(port, &team->port_list, list)
+               if (team->ops.port_change_dev_addr)
+                       team->ops.port_change_dev_addr(team, port);
+-      rcu_read_unlock();
++      mutex_unlock(&team->lock);
+       return 0;
+ }
diff --git a/queue-3.19/team-fix-possible-null-pointer-dereference-in-team_handle_frame.patch b/queue-3.19/team-fix-possible-null-pointer-dereference-in-team_handle_frame.patch
new file mode 100644 (file)
index 0000000..6fe28a1
--- /dev/null
@@ -0,0 +1,50 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Mon, 23 Feb 2015 14:02:54 +0100
+Subject: team: fix possible null pointer dereference in team_handle_frame
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 57e595631904c827cfa1a0f7bbd7cc9a49da5745 ]
+
+Currently following race is possible in team:
+
+CPU0                                        CPU1
+                                            team_port_del
+                                              team_upper_dev_unlink
+                                                priv_flags &= ~IFF_TEAM_PORT
+team_handle_frame
+  team_port_get_rcu
+    team_port_exists
+      priv_flags & IFF_TEAM_PORT == 0
+    return NULL (instead of port got
+                 from rx_handler_data)
+                                              netdev_rx_handler_unregister
+
+The thing is that the flag is removed before the rx_handler is unregistered.
+If team_handle_frame is called in between, team_port_exists returns 0
+and team_port_get_rcu will return NULL.
+So do not check the flag here. It is guaranteed by netdev_rx_handler_unregister
+that team_handle_frame will always see a valid rx_handler_data pointer.
+
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Fixes: 3d249d4ca7d0 ("net: introduce ethernet teaming device")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c |    4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -42,9 +42,7 @@
+ static struct team_port *team_port_get_rcu(const struct net_device *dev)
+ {
+-      struct team_port *port = rcu_dereference(dev->rx_handler_data);
+-
+-      return team_port_exists(dev) ? port : NULL;
++      return rcu_dereference(dev->rx_handler_data);
+ }
+ static struct team_port *team_port_get_rtnl(const struct net_device *dev)
diff --git a/queue-3.19/udp-only-allow-ufo-for-packets-from-sock_dgram-sockets.patch b/queue-3.19/udp-only-allow-ufo-for-packets-from-sock_dgram-sockets.patch
new file mode 100644 (file)
index 0000000..2d50abf
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: =?UTF-8?q?Michal=20Kube=C4=8Dek?= <mkubecek@suse.cz>
+Date: Mon, 2 Mar 2015 18:27:11 +0100
+Subject: udp: only allow UFO for packets from SOCK_DGRAM sockets
+
+From: =?UTF-8?q?Michal=20Kube=C4=8Dek?= <mkubecek@suse.cz>
+
+[ Upstream commit acf8dd0a9d0b9e4cdb597c2f74802f79c699e802 ]
+
+If an over-MTU UDP datagram is sent through a SOCK_RAW socket to a
+UFO-capable device, ip_ufo_append_data() sets skb->ip_summed to
+CHECKSUM_PARTIAL unconditionally as all GSO code assumes transport layer
+checksum is to be computed on segmentation. However, in this case,
+skb->csum_start and skb->csum_offset are never set as raw socket
+transmit path bypasses udp_send_skb() where they are usually set. As a
+result, driver may access invalid memory when trying to calculate the
+checksum and store the result (as observed in virtio_net driver).
+
+Moreover, the very idea of modifying the userspace-provided UDP header
+is IMHO against raw socket semantics (I wasn't able to find a document
+clearly stating this or the opposite, though). And while allowing
+CHECKSUM_NONE in the UFO case would be more efficient, it would be too
+intrusive a change just to handle a corner case like this. Therefore,
+allowing UFO only for packets from SOCK_DGRAM sockets seems to be the best option.
+
+Signed-off-by: Michal Kubecek <mkubecek@suse.cz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_output.c  |    3 ++-
+ net/ipv6/ip6_output.c |    3 ++-
+ 2 files changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -890,7 +890,8 @@ static int __ip_append_data(struct sock
+       cork->length += length;
+       if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+-          (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
++          (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
++          (sk->sk_type == SOCK_DGRAM)) {
+               err = ip_ufo_append_data(sk, queue, getfrag, from, length,
+                                        hh_len, fragheaderlen, transhdrlen,
+                                        maxfraglen, flags);
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1283,7 +1283,8 @@ emsgsize:
+       if (((length > mtu) ||
+            (skb && skb_is_gso(skb))) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+-          (rt->dst.dev->features & NETIF_F_UFO)) {
++          (rt->dst.dev->features & NETIF_F_UFO) &&
++          (sk->sk_type == SOCK_DGRAM)) {
+               err = ip6_ufo_append_data(sk, getfrag, from, length,
+                                         hh_len, fragheaderlen,
+                                         transhdrlen, mtu, flags, rt);
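
For context, a hedged sketch of what CHECKSUM_PARTIAL asks of the sender
(the helper below is illustrative; it mirrors what udp_send_skb() and
udp4_hwcsum() normally set up): the device finishes the checksum at
csum_start + csum_offset, so those fields must be valid, which the
raw-socket transmit path never arranged.

static void set_udp_partial_csum(struct sk_buff *skb, struct udphdr *uh,
                                 __be32 saddr, __be32 daddr, int len)
{
        skb->ip_summed = CHECKSUM_PARTIAL;
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct udphdr, check);
        /* seed with the pseudo-header; the device adds header + data */
        uh->check = ~csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, 0);
}
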
diff --git a/queue-3.19/usb-plusb-add-support-for-national-instruments-host-to-host-cable.patch b/queue-3.19/usb-plusb-add-support-for-national-instruments-host-to-host-cable.patch
new file mode 100644 (file)
index 0000000..fa52dd5
--- /dev/null
@@ -0,0 +1,33 @@
+From foo@baz Wed Mar 11 11:44:33 CET 2015
+From: Ben Shelton <ben.shelton@ni.com>
+Date: Mon, 16 Feb 2015 13:47:06 -0600
+Subject: usb: plusb: Add support for National Instruments host-to-host cable
+
+From: Ben Shelton <ben.shelton@ni.com>
+
+[ Upstream commit 42c972a1f390e3bc51ca1e434b7e28764992067f ]
+
+The National Instruments USB Host-to-Host Cable is based on the Prolific
+PL-25A1 chipset.  Add its VID/PID so the plusb driver will recognize it.
+
+Signed-off-by: Ben Shelton <ben.shelton@ni.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/plusb.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/net/usb/plusb.c
++++ b/drivers/net/usb/plusb.c
+@@ -134,6 +134,11 @@ static const struct usb_device_id produc
+ }, {
+       USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
+       .driver_info =  (unsigned long) &prolific_info,
++}, {
++      USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
++                                       * Host-to-Host Cable
++                                       */
++      .driver_info =  (unsigned long) &prolific_info,
+ },
+       { },            // END