--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Tom Lendacky <thomas.lendacky@amd.com>
+Date: Mon, 29 Jun 2015 11:22:12 -0500
+Subject: amd-xgbe: Add the __GFP_NOWARN flag to Rx buffer allocation
+
+From: Tom Lendacky <thomas.lendacky@amd.com>
+
+[ Upstream commit 472cfe7127760d68b819cf35a26e5a1b44b30f4e ]
+
+When allocating Rx related buffers, alloc_pages is called using an order
+number that is decreased until successful. A system under stress can
+experience failures during this allocation process resulting in a warning
+being issued. This message can be of concern to end users even though the
+failure is not fatal. Since the failure is not fatal and can occur
+multiple times, the driver should include the __GFP_NOWARN flag to
+suppress the warning message from being issued.
+
+Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
+@@ -263,7 +263,7 @@ static int xgbe_alloc_pages(struct xgbe_
+ int ret;
+
+ /* Try to obtain pages, decreasing order if necessary */
+- gfp |= __GFP_COLD | __GFP_COMP;
++ gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
+ while (order >= 0) {
+ pages = alloc_pages(gfp, order);
+ if (pages)
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Simon Guinot <simon.guinot@sequanux.org>
+Date: Tue, 30 Jun 2015 16:20:21 +0200
+Subject: ARM: mvebu: update Ethernet compatible string for Armada XP
+
+From: Simon Guinot <simon.guinot@sequanux.org>
+
+[ Upstream commit ea3b55fe83b5fcede82d183164b9d6831b26e33b ]
+
+This patch updates the Ethernet DT nodes for Armada XP SoCs with the
+compatible string "marvell,armada-xp-neta".
+
+Signed-off-by: Simon Guinot <simon.guinot@sequanux.org>
+Fixes: 77916519cba3 ("arm: mvebu: Armada XP MV78230 has only three Ethernet interfaces")
+Cc: <stable@vger.kernel.org> # v3.8+
+Acked-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Reviewed-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/boot/dts/armada-370-xp.dtsi | 2 --
+ arch/arm/boot/dts/armada-370.dtsi | 8 ++++++++
+ arch/arm/boot/dts/armada-xp-mv78260.dtsi | 2 +-
+ arch/arm/boot/dts/armada-xp-mv78460.dtsi | 2 +-
+ arch/arm/boot/dts/armada-xp.dtsi | 10 +++++++++-
+ 5 files changed, 19 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/boot/dts/armada-370-xp.dtsi
++++ b/arch/arm/boot/dts/armada-370-xp.dtsi
+@@ -270,7 +270,6 @@
+ };
+
+ eth0: ethernet@70000 {
+- compatible = "marvell,armada-370-neta";
+ reg = <0x70000 0x4000>;
+ interrupts = <8>;
+ clocks = <&gateclk 4>;
+@@ -286,7 +285,6 @@
+ };
+
+ eth1: ethernet@74000 {
+- compatible = "marvell,armada-370-neta";
+ reg = <0x74000 0x4000>;
+ interrupts = <10>;
+ clocks = <&gateclk 3>;
+--- a/arch/arm/boot/dts/armada-370.dtsi
++++ b/arch/arm/boot/dts/armada-370.dtsi
+@@ -307,6 +307,14 @@
+ dmacap,memset;
+ };
+ };
++
++ ethernet@70000 {
++ compatible = "marvell,armada-370-neta";
++ };
++
++ ethernet@74000 {
++ compatible = "marvell,armada-370-neta";
++ };
+ };
+ };
+ };
+--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+@@ -318,7 +318,7 @@
+ };
+
+ eth3: ethernet@34000 {
+- compatible = "marvell,armada-370-neta";
++ compatible = "marvell,armada-xp-neta";
+ reg = <0x34000 0x4000>;
+ interrupts = <14>;
+ clocks = <&gateclk 1>;
+--- a/arch/arm/boot/dts/armada-xp-mv78460.dtsi
++++ b/arch/arm/boot/dts/armada-xp-mv78460.dtsi
+@@ -356,7 +356,7 @@
+ };
+
+ eth3: ethernet@34000 {
+- compatible = "marvell,armada-370-neta";
++ compatible = "marvell,armada-xp-neta";
+ reg = <0x34000 0x4000>;
+ interrupts = <14>;
+ clocks = <&gateclk 1>;
+--- a/arch/arm/boot/dts/armada-xp.dtsi
++++ b/arch/arm/boot/dts/armada-xp.dtsi
+@@ -177,7 +177,7 @@
+ };
+
+ eth2: ethernet@30000 {
+- compatible = "marvell,armada-370-neta";
++ compatible = "marvell,armada-xp-neta";
+ reg = <0x30000 0x4000>;
+ interrupts = <12>;
+ clocks = <&gateclk 2>;
+@@ -220,6 +220,14 @@
+ };
+ };
+
++ ethernet@70000 {
++ compatible = "marvell,armada-xp-neta";
++ };
++
++ ethernet@74000 {
++ compatible = "marvell,armada-xp-neta";
++ };
++
+ xor@f0900 {
+ compatible = "marvell,orion-xor";
+ reg = <0xF0900 0x100
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 26 Jun 2015 07:32:29 +0200
+Subject: bnx2x: fix lockdep splat
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d53c66a5b80698620f7c9ba2372fff4017e987b8 ]
+
+Michel reported the following lockdep splat:
+
+[ 44.718117] INFO: trying to register non-static key.
+[ 44.723081] the code is fine but needs lockdep annotation.
+[ 44.728559] turning off the locking correctness validator.
+[ 44.734036] CPU: 8 PID: 5483 Comm: ethtool Not tainted 4.1.0
+[ 44.770289] Call Trace:
+[ 44.772741] [<ffffffff816eb1cd>] dump_stack+0x4c/0x65
+[ 44.777879] [<ffffffff8111d921>] ? console_unlock+0x1f1/0x510
+[ 44.783708] [<ffffffff811121f5>] __lock_acquire+0x1d05/0x1f10
+[ 44.789538] [<ffffffff8111370a>] ? mark_held_locks+0x6a/0x90
+[ 44.795276] [<ffffffff81113835>] ? trace_hardirqs_on_caller+0x105/0x1d0
+[ 44.801967] [<ffffffff8111390d>] ? trace_hardirqs_on+0xd/0x10
+[ 44.807793] [<ffffffff811330fa>] ? hrtimer_try_to_cancel+0x4a/0x250
+[ 44.814142] [<ffffffff81112ba6>] lock_acquire+0xb6/0x290
+[ 44.819537] [<ffffffff810d6675>] ? flush_work+0x5/0x280
+[ 44.824844] [<ffffffff810d66ad>] flush_work+0x3d/0x280
+[ 44.830061] [<ffffffff810d6675>] ? flush_work+0x5/0x280
+[ 44.835366] [<ffffffff816f3c43>] ? schedule_hrtimeout_range+0x13/0x20
+[ 44.841889] [<ffffffff8112ec9b>] ? usleep_range+0x4b/0x50
+[ 44.847365] [<ffffffff8111370a>] ? mark_held_locks+0x6a/0x90
+[ 44.853102] [<ffffffff810d8585>] ? __cancel_work_timer+0x105/0x1c0
+[ 44.859359] [<ffffffff81113835>] ? trace_hardirqs_on_caller+0x105/0x1d0
+[ 44.866045] [<ffffffff810d851f>] __cancel_work_timer+0x9f/0x1c0
+[ 44.872048] [<ffffffffa0010982>] ? bnx2x_func_stop+0x42/0x90 [bnx2x]
+[ 44.878481] [<ffffffff810d8670>] cancel_work_sync+0x10/0x20
+[ 44.884134] [<ffffffffa00259e5>] bnx2x_chip_cleanup+0x245/0x730 [bnx2x]
+[ 44.890829] [<ffffffff8110ce02>] ? up+0x32/0x50
+[ 44.895439] [<ffffffff811306b5>] ? del_timer_sync+0x5/0xd0
+[ 44.901005] [<ffffffffa005596d>] bnx2x_nic_unload+0x20d/0x8e0 [bnx2x]
+[ 44.907527] [<ffffffff811f1aef>] ? might_fault+0x5f/0xb0
+[ 44.912921] [<ffffffffa005851c>] bnx2x_reload_if_running+0x2c/0x50 [bnx2x]
+[ 44.919879] [<ffffffffa005a3c5>] bnx2x_set_ringparam+0x2b5/0x460 [bnx2x]
+[ 44.926664] [<ffffffff815d498b>] dev_ethtool+0x55b/0x1c40
+[ 44.932148] [<ffffffff815dfdc7>] ? rtnl_lock+0x17/0x20
+[ 44.937364] [<ffffffff815e7f8b>] dev_ioctl+0x17b/0x630
+[ 44.942582] [<ffffffff815abf8d>] sock_do_ioctl+0x5d/0x70
+[ 44.947972] [<ffffffff815ac013>] sock_ioctl+0x73/0x280
+[ 44.953192] [<ffffffff8124c1c8>] do_vfs_ioctl+0x88/0x5b0
+[ 44.958587] [<ffffffff8110d0b3>] ? up_read+0x23/0x40
+[ 44.963631] [<ffffffff812584cc>] ? __fget_light+0x6c/0xa0
+[ 44.969105] [<ffffffff8124c781>] SyS_ioctl+0x91/0xb0
+[ 44.974149] [<ffffffff816f4dd7>] system_call_fastpath+0x12/0x6f
+
+As bnx2x_init_ptp() is only called if bp->flags contains PTP_SUPPORTED,
+we also need to guard bnx2x_stop_ptp() with the same condition, otherwise
+the ptp_task workqueue is not initialized and the kernel barfs on
+cancel_work_sync().
+
+Fixes: eeed018cbfa30 ("bnx2x: Add timestamping and PTP hardware clock support")
+Reported-by: Michel Lespinasse <walken@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Michal Kalderon <Michal.Kalderon@qlogic.com>
+Cc: Ariel Elior <Ariel.Elior@qlogic.com>
+Cc: Yuval Mintz <Yuval.Mintz@qlogic.com>
+Cc: David Decotigny <decot@google.com>
+Acked-by: Sony Chacko <sony.chacko@qlogic.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -9323,7 +9323,8 @@ unload_error:
+ * function stop ramrod is sent, since as part of this ramrod FW access
+ * PTP registers.
+ */
+- bnx2x_stop_ptp(bp);
++ if (bp->flags & PTP_SUPPORTED)
++ bnx2x_stop_ptp(bp);
+
+ /* Disable HW interrupts, NAPI */
+ bnx2x_netif_stop(bp, 1);
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Nikolay Aleksandrov <razor@blackwall.org>
+Date: Mon, 15 Jun 2015 20:28:51 +0300
+Subject: bridge: fix br_stp_set_bridge_priority race conditions
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+[ Upstream commit 2dab80a8b486f02222a69daca6859519e05781d9 ]
+
+After the ->set() spinlocks were removed br_stp_set_bridge_priority
+was left running without any protection when used via sysfs. It can
+race with port add/del and could result in use-after-free cases and
+corrupted lists. Tested by running port add/del in a loop with STP
+enabled while setting the priority in a loop; crashes are easily
+reproducible.
+The spinlocks around sysfs ->set() were removed in commit:
+14f98f258f19 ("bridge: range check STP parameters")
+There's also a race condition in the netlink priority support that is
+fixed by this change, but it was introduced recently and the fixes tag
+covers it; just in case it's needed, the commit is:
+af615762e972 ("bridge: add ageing_time, stp_state, priority over netlink")
+
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Fixes: 14f98f258f19 ("bridge: range check STP parameters")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_ioctl.c | 2 --
+ net/bridge/br_stp_if.c | 4 +++-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/net/bridge/br_ioctl.c
++++ b/net/bridge/br_ioctl.c
+@@ -247,9 +247,7 @@ static int old_dev_ioctl(struct net_devi
+ if (!ns_capable(dev_net(dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
+
+- spin_lock_bh(&br->lock);
+ br_stp_set_bridge_priority(br, args[1]);
+- spin_unlock_bh(&br->lock);
+ return 0;
+
+ case BRCTL_SET_PORT_PRIORITY:
+--- a/net/bridge/br_stp_if.c
++++ b/net/bridge/br_stp_if.c
+@@ -243,12 +243,13 @@ bool br_stp_recalculate_bridge_id(struct
+ return true;
+ }
+
+-/* called under bridge lock */
++/* Acquires and releases bridge lock */
+ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
+ {
+ struct net_bridge_port *p;
+ int wasroot;
+
++ spin_lock_bh(&br->lock);
+ wasroot = br_is_root_bridge(br);
+
+ list_for_each_entry(p, &br->port_list, list) {
+@@ -266,6 +267,7 @@ void br_stp_set_bridge_priority(struct n
+ br_port_state_selection(br);
+ if (br_is_root_bridge(br) && !wasroot)
+ br_become_root_bridge(br);
++ spin_unlock_bh(&br->lock);
+ }
+
+ /* called under bridge lock */
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Julian Anastasov <ja@ssi.bg>
+Date: Tue, 23 Jun 2015 08:34:39 +0300
+Subject: ip: report the original address of ICMP messages
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit 34b99df4e6256ddafb663c6de0711dceceddfe0e ]
+
+ICMP messages can trigger ICMP and local errors. In this case
+serr->port is 0 and starting from Linux 4.0 we do not return
+the original target address to the error queue readers.
+Add a function to define which errors provide addr_offset.
+With this fix my ping command is not silent anymore.
+
+Fixes: c247f0534cc5 ("ip: fix error queue empty skb handling")
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_sockglue.c | 11 ++++++++++-
+ net/ipv6/datagram.c | 12 +++++++++++-
+ 2 files changed, 21 insertions(+), 2 deletions(-)
+
+--- a/net/ipv4/ip_sockglue.c
++++ b/net/ipv4/ip_sockglue.c
+@@ -432,6 +432,15 @@ void ip_local_error(struct sock *sk, int
+ kfree_skb(skb);
+ }
+
++/* For some errors we have valid addr_offset even with zero payload and
++ * zero port. Also, addr_offset should be supported if port is set.
++ */
++static inline bool ipv4_datagram_support_addr(struct sock_exterr_skb *serr)
++{
++ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
++ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
++}
++
+ /* IPv4 supports cmsg on all imcp errors and some timestamps
+ *
+ * Timestamp code paths do not initialize the fields expected by cmsg:
+@@ -498,7 +507,7 @@ int ip_recv_error(struct sock *sk, struc
+
+ serr = SKB_EXT_ERR(skb);
+
+- if (sin && serr->port) {
++ if (sin && ipv4_datagram_support_addr(serr)) {
+ sin->sin_family = AF_INET;
+ sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) +
+ serr->addr_offset);
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -325,6 +325,16 @@ void ipv6_local_rxpmtu(struct sock *sk,
+ kfree_skb(skb);
+ }
+
++/* For some errors we have valid addr_offset even with zero payload and
++ * zero port. Also, addr_offset should be supported if port is set.
++ */
++static inline bool ipv6_datagram_support_addr(struct sock_exterr_skb *serr)
++{
++ return serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6 ||
++ serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
++ serr->ee.ee_origin == SO_EE_ORIGIN_LOCAL || serr->port;
++}
++
+ /* IPv6 supports cmsg on all origins aside from SO_EE_ORIGIN_LOCAL.
+ *
+ * At one point, excluding local errors was a quick test to identify icmp/icmp6
+@@ -389,7 +399,7 @@ int ipv6_recv_error(struct sock *sk, str
+
+ serr = SKB_EXT_ERR(skb);
+
+- if (sin && serr->port) {
++ if (sin && ipv6_datagram_support_addr(serr)) {
+ const unsigned char *nh = skb_network_header(skb);
+ sin->sin6_family = AF_INET6;
+ sin->sin6_flowinfo = 0;
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Wed, 17 Jun 2015 13:54:54 +0200
+Subject: mac80211: fix locking in update_vlan_tailroom_need_count()
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit 51f458d9612177f69c2e2c437034ae15f93078e7 ]
+
+Unfortunately, Michal's change to fix AP_VLAN crypto tailroom
+caused a locking issue that was reported by lockdep, but only
+in a few cases - the issue was a classic ABBA deadlock caused
+by taking the mtx after the key_mtx, where normally they're
+taken the other way around.
+
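+For readers less familiar with the term, a tiny pthread fragment (purely
+illustrative userspace code, not from this patch; the lock names merely
+echo the ones discussed above) showing the ABBA shape: one path takes A
+then B, the other takes B then A, and running both concurrently can
+block forever.
+
+    #include <pthread.h>
+
+    static pthread_mutex_t key_mtx   = PTHREAD_MUTEX_INITIALIZER;  /* "A" */
+    static pthread_mutex_t local_mtx = PTHREAD_MUTEX_INITIALIZER;  /* "B" */
+
+    static void *path_one(void *arg)  /* A, then B */
+    {
+            (void)arg;
+            pthread_mutex_lock(&key_mtx);
+            pthread_mutex_lock(&local_mtx);
+            pthread_mutex_unlock(&local_mtx);
+            pthread_mutex_unlock(&key_mtx);
+            return NULL;
+    }
+
+    static void *path_two(void *arg)  /* B, then A: the inverted order */
+    {
+            (void)arg;
+            pthread_mutex_lock(&local_mtx);
+            pthread_mutex_lock(&key_mtx);  /* may wait on path_one forever */
+            pthread_mutex_unlock(&key_mtx);
+            pthread_mutex_unlock(&local_mtx);
+            return NULL;
+    }
+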
+As the key mutex protects the field in question (I'm adding a
+few annotations to make that clear) only the iteration needs
+to be protected, but we can also iterate the interface list
+with just RCU protection while holding the key mutex.
+
+Fixes: f9dca80b98ca ("mac80211: fix AP_VLAN crypto tailroom calculation")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/key.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+--- a/net/mac80211/key.c
++++ b/net/mac80211/key.c
+@@ -66,12 +66,15 @@ update_vlan_tailroom_need_count(struct i
+ if (sdata->vif.type != NL80211_IFTYPE_AP)
+ return;
+
+- mutex_lock(&sdata->local->mtx);
++ /* crypto_tx_tailroom_needed_cnt is protected by this */
++ assert_key_lock(sdata->local);
+
+- list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(vlan, &sdata->u.ap.vlans, u.vlan.list)
+ vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+- mutex_unlock(&sdata->local->mtx);
++ rcu_read_unlock();
+ }
+
+ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
+@@ -95,6 +98,8 @@ static void increment_tailroom_need_coun
+ * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
+ */
+
++ assert_key_lock(sdata->local);
++
+ update_vlan_tailroom_need_count(sdata, 1);
+
+ if (!sdata->crypto_tx_tailroom_needed_cnt++) {
+@@ -109,6 +114,8 @@ static void increment_tailroom_need_coun
+ static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+ int delta)
+ {
++ assert_key_lock(sdata->local);
++
+ WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+ update_vlan_tailroom_need_count(sdata, -delta);
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Or Gerlitz <ogerlitz@mellanox.com>
+Date: Thu, 25 Jun 2015 11:29:44 +0300
+Subject: mlx4: Disable HA for SRIOV PF RoCE devices
+
+From: Or Gerlitz <ogerlitz@mellanox.com>
+
+[ Upstream commit 7254acffeeec3c0a75b9c5364c29a6eb00014930 ]
+
+When in HA mode, the driver exposes an IB (RoCE) device instance with only
+one port. Under SRIOV, the existing implementation doesn't go well with
+the PF RoCE driver's role of Special QPs Para-Virtualization, etc.
+
+As such, disable HA for the mlx4 PF RoCE device in SRIOV mode.
+
+Fixes: a57500903093 ('IB/mlx4: Add port aggregation support')
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/intf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
++++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
+@@ -93,8 +93,14 @@ int mlx4_register_interface(struct mlx4_
+ mutex_lock(&intf_mutex);
+
+ list_add_tail(&intf->list, &intf_list);
+- list_for_each_entry(priv, &dev_list, dev_list)
++ list_for_each_entry(priv, &dev_list, dev_list) {
++ if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) {
++ mlx4_dbg(&priv->dev,
++ "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol);
++ intf->flags &= ~MLX4_INTFF_BONDING;
++ }
+ mlx4_add_device(intf, priv);
++ }
+
+ mutex_unlock(&intf_mutex);
+
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Stas Sergeev <stsp@list.ru>
+Date: Thu, 18 Jun 2015 18:36:03 +0300
+Subject: mvneta: add forgotten initialization of autonegotiation bits
+
+From: Stas Sergeev <stsp@list.ru>
+
+[ Upstream commit 538761b794c1542f1c6e31eadd9d7aae118889f7 ]
+
+The commit 898b2970e2c9 ("mvneta: implement SGMII-based in-band link state
+signaling")
+changed mvneta_adjust_link() so that it does not clear the auto-negotiation
+bits in MVNETA_GMAC_AUTONEG_CONFIG register. This was necessary for
+auto-negotiation mode to work.
+Unfortunately I haven't checked if these bits are ever initialized.
+It appears they are not.
+This patch adds the missing initialization of the auto-negotiation bits
+in the MVNETA_GMAC_AUTONEG_CONFIG register.
+It fixes the following regression:
+https://www.mail-archive.com/netdev@vger.kernel.org/msg67928.html
+
+Since the patch was tested to fix a regression, it should be applied to
+the stable tree.
+
+Tested-by: Arnaud Ebalard <arno@natisbad.org>
+
+CC: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+CC: Florian Fainelli <f.fainelli@gmail.com>
+CC: netdev@vger.kernel.org
+CC: linux-kernel@vger.kernel.org
+CC: stable@vger.kernel.org
+
+Signed-off-by: Stas Sergeev <stsp@users.sourceforge.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -1013,6 +1013,12 @@ static void mvneta_defaults_set(struct m
+ val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
+ val |= MVNETA_GMAC_1MS_CLOCK_ENABLE;
+ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val);
++ } else {
++ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
++ val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE |
++ MVNETA_GMAC_AN_SPEED_EN |
++ MVNETA_GMAC_AN_DUPLEX_EN);
++ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
+ }
+
+ mvneta_set_ucast_table(pp, -1);
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Julian Anastasov <ja@ssi.bg>
+Date: Tue, 16 Jun 2015 22:56:39 +0300
+Subject: neigh: do not modify unlinked entries
+
+From: Julian Anastasov <ja@ssi.bg>
+
+[ Upstream commit 2c51a97f76d20ebf1f50fef908b986cb051fdff9 ]
+
+The lockless lookups can return an entry that is unlinked.
+Sometimes they get a reference before the last neigh_cleanup_and_release,
+sometimes they do not need a reference. Later, any
+modification attempts may result in the following problems:
+
+1. The entry is not destroyed immediately because neigh_update
+can start the timer for a dead entry, e.g. on a change to NUD_REACHABLE
+state. As a result, the entry lives for some time but is invisible
+and out of control.
+
+2. __neigh_event_send can run in parallel with neigh_destroy
+while refcnt=0, but if the timer is started and expires, refcnt can
+reach 0 a second time, leading to a second neigh_destroy and a
+possible crash.
+
+Thanks to Eric Dumazet and Ying Xue for their work and analysis
+of the __neigh_event_send change.
+
+Fixes: 767e97e1e0db ("neigh: RCU conversion of struct neighbour")
+Fixes: a263b3093641 ("ipv4: Make neigh lookups directly in output packet path.")
+Fixes: 6fd6ce2056de ("ipv6: Do not depend on rt->n in ip6_finish_output2().")
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Ying Xue <ying.xue@windriver.com>
+Signed-off-by: Julian Anastasov <ja@ssi.bg>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/neighbour.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -957,6 +957,8 @@ int __neigh_event_send(struct neighbour
+ rc = 0;
+ if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ goto out_unlock_bh;
++ if (neigh->dead)
++ goto out_dead;
+
+ if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
+@@ -1013,6 +1015,13 @@ out_unlock_bh:
+ write_unlock(&neigh->lock);
+ local_bh_enable();
+ return rc;
++
++out_dead:
++ if (neigh->nud_state & NUD_STALE)
++ goto out_unlock_bh;
++ write_unlock_bh(&neigh->lock);
++ kfree_skb(skb);
++ return 1;
+ }
+ EXPORT_SYMBOL(__neigh_event_send);
+
+@@ -1076,6 +1085,8 @@ int neigh_update(struct neighbour *neigh
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ (old & (NUD_NOARP | NUD_PERMANENT)))
+ goto out;
++ if (neigh->dead)
++ goto out;
+
+ if (!(new & NUD_VALID)) {
+ neigh_del_timer(neigh);
+@@ -1225,6 +1236,8 @@ EXPORT_SYMBOL(neigh_update);
+ */
+ void __neigh_set_probe_once(struct neighbour *neigh)
+ {
++ if (neigh->dead)
++ return;
+ neigh->updated = jiffies;
+ if (!(neigh->nud_state & NUD_FAILED))
+ return;
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Ido Shamay <idos@mellanox.com>
+Date: Thu, 25 Jun 2015 11:29:43 +0300
+Subject: net/mlx4_en: Fix wrong csum complete report when rxvlan offload is disabled
+
+From: Ido Shamay <idos@mellanox.com>
+
+[ Upstream commit 79a258526ce1051cb9684018c25a89d51ac21be8 ]
+
+The check_csum() function relied on hwtstamp_rx_filter to know if rxvlan
+offload is disabled. This is wrong since rxvlan offload can be switched
+on/off regardless of hwtstamp_rx_filter.
+
+Also moved check_csum to query CQE information to identify VLAN packets
+and removed the check of IP packets, since it has been validated before.
+
+Fixes: f8c6455bb04b ('net/mlx4_en: Extend checksum offloading by CHECKSUM COMPLETE')
+Signed-off-by: Ido Shamay <idos@mellanox.com>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_rx.c | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+@@ -723,7 +723,7 @@ static int get_fixed_ipv6_csum(__wsum hw
+ }
+ #endif
+ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
+- int hwtstamp_rx_filter)
++ netdev_features_t dev_features)
+ {
+ __wsum hw_checksum = 0;
+
+@@ -731,14 +731,8 @@ static int check_csum(struct mlx4_cqe *c
+
+ hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
+
+- if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) &&
+- hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) {
+- /* next protocol non IPv4 or IPv6 */
+- if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+- != htons(ETH_P_IP) &&
+- ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto
+- != htons(ETH_P_IPV6))
+- return -1;
++ if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) &&
++ !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
+ hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
+ hdr += sizeof(struct vlan_hdr);
+ }
+@@ -901,7 +895,8 @@ int mlx4_en_process_rx_cq(struct net_dev
+
+ if (ip_summed == CHECKSUM_COMPLETE) {
+ void *va = skb_frag_address(skb_shinfo(gro_skb)->frags);
+- if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) {
++ if (check_csum(cqe, gro_skb, va,
++ dev->features)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_none++;
+ ring->csum_complete--;
+@@ -956,7 +951,7 @@ int mlx4_en_process_rx_cq(struct net_dev
+ }
+
+ if (ip_summed == CHECKSUM_COMPLETE) {
+- if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) {
++ if (check_csum(cqe, skb, skb->data, dev->features)) {
+ ip_summed = CHECKSUM_NONE;
+ ring->csum_complete--;
+ ring->csum_none++;
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Eran Ben Elisha <eranbe@mellanox.com>
+Date: Thu, 25 Jun 2015 11:29:41 +0300
+Subject: net/mlx4_en: Release TX QP when destroying TX ring
+
+From: Eran Ben Elisha <eranbe@mellanox.com>
+
+[ Upstream commit 0eb08514fdbdcd16fd6870680cd638f203662e9d ]
+
+TX ring QP wasn't released at mlx4_en_destroy_tx_ring. Instead, the code
+used the deprecated base_tx_qpn field. Move TX QP release to
+mlx4_en_destroy_tx_ring and remove the base_tx_qpn field.
+
+Fixes: ddae0349fdb7 ('net/mlx4: Change QP allocation scheme')
+Signed-off-by: Eran Ben Elisha <eranbe@mellanox.com>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 4 ----
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 1 +
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 -
+ 3 files changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+@@ -1973,10 +1973,6 @@ void mlx4_en_free_resources(struct mlx4_
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+
+- if (priv->base_tx_qpn) {
+- mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num);
+- priv->base_tx_qpn = 0;
+- }
+ }
+
+ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -180,6 +180,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4
+ mlx4_bf_free(mdev->dev, &ring->bf);
+ mlx4_qp_remove(mdev->dev, &ring->qp);
+ mlx4_qp_free(mdev->dev, &ring->qp);
++ mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ kfree(ring->bounce_buf);
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -579,7 +579,6 @@ struct mlx4_en_priv {
+ int vids[128];
+ bool wol;
+ struct device *ddev;
+- int base_tx_qpn;
+ struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE];
+ struct hwtstamp_config hwtstamp_config;
+
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Ido Shamay <idos@mellanox.com>
+Date: Thu, 25 Jun 2015 11:29:42 +0300
+Subject: net/mlx4_en: Wake TX queues only when there's enough room
+
+From: Ido Shamay <idos@mellanox.com>
+
+[ Upstream commit 488a9b48e398b157703766e2cd91ea45ac6997c5 ]
+
+An indication of a single completed packet, marked by txbbs_skipped
+being bigger than zero, is not enough to wake up a
+stopped TX queue. The completed packet may contain a single TXBB,
+while the next packet to be sent (after the wake up) may have multiple
+TXBBs (LSO/TSO packets for example), causing an overflow in the queue
+followed by WQE corruption and a TX queue timeout.
+Instead, wake the stopped queue only when there's enough room for the
+worst-case (maximum sized WQE) packet that we may need to handle after
+the queue is opened again.
+
+Also created a helper routine - mlx4_en_is_tx_ring_full - which checks
+whether the current TX ring is full or not. It provides better code
+readability and removes code duplication.
+
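+A compact sketch of the "room for the worst case" test that the helper
+implements (illustrative only; the constants and the ring_full() name
+are made up): prod and cons are free-running 32-bit counters, so their
+unsigned difference is the in-flight TXBB count even across wraparound,
+and the queue is only considered not-full when a maximum-sized WQE still
+fits.
+
+    #include <stdbool.h>
+    #include <stdint.h>
+    #include <stdio.h>
+
+    #define RING_SIZE      1024u
+    #define HEADROOM          4u
+    #define MAX_DESC_TXBBS   32u  /* worst-case TXBBs of one WQE */
+
+    static bool ring_full(uint32_t prod, uint32_t cons)
+    {
+            return prod - cons > RING_SIZE - HEADROOM - MAX_DESC_TXBBS;
+    }
+
+    int main(void)
+    {
+            uint32_t cons = UINT32_MAX - 10u;  /* counters may wrap... */
+            uint32_t prod = cons + 15u;        /* ...15 TXBBs in flight */
+
+            printf("%d\n", ring_full(prod, cons));          /* 0: not full */
+            printf("%d\n", ring_full(cons + 1000u, cons));  /* 1: full */
+            return 0;
+    }
+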
+Signed-off-by: Ido Shamay <idos@mellanox.com>
+Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/en_tx.c | 19 +++++++++++--------
+ drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1 +
+ 2 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+@@ -66,6 +66,7 @@ int mlx4_en_create_tx_ring(struct mlx4_e
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
++ ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS;
+
+ tmp = size * sizeof(struct mlx4_en_tx_info);
+ ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node);
+@@ -232,6 +233,11 @@ void mlx4_en_deactivate_tx_ring(struct m
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+ }
+
++static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring)
++{
++ return ring->prod - ring->cons > ring->full_size;
++}
++
+ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, int index,
+ u8 owner)
+@@ -474,11 +480,10 @@ static bool mlx4_en_process_tx_cq(struct
+
+ netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
+
+- /*
+- * Wakeup Tx queue if this stopped, and at least 1 packet
+- * was completed
++ /* Wakeup Tx queue if this stopped, and ring is not full.
+ */
+- if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
++ if (netif_tx_queue_stopped(ring->tx_queue) &&
++ !mlx4_en_is_tx_ring_full(ring)) {
+ netif_tx_wake_queue(ring->tx_queue);
+ ring->wake_queue++;
+ }
+@@ -922,8 +927,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff
+ skb_tx_timestamp(skb);
+
+ /* Check available TXBBs And 2K spare for prefetch */
+- stop_queue = (int)(ring->prod - ring_cons) >
+- ring->size - HEADROOM - MAX_DESC_TXBBS;
++ stop_queue = mlx4_en_is_tx_ring_full(ring);
+ if (unlikely(stop_queue)) {
+ netif_tx_stop_queue(ring->tx_queue);
+ ring->queue_stopped++;
+@@ -992,8 +996,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff
+ smp_rmb();
+
+ ring_cons = ACCESS_ONCE(ring->cons);
+- if (unlikely(((int)(ring->prod - ring_cons)) <=
+- ring->size - HEADROOM - MAX_DESC_TXBBS)) {
++ if (unlikely(!mlx4_en_is_tx_ring_full(ring))) {
+ netif_tx_wake_queue(ring->tx_queue);
+ ring->wake_queue++;
+ }
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+@@ -279,6 +279,7 @@ struct mlx4_en_tx_ring {
+ u32 size; /* number of TXBBs */
+ u32 size_mask;
+ u16 stride;
++ u32 full_size;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 buf_size;
+ __be32 doorbell_qpn;
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Simon Guinot <simon.guinot@sequanux.org>
+Date: Tue, 30 Jun 2015 16:20:22 +0200
+Subject: net: mvneta: disable IP checksum with jumbo frames for Armada 370
+
+From: Simon Guinot <simon.guinot@sequanux.org>
+
+[ Upstream commit b65657fc240ae6c1d2a1e62db9a0e61ac9631d7a ]
+
+The Ethernet controller found in the Armada 370, 380 and 385 SoCs doesn't
+support TCP/IP checksumming with frame sizes larger than 1600 bytes.
+
+This patch fixes the issue by disabling the features NETIF_F_IP_CSUM and
+NETIF_F_TSO for the Armada 370 and compatible SoCs when the MTU is set
+to a value greater than 1600 bytes.
+
+Signed-off-by: Simon Guinot <simon.guinot@sequanux.org>
+Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit")
+Cc: <stable@vger.kernel.org> # v3.8+
+Acked-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/mvneta.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -310,6 +310,7 @@ struct mvneta_port {
+ unsigned int link;
+ unsigned int duplex;
+ unsigned int speed;
++ unsigned int tx_csum_limit;
+ int use_inband_status:1;
+ };
+
+@@ -2508,8 +2509,10 @@ static int mvneta_change_mtu(struct net_
+
+ dev->mtu = mtu;
+
+- if (!netif_running(dev))
++ if (!netif_running(dev)) {
++ netdev_update_features(dev);
+ return 0;
++ }
+
+ /* The interface is running, so we have to force a
+ * reallocation of the queues
+@@ -2538,9 +2541,26 @@ static int mvneta_change_mtu(struct net_
+ mvneta_start_dev(pp);
+ mvneta_port_up(pp);
+
++ netdev_update_features(dev);
++
+ return 0;
+ }
+
++static netdev_features_t mvneta_fix_features(struct net_device *dev,
++ netdev_features_t features)
++{
++ struct mvneta_port *pp = netdev_priv(dev);
++
++ if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
++ features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
++ netdev_info(dev,
++ "Disable IP checksum for MTU greater than %dB\n",
++ pp->tx_csum_limit);
++ }
++
++ return features;
++}
++
+ /* Get mac address */
+ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
+ {
+@@ -2862,6 +2882,7 @@ static const struct net_device_ops mvnet
+ .ndo_set_rx_mode = mvneta_set_rx_mode,
+ .ndo_set_mac_address = mvneta_set_mac_addr,
+ .ndo_change_mtu = mvneta_change_mtu,
++ .ndo_fix_features = mvneta_fix_features,
+ .ndo_get_stats64 = mvneta_get_stats64,
+ .ndo_do_ioctl = mvneta_ioctl,
+ };
+@@ -3107,6 +3128,9 @@ static int mvneta_probe(struct platform_
+ }
+ }
+
++ if (of_device_is_compatible(dn, "marvell,armada-370-neta"))
++ pp->tx_csum_limit = 1600;
++
+ pp->tx_ring_size = MVNETA_MAX_TXD;
+ pp->rx_ring_size = MVNETA_MAX_RXD;
+
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Simon Guinot <simon.guinot@sequanux.org>
+Date: Tue, 30 Jun 2015 16:20:20 +0200
+Subject: net: mvneta: introduce compatible string "marvell, armada-xp-neta"
+
+From: Simon Guinot <simon.guinot@sequanux.org>
+
+[ Upstream commit f522a975a8101895a85354b9c143f41b8248e71a ]
+
+The mvneta driver supports the Ethernet IP found in the Armada 370, XP,
+380 and 385 SoCs. Since at least one more hardware feature is available
+for the Armada XP SoCs, a way to identify them is needed.
+
+This patch introduces a new compatible string "marvell,armada-xp-neta".
+
+Signed-off-by: Simon Guinot <simon.guinot@sequanux.org>
+Fixes: c5aff18204da ("net: mvneta: driver for Marvell Armada 370/XP network unit")
+Cc: <stable@vger.kernel.org> # v3.8+
+Acked-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Acked-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt | 2 +-
+ drivers/net/ethernet/marvell/mvneta.c | 1 +
+ 2 files changed, 2 insertions(+), 1 deletion(-)
+
+--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
++++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+@@ -1,7 +1,7 @@
+ * Marvell Armada 370 / Armada XP Ethernet Controller (NETA)
+
+ Required properties:
+-- compatible: should be "marvell,armada-370-neta".
++- compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta".
+ - reg: address and length of the register set for the device.
+ - interrupts: interrupt for the device
+ - phy: See ethernet.txt file in the same directory.
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3185,6 +3185,7 @@ static int mvneta_remove(struct platform
+
+ static const struct of_device_id mvneta_match[] = {
+ { .compatible = "marvell,armada-370-neta" },
++ { .compatible = "marvell,armada-xp-neta" },
+ { }
+ };
+ MODULE_DEVICE_TABLE(of, mvneta_match);
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Mugunthan V N <mugunthanvnm@ti.com>
+Date: Thu, 25 Jun 2015 22:21:02 +0530
+Subject: net: phy: fix phy link up when limiting speed via device tree
+
+From: Mugunthan V N <mugunthanvnm@ti.com>
+
+[ Upstream commit eb686231fce3770299760f24fdcf5ad041f44153 ]
+
+When limiting the phy link speed using "max-speed" to 100mbps or less on a
+gigabit phy, the phy never completes auto-negotiation and the phy state
+machine is held in PHY_AN. Fix this issue by comparing the gigabit
+advertisement even though phydev->supported doesn't have it but the phy
+has BMSR_ESTATEN set, so that auto-negotiation is restarted when the old
+and new advertisements differ and the link comes up fine.
+
+Signed-off-by: Mugunthan V N <mugunthanvnm@ti.com>
+Reviewed-by: Florian Fainelli <f.fainelli@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/phy/phy_device.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -796,10 +796,11 @@ static int genphy_config_advert(struct p
+ if (phydev->supported & (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full)) {
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
+- if (adv != oldadv)
+- changed = 1;
+ }
+
++ if (adv != oldadv)
++ changed = 1;
++
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Willem de Bruijn <willemb@google.com>
+Date: Wed, 17 Jun 2015 15:59:34 -0400
+Subject: packet: avoid out of bounds read in round robin fanout
+
+From: Willem de Bruijn <willemb@google.com>
+
+[ Upstream commit 468479e6043c84f5a65299cc07cb08a22a28c2b1 ]
+
+PACKET_FANOUT_LB computes f->rr_cur such that it is modulo
+f->num_members. It returns the old value unconditionally, but
+f->num_members may have changed since the last store. Ensure
+that the return value is always < num.
+
+When modifying the logic, simplify it further by replacing the loop
+with an unconditional atomic increment.
+
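+A minimal userspace sketch of the issue (illustrative only, not part of
+the patch; the pick_old/pick_new helpers and the C11 atomics stand in
+for the kernel's atomic_t usage): once the member count shrinks, the old
+scheme can hand back a stale index that is >= num, while the new scheme
+is always reduced modulo num.
+
+    #include <stdatomic.h>
+    #include <stdio.h>
+
+    static atomic_uint rr_cur;
+
+    /* Old scheme: returns the stored value as-is, unbounded w.r.t. num. */
+    static unsigned int pick_old(void)
+    {
+            return atomic_load(&rr_cur);
+    }
+
+    /* New scheme: one atomic increment, always clamped by "% num". */
+    static unsigned int pick_new(unsigned int num)
+    {
+            return atomic_fetch_add(&rr_cur, 1) % num;
+    }
+
+    int main(void)
+    {
+            atomic_store(&rr_cur, 7);   /* stored while 8 members existed */
+            unsigned int num = 4;       /* membership has since shrunk */
+
+            printf("old: %u (out of bounds)\n", pick_old());
+            printf("new: %u (always < 4)\n", pick_new(num));
+            return 0;
+    }
+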
+Fixes: dc99f600698d ("packet: Add fanout support.")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Willem de Bruijn <willemb@google.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1272,16 +1272,6 @@ static void packet_sock_destruct(struct
+ sk_refcnt_debug_dec(sk);
+ }
+
+-static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
+-{
+- int x = atomic_read(&f->rr_cur) + 1;
+-
+- if (x >= num)
+- x = 0;
+-
+- return x;
+-}
+-
+ static unsigned int fanout_demux_hash(struct packet_fanout *f,
+ struct sk_buff *skb,
+ unsigned int num)
+@@ -1293,13 +1283,9 @@ static unsigned int fanout_demux_lb(stru
+ struct sk_buff *skb,
+ unsigned int num)
+ {
+- int cur, old;
++ unsigned int val = atomic_inc_return(&f->rr_cur);
+
+- cur = atomic_read(&f->rr_cur);
+- while ((old = atomic_cmpxchg(&f->rr_cur, cur,
+- fanout_rr_next(f, num))) != cur)
+- cur = old;
+- return cur;
++ return val % num;
+ }
+
+ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 16 Jun 2015 07:59:11 -0700
+Subject: packet: read num_members once in packet_rcv_fanout()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit f98f4514d07871da7a113dd9e3e330743fd70ae4 ]
+
+We need to tell the compiler it must not read f->num_members multiple
+times. Otherwise testing whether num is not zero is flaky, and we could
+attempt an invalid divide by 0 in fanout_demux_cpu().
+
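+A small userspace sketch of the hazard (illustrative only, not from the
+patch; the struct, the demux() helper and the READ_ONCE_UINT macro are
+invented, the latter approximating the kernel's READ_ONCE with a
+volatile access): without a single load, the compiler may legally
+re-read the shared field after the zero check, and a concurrent writer
+clearing it would turn the modulo into a divide by zero.
+
+    #include <stdio.h>
+
+    struct fanout { unsigned int num_members; };
+
+    /* Force exactly one load of the field, like READ_ONCE() does. */
+    #define READ_ONCE_UINT(x) (*(volatile unsigned int *)&(x))
+
+    static unsigned int demux(struct fanout *f, unsigned int hash)
+    {
+            unsigned int num = READ_ONCE_UINT(f->num_members);
+
+            if (!num)
+                    return 0;
+            return hash % num;  /* num cannot have been re-read as zero */
+    }
+
+    int main(void)
+    {
+            struct fanout f = { .num_members = 3 };
+
+            printf("%u\n", demux(&f, 10));  /* 10 % 3 = 1 */
+            return 0;
+    }
+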
+Note bug was present in packet_rcv_fanout_hash() and
+packet_rcv_fanout_lb() but final 3.1 had a simple location
+after commit 95ec3eb417115fb ("packet: Add 'cpu' fanout policy.")
+
+Fixes: dc99f600698dc ("packet: Add fanout support.")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/packet/af_packet.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1353,7 +1353,7 @@ static int packet_rcv_fanout(struct sk_b
+ struct packet_type *pt, struct net_device *orig_dev)
+ {
+ struct packet_fanout *f = pt->af_packet_priv;
+- unsigned int num = f->num_members;
++ unsigned int num = READ_ONCE(f->num_members);
+ struct packet_sock *po;
+ unsigned int idx;
+
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Date: Fri, 12 Jun 2015 10:16:41 -0300
+Subject: sctp: fix ASCONF list handling
+
+From: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+
+[ Upstream commit 2d45a02d0166caf2627fe91897c6ffc3b19514c4 ]
+
+->auto_asconf_splist is per namespace and mangled by functions like
+sctp_setsockopt_auto_asconf() which doesn't guarantee any serialization.
+
+Also, the call to inet_sk_copy_descendant() was backing up
+->auto_asconf_list through the copy but was not honoring
+->do_auto_asconf, which could lead to list corruption if it was
+different between both sockets.
+
+This commit thus fixes the list handling by using the ->addr_wq_lock
+spinlock to protect the list. Special handling is done upon socket
+creation and destruction for that. sctp_init_sock() will never return
+an error after having initialized asconf, so sctp_destroy_sock() can be
+called without addr_wq_lock. The lock now
+will be taken in sctp_close_sock(), before locking the socket, so we
+don't do it in inverse order compared to sctp_addr_wq_timeout_handler().
+
+Instead of taking the lock on sctp_sock_migrate() for copying and
+restoring the list values, it's preferred to avoid rewriting it by
+implementing sctp_copy_descendant().
+
+Issue was found with a test application that kept flipping sysctl
+default_auto_asconf on and off, but one could trigger it by issuing
+simultaneous setsockopt() calls on multiple sockets or by
+creating/destroying sockets fast enough. This is only triggerable
+locally.
+
+Fixes: 9f7d653b67ae ("sctp: Add Auto-ASCONF support (core).")
+Reported-by: Ji Jianwen <jiji@redhat.com>
+Suggested-by: Neil Horman <nhorman@tuxdriver.com>
+Suggested-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Signed-off-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/netns/sctp.h | 1 +
+ include/net/sctp/structs.h | 4 ++++
+ net/sctp/socket.c | 43 ++++++++++++++++++++++++++++++++-----------
+ 3 files changed, 37 insertions(+), 11 deletions(-)
+
+--- a/include/net/netns/sctp.h
++++ b/include/net/netns/sctp.h
+@@ -31,6 +31,7 @@ struct netns_sctp {
+ struct list_head addr_waitq;
+ struct timer_list addr_wq_timer;
+ struct list_head auto_asconf_splist;
++ /* Lock that protects both addr_waitq and auto_asconf_splist */
+ spinlock_t addr_wq_lock;
+
+ /* Lock that protects the local_addr_list writers */
+--- a/include/net/sctp/structs.h
++++ b/include/net/sctp/structs.h
+@@ -223,6 +223,10 @@ struct sctp_sock {
+ atomic_t pd_mode;
+ /* Receive to here while partial delivery is in effect. */
+ struct sk_buff_head pd_lobby;
++
++ /* These must be the last fields, as they will skipped on copies,
++ * like on accept and peeloff operations
++ */
+ struct list_head auto_asconf_list;
+ int do_auto_asconf;
+ };
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -1528,8 +1528,10 @@ static void sctp_close(struct sock *sk,
+
+ /* Supposedly, no process has access to the socket, but
+ * the net layers still may.
++ * Also, sctp_destroy_sock() needs to be called with addr_wq_lock
++ * held and that should be grabbed before socket lock.
+ */
+- local_bh_disable();
++ spin_lock_bh(&net->sctp.addr_wq_lock);
+ bh_lock_sock(sk);
+
+ /* Hold the sock, since sk_common_release() will put sock_put()
+@@ -1539,7 +1541,7 @@ static void sctp_close(struct sock *sk,
+ sk_common_release(sk);
+
+ bh_unlock_sock(sk);
+- local_bh_enable();
++ spin_unlock_bh(&net->sctp.addr_wq_lock);
+
+ sock_put(sk);
+
+@@ -3580,6 +3582,7 @@ static int sctp_setsockopt_auto_asconf(s
+ if ((val && sp->do_auto_asconf) || (!val && !sp->do_auto_asconf))
+ return 0;
+
++ spin_lock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ if (val == 0 && sp->do_auto_asconf) {
+ list_del(&sp->auto_asconf_list);
+ sp->do_auto_asconf = 0;
+@@ -3588,6 +3591,7 @@ static int sctp_setsockopt_auto_asconf(s
+ &sock_net(sk)->sctp.auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+ }
++ spin_unlock_bh(&sock_net(sk)->sctp.addr_wq_lock);
+ return 0;
+ }
+
+@@ -4121,18 +4125,28 @@ static int sctp_init_sock(struct sock *s
+ local_bh_disable();
+ percpu_counter_inc(&sctp_sockets_allocated);
+ sock_prot_inuse_add(net, sk->sk_prot, 1);
++
++ /* Nothing can fail after this block, otherwise
++ * sctp_destroy_sock() will be called without addr_wq_lock held
++ */
+ if (net->sctp.default_auto_asconf) {
++ spin_lock(&sock_net(sk)->sctp.addr_wq_lock);
+ list_add_tail(&sp->auto_asconf_list,
+ &net->sctp.auto_asconf_splist);
+ sp->do_auto_asconf = 1;
+- } else
++ spin_unlock(&sock_net(sk)->sctp.addr_wq_lock);
++ } else {
+ sp->do_auto_asconf = 0;
++ }
++
+ local_bh_enable();
+
+ return 0;
+ }
+
+-/* Cleanup any SCTP per socket resources. */
++/* Cleanup any SCTP per socket resources. Must be called with
++ * sock_net(sk)->sctp.addr_wq_lock held if sp->do_auto_asconf is true
++ */
+ static void sctp_destroy_sock(struct sock *sk)
+ {
+ struct sctp_sock *sp;
+@@ -7195,6 +7209,19 @@ void sctp_copy_sock(struct sock *newsk,
+ newinet->mc_list = NULL;
+ }
+
++static inline void sctp_copy_descendant(struct sock *sk_to,
++ const struct sock *sk_from)
++{
++ int ancestor_size = sizeof(struct inet_sock) +
++ sizeof(struct sctp_sock) -
++ offsetof(struct sctp_sock, auto_asconf_list);
++
++ if (sk_from->sk_family == PF_INET6)
++ ancestor_size += sizeof(struct ipv6_pinfo);
++
++ __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
++}
++
+ /* Populate the fields of the newsk from the oldsk and migrate the assoc
+ * and its messages to the newsk.
+ */
+@@ -7209,7 +7236,6 @@ static void sctp_sock_migrate(struct soc
+ struct sk_buff *skb, *tmp;
+ struct sctp_ulpevent *event;
+ struct sctp_bind_hashbucket *head;
+- struct list_head tmplist;
+
+ /* Migrate socket buffer sizes and all the socket level options to the
+ * new socket.
+@@ -7217,12 +7243,7 @@ static void sctp_sock_migrate(struct soc
+ newsk->sk_sndbuf = oldsk->sk_sndbuf;
+ newsk->sk_rcvbuf = oldsk->sk_rcvbuf;
+ /* Brute force copy old sctp opt. */
+- if (oldsp->do_auto_asconf) {
+- memcpy(&tmplist, &newsp->auto_asconf_list, sizeof(tmplist));
+- inet_sk_copy_descendant(newsk, oldsk);
+- memcpy(&newsp->auto_asconf_list, &tmplist, sizeof(tmplist));
+- } else
+- inet_sk_copy_descendant(newsk, oldsk);
++ sctp_copy_descendant(newsk, oldsk);
+
+ /* Restore the ep value that was overwritten with the above structure
+ * copy.
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Date: Mon, 29 Jun 2015 10:41:03 +0200
+Subject: sctp: Fix race between OOTB responce and route removal
+
+From: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+
+[ Upstream commit 29c4afc4e98f4dc0ea9df22c631841f9c220b944 ]
+
+There is a possible NULL pointer dereference during the statistics update if the route
+used for the OOTB response is removed at an unfortunate time. If the route exists when
+we receive an OOTB packet and we finally jump into sctp_packet_transmit() to send the
+ABORT, but in the meantime the route is removed under our feet, we take the "no_route"
+path and try to update stats with IP_INC_STATS(sock_net(asoc->base.sk), ...).
+
+But sctp_ootb_pkt_new(), used to prepare the response packet, doesn't call
+sctp_transport_set_owner() and therefore there is no asoc associated with this
+packet. Probably a temporary asoc just for OOTB responses is overkill, so just
+introduce a check like in all other places in sctp_packet_transmit(), where
+"asoc" is dereferenced.
+
+To reproduce this, one needs to
+0. ensure that sctp module is loaded (otherwise ABORT is not generated)
+1. remove default route on the machine
+2. while true; do
+ ip route del [interface-specific route]
+ ip route add [interface-specific route]
+ done
+3. send enough OOTB packets (i.e. HB REQs) from another host to trigger ABORT
+ response
+
+On x86_64 the crash looks like this:
+
+BUG: unable to handle kernel NULL pointer dereference at 0000000000000020
+IP: [<ffffffffa05ec9ac>] sctp_packet_transmit+0x63c/0x730 [sctp]
+PGD 0
+Oops: 0000 [#1] PREEMPT SMP
+Modules linked in: ...
+CPU: 0 PID: 0 Comm: swapper/0 Tainted: G O 4.0.5-1-ARCH #1
+Hardware name: ...
+task: ffffffff818124c0 ti: ffffffff81800000 task.ti: ffffffff81800000
+RIP: 0010:[<ffffffffa05ec9ac>] [<ffffffffa05ec9ac>] sctp_packet_transmit+0x63c/0x730 [sctp]
+RSP: 0018:ffff880127c037b8 EFLAGS: 00010296
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 00000015ff66b480
+RDX: 00000015ff66b400 RSI: ffff880127c17200 RDI: ffff880123403700
+RBP: ffff880127c03888 R08: 0000000000017200 R09: ffffffff814625af
+R10: ffffea00047e4680 R11: 00000000ffffff80 R12: ffff8800b0d38a28
+R13: ffff8800b0d38a28 R14: ffff8800b3e88000 R15: ffffffffa05f24e0
+FS: 0000000000000000(0000) GS:ffff880127c00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+CR2: 0000000000000020 CR3: 00000000c855b000 CR4: 00000000000007f0
+Stack:
+ ffff880127c03910 ffff8800b0d38a28 ffffffff8189d240 ffff88011f91b400
+ ffff880127c03828 ffffffffa05c94c5 0000000000000000 ffff8800baa1c520
+ 0000000000000000 0000000000000001 0000000000000000 0000000000000000
+Call Trace:
+ <IRQ>
+ [<ffffffffa05c94c5>] ? sctp_sf_tabort_8_4_8.isra.20+0x85/0x140 [sctp]
+ [<ffffffffa05d6b42>] ? sctp_transport_put+0x52/0x80 [sctp]
+ [<ffffffffa05d0bfc>] sctp_do_sm+0xb8c/0x19a0 [sctp]
+ [<ffffffff810b0e00>] ? trigger_load_balance+0x90/0x210
+ [<ffffffff810e0329>] ? update_process_times+0x59/0x60
+ [<ffffffff812c7a40>] ? timerqueue_add+0x60/0xb0
+ [<ffffffff810e0549>] ? enqueue_hrtimer+0x29/0xa0
+ [<ffffffff8101f599>] ? read_tsc+0x9/0x10
+ [<ffffffff8116d4b5>] ? put_page+0x55/0x60
+ [<ffffffff810ee1ad>] ? clockevents_program_event+0x6d/0x100
+ [<ffffffff81462b68>] ? skb_free_head+0x58/0x80
+ [<ffffffffa029a10b>] ? chksum_update+0x1b/0x27 [crc32c_generic]
+ [<ffffffff81283f3e>] ? crypto_shash_update+0xce/0xf0
+ [<ffffffffa05d3993>] sctp_endpoint_bh_rcv+0x113/0x280 [sctp]
+ [<ffffffffa05dd4e6>] sctp_inq_push+0x46/0x60 [sctp]
+ [<ffffffffa05ed7a0>] sctp_rcv+0x880/0x910 [sctp]
+ [<ffffffffa05ecb50>] ? sctp_packet_transmit_chunk+0xb0/0xb0 [sctp]
+ [<ffffffffa05ecb70>] ? sctp_csum_update+0x20/0x20 [sctp]
+ [<ffffffff814b05a5>] ? ip_route_input_noref+0x235/0xd30
+ [<ffffffff81051d6b>] ? ack_ioapic_level+0x7b/0x150
+ [<ffffffff814b27be>] ip_local_deliver_finish+0xae/0x210
+ [<ffffffff814b2e15>] ip_local_deliver+0x35/0x90
+ [<ffffffff814b2a15>] ip_rcv_finish+0xf5/0x370
+ [<ffffffff814b3128>] ip_rcv+0x2b8/0x3a0
+ [<ffffffff81474193>] __netif_receive_skb_core+0x763/0xa50
+ [<ffffffff81476c28>] __netif_receive_skb+0x18/0x60
+ [<ffffffff81476cb0>] netif_receive_skb_internal+0x40/0xd0
+ [<ffffffff814776c8>] napi_gro_receive+0xe8/0x120
+ [<ffffffffa03946aa>] rtl8169_poll+0x2da/0x660 [r8169]
+ [<ffffffff8147896a>] net_rx_action+0x21a/0x360
+ [<ffffffff81078dc1>] __do_softirq+0xe1/0x2d0
+ [<ffffffff8107912d>] irq_exit+0xad/0xb0
+ [<ffffffff8157d158>] do_IRQ+0x58/0xf0
+ [<ffffffff8157b06d>] common_interrupt+0x6d/0x6d
+ <EOI>
+ [<ffffffff810e1218>] ? hrtimer_start+0x18/0x20
+ [<ffffffffa05d65f9>] ? sctp_transport_destroy_rcu+0x29/0x30 [sctp]
+ [<ffffffff81020c50>] ? mwait_idle+0x60/0xa0
+ [<ffffffff810216ef>] arch_cpu_idle+0xf/0x20
+ [<ffffffff810b731c>] cpu_startup_entry+0x3ec/0x480
+ [<ffffffff8156b365>] rest_init+0x85/0x90
+ [<ffffffff818eb035>] start_kernel+0x48b/0x4ac
+ [<ffffffff818ea120>] ? early_idt_handlers+0x120/0x120
+ [<ffffffff818ea339>] x86_64_start_reservations+0x2a/0x2c
+ [<ffffffff818ea49c>] x86_64_start_kernel+0x161/0x184
+Code: 90 48 8b 80 b8 00 00 00 48 89 85 70 ff ff ff 48 83 bd 70 ff ff ff 00 0f 85 cd fa ff ff 48 89 df 31 db e8 18 63 e7 e0 48 8b 45 80 <48> 8b 40 20 48 8b 40 30 48 8b 80 68 01 00 00 65 48 ff 40 78 e9
+RIP [<ffffffffa05ec9ac>] sctp_packet_transmit+0x63c/0x730 [sctp]
+ RSP <ffff880127c037b8>
+CR2: 0000000000000020
+---[ end trace 5aec7fd2dc983574 ]---
+Kernel panic - not syncing: Fatal exception in interrupt
+Kernel Offset: 0x0 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffff9fffffff)
+drm_kms_helper: panic occurred, switching back to text console
+---[ end Kernel panic - not syncing: Fatal exception in interrupt
+
+Signed-off-by: Alexander Sverdlin <alexander.sverdlin@nokia.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/output.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/sctp/output.c
++++ b/net/sctp/output.c
+@@ -599,7 +599,9 @@ out:
+ return err;
+ no_route:
+ kfree_skb(nskb);
+- IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
++
++ if (asoc)
++ IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+
+ /* FIXME: Returning the 'err' will effect all the associations
+ * associated with a socket, although only one of the paths of the
arm-clk-imx6q-refine-sata-s-parent.patch
kvm-nsvm-check-for-nrips-support-before-updating-control-field.patch
can-fix-loss-of-can-frames-in-raw_rcv.patch
+sctp-fix-asconf-list-handling.patch
+bridge-fix-br_stp_set_bridge_priority-race-conditions.patch
+packet-read-num_members-once-in-packet_rcv_fanout.patch
+packet-avoid-out-of-bounds-read-in-round-robin-fanout.patch
+neigh-do-not-modify-unlinked-entries.patch
+mac80211-fix-locking-in-update_vlan_tailroom_need_count.patch
+mvneta-add-forgotten-initialization-of-autonegotiation-bits.patch
+tcp-do-not-call-tcp_fastopen_reset_cipher-from-interrupt-context.patch
+xen-netback-fix-a-bug-during-initialization.patch
+ip-report-the-original-address-of-icmp-messages.patch
+net-mlx4_en-release-tx-qp-when-destroying-tx-ring.patch
+net-mlx4_en-wake-tx-queues-only-when-there-s-enough-room.patch
+net-mlx4_en-fix-wrong-csum-complete-report-when-rxvlan-offload-is-disabled.patch
+mlx4-disable-ha-for-sriov-pf-roce-devices.patch
+net-phy-fix-phy-link-up-when-limiting-speed-via-device-tree.patch
+bnx2x-fix-lockdep-splat.patch
+sctp-fix-race-between-ootb-responce-and-route-removal.patch
+amd-xgbe-add-the-__gfp_nowarn-flag-to-rx-buffer-allocation.patch
+net-mvneta-introduce-compatible-string-marvell-armada-xp-neta.patch
+arm-mvebu-update-ethernet-compatible-string-for-armada-xp.patch
+net-mvneta-disable-ip-checksum-with-jumbo-frames-for-armada-370.patch
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: Christoph Paasch <cpaasch@apple.com>
+Date: Thu, 18 Jun 2015 09:15:34 -0700
+Subject: tcp: Do not call tcp_fastopen_reset_cipher from interrupt context
+
+From: Christoph Paasch <cpaasch@apple.com>
+
+[ Upstream commit dfea2aa654243f70dc53b8648d0bbdeec55a7df1 ]
+
+tcp_fastopen_reset_cipher really cannot be called from interrupt
+context. It allocates the tcp_fastopen_context with GFP_KERNEL and
+calls crypto_alloc_cipher, which allocates all kind of stuff with
+GFP_KERNEL.
+
+Thus, we might sleep when the key-generation is triggered by an
+incoming TFO cookie-request which would then happen in interrupt-
+context, as shown by enabling CONFIG_DEBUG_ATOMIC_SLEEP:
+
+[ 36.001813] BUG: sleeping function called from invalid context at mm/slub.c:1266
+[ 36.003624] in_atomic(): 1, irqs_disabled(): 0, pid: 1016, name: packetdrill
+[ 36.004859] CPU: 1 PID: 1016 Comm: packetdrill Not tainted 4.1.0-rc7 #14
+[ 36.006085] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014
+[ 36.008250] 00000000000004f2 ffff88007f8838a8 ffffffff8171d53a ffff880075a084a8
+[ 36.009630] ffff880075a08000 ffff88007f8838c8 ffffffff810967d3 ffff88007f883928
+[ 36.011076] 0000000000000000 ffff88007f8838f8 ffffffff81096892 ffff88007f89be00
+[ 36.012494] Call Trace:
+[ 36.012953] <IRQ> [<ffffffff8171d53a>] dump_stack+0x4f/0x6d
+[ 36.014085] [<ffffffff810967d3>] ___might_sleep+0x103/0x170
+[ 36.015117] [<ffffffff81096892>] __might_sleep+0x52/0x90
+[ 36.016117] [<ffffffff8118e887>] kmem_cache_alloc_trace+0x47/0x190
+[ 36.017266] [<ffffffff81680d82>] ? tcp_fastopen_reset_cipher+0x42/0x130
+[ 36.018485] [<ffffffff81680d82>] tcp_fastopen_reset_cipher+0x42/0x130
+[ 36.019679] [<ffffffff81680f01>] tcp_fastopen_init_key_once+0x61/0x70
+[ 36.020884] [<ffffffff81680f2c>] __tcp_fastopen_cookie_gen+0x1c/0x60
+[ 36.022058] [<ffffffff816814ff>] tcp_try_fastopen+0x58f/0x730
+[ 36.023118] [<ffffffff81671788>] tcp_conn_request+0x3e8/0x7b0
+[ 36.024185] [<ffffffff810e3872>] ? __module_text_address+0x12/0x60
+[ 36.025327] [<ffffffff8167b2e1>] tcp_v4_conn_request+0x51/0x60
+[ 36.026410] [<ffffffff816727e0>] tcp_rcv_state_process+0x190/0xda0
+[ 36.027556] [<ffffffff81661f97>] ? __inet_lookup_established+0x47/0x170
+[ 36.028784] [<ffffffff8167c2ad>] tcp_v4_do_rcv+0x16d/0x3d0
+[ 36.029832] [<ffffffff812e6806>] ? security_sock_rcv_skb+0x16/0x20
+[ 36.030936] [<ffffffff8167cc8a>] tcp_v4_rcv+0x77a/0x7b0
+[ 36.031875] [<ffffffff816af8c3>] ? iptable_filter_hook+0x33/0x70
+[ 36.032953] [<ffffffff81657d22>] ip_local_deliver_finish+0x92/0x1f0
+[ 36.034065] [<ffffffff81657f1a>] ip_local_deliver+0x9a/0xb0
+[ 36.035069] [<ffffffff81657c90>] ? ip_rcv+0x3d0/0x3d0
+[ 36.035963] [<ffffffff81657569>] ip_rcv_finish+0x119/0x330
+[ 36.036950] [<ffffffff81657ba7>] ip_rcv+0x2e7/0x3d0
+[ 36.037847] [<ffffffff81610652>] __netif_receive_skb_core+0x552/0x930
+[ 36.038994] [<ffffffff81610a57>] __netif_receive_skb+0x27/0x70
+[ 36.040033] [<ffffffff81610b72>] process_backlog+0xd2/0x1f0
+[ 36.041025] [<ffffffff81611482>] net_rx_action+0x122/0x310
+[ 36.042007] [<ffffffff81076743>] __do_softirq+0x103/0x2f0
+[ 36.042978] [<ffffffff81723e3c>] do_softirq_own_stack+0x1c/0x30
+
+This patch moves the call to tcp_fastopen_init_key_once to the places
+where a listener socket creates its TFO-state, which always happens in
+user-context (either from the setsockopt, or implicitly during the
+listen()-call).
+
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Fixes: 222e83d2e0ae ("tcp: switch tcp_fastopen key generation to net_get_random_once")
+Signed-off-by: Christoph Paasch <cpaasch@apple.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/af_inet.c | 2 ++
+ net/ipv4/tcp.c | 7 +++++--
+ net/ipv4/tcp_fastopen.c | 2 --
+ 3 files changed, 7 insertions(+), 4 deletions(-)
+
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -228,6 +228,8 @@ int inet_listen(struct socket *sock, int
+ err = 0;
+ if (err)
+ goto out;
++
++ tcp_fastopen_init_key_once(true);
+ }
+ err = inet_csk_listen_start(sk, backlog);
+ if (err)
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2545,10 +2545,13 @@ static int do_tcp_setsockopt(struct sock
+
+ case TCP_FASTOPEN:
+ if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
+- TCPF_LISTEN)))
++ TCPF_LISTEN))) {
++ tcp_fastopen_init_key_once(true);
++
+ err = fastopen_init_queue(sk, val);
+- else
++ } else {
+ err = -EINVAL;
++ }
+ break;
+ case TCP_TIMESTAMP:
+ if (!tp->repair)
+--- a/net/ipv4/tcp_fastopen.c
++++ b/net/ipv4/tcp_fastopen.c
+@@ -78,8 +78,6 @@ static bool __tcp_fastopen_cookie_gen(co
+ struct tcp_fastopen_context *ctx;
+ bool ok = false;
+
+- tcp_fastopen_init_key_once(true);
+-
+ rcu_read_lock();
+ ctx = rcu_dereference(tcp_fastopen_ctx);
+ if (ctx) {
--- /dev/null
+From foo@baz Fri Jul 3 20:00:25 PDT 2015
+From: "Palik, Imre" <imrep@amazon.de>
+Date: Fri, 19 Jun 2015 14:21:51 +0200
+Subject: xen-netback: fix a BUG() during initialization
+
+From: "Palik, Imre" <imrep@amazon.de>
+
+[ Upstream commit 12b322ac85208de564ecf23aa754d796a91de21f ]
+
+Commit edafc132baac ("xen-netback: making the bandwidth limiter runtime settable")
+introduced the capability to change the bandwidth rate limit at runtime.
+But it also introduced a possible crashing bug.
+
+If netback receives two XenbusStateConnected without getting the
+hotplug-status watch firing in between, then it will try to register the
+watches for the rate limiter again. But this triggers a BUG() in the watch
+registration code.
+
+The fix modifies connect() to remove the possibly existing packet-rate
+watches before trying to install those watches. This behaviour is in line
+with how connect() deals with the hotplug-status watch.
+
+Signed-off-by: Imre Palik <imrep@amazon.de>
+Cc: Matt Wilson <msw@amazon.com>
+Acked-by: Wei Liu <wei.liu2@citrix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netback/xenbus.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/xen-netback/xenbus.c
++++ b/drivers/net/xen-netback/xenbus.c
+@@ -681,6 +681,9 @@ static int xen_register_watchers(struct
+ char *node;
+ unsigned maxlen = strlen(dev->nodename) + sizeof("/rate");
+
++ if (vif->credit_watch.node)
++ return -EADDRINUSE;
++
+ node = kmalloc(maxlen, GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+@@ -770,6 +773,7 @@ static void connect(struct backend_info
+ }
+
+ xen_net_read_rate(dev, &credit_bytes, &credit_usec);
++ xen_unregister_watchers(be->vif);
+ xen_register_watchers(dev, be->vif);
+ read_xenbus_vif_flags(be);
+