--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Wei Yang <weiyang@linux.vnet.ibm.com>
+Date: Sun, 1 Jun 2014 15:25:19 +0800
+Subject: mlx4_core: Stash PCI ID driver_data in mlx4_priv structure
+
+From: Wei Yang <weiyang@linux.vnet.ibm.com>
+
+[ No upstream commit, this is a cherry picked backport enabler. ]
+
+From: Roland Dreier <roland@purestorage.com>
+
+That way we can check flags later on, when we've finished with the
+pci_device_id structure. Also convert the "is VF" flag to an enum:
+"Never do in the preprocessor what can be done in C."
+
+Signed-off-by: Roland Dreier <roland@purestorage.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/main.c | 27 +++++++++++++++------------
+ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 6 ++++++
+ 2 files changed, 21 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -92,8 +92,6 @@ MODULE_PARM_DESC(log_num_mgm_entry_size,
+ " 10 gives 248.range: 9<="
+ " log_num_mgm_entry_size <= 12");
+
+-#define MLX4_VF (1 << 0)
+-
+ #define HCA_GLOBAL_CAP_MASK 0
+ #define PF_CONTEXT_BEHAVIOUR_MASK 0
+
+@@ -1731,7 +1729,7 @@ static void mlx4_free_ownership(struct m
+ iounmap(owner);
+ }
+
+-static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
++static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
+ {
+ struct mlx4_priv *priv;
+ struct mlx4_dev *dev;
+@@ -1754,12 +1752,11 @@ static int __mlx4_init_one(struct pci_de
+ /*
+ * Check for BARs.
+ */
+- if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
++ if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+- "(id == 0X%p, id->driver_data: 0x%lx,"
+- " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+- id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
++ "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
++ pci_dev_data, pci_resource_flags(pdev, 0));
+ err = -ENODEV;
+ goto err_disable_pdev;
+ }
+@@ -1824,7 +1821,7 @@ static int __mlx4_init_one(struct pci_de
+
+ dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+- if (id && id->driver_data & MLX4_VF) {
++ if (pci_dev_data & MLX4_PCI_DEV_IS_VF) {
+ /* When acting as pf, we normally skip vfs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+@@ -1970,6 +1967,7 @@ slave_start:
+ mlx4_sense_init(dev);
+ mlx4_start_sense(dev);
+
++ priv->pci_dev_data = pci_dev_data;
+ pci_set_drvdata(pdev, dev);
+
+ return 0;
+@@ -2039,7 +2037,7 @@ static int __devinit mlx4_init_one(struc
+ {
+ printk_once(KERN_INFO "%s", mlx4_version);
+
+- return __mlx4_init_one(pdev, id);
++ return __mlx4_init_one(pdev, id->driver_data);
+ }
+
+ static void mlx4_remove_one(struct pci_dev *pdev)
+@@ -2108,8 +2106,13 @@ static void mlx4_remove_one(struct pci_d
+
+ int mlx4_restart_one(struct pci_dev *pdev)
+ {
++ struct mlx4_dev *dev = pci_get_drvdata(pdev);
++ struct mlx4_priv *priv = mlx4_priv(dev);
++ int pci_dev_data;
++
++ pci_dev_data = priv->pci_dev_data;
+ mlx4_remove_one(pdev);
+- return __mlx4_init_one(pdev, NULL);
++ return __mlx4_init_one(pdev, pci_dev_data);
+ }
+
+ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
+@@ -2138,11 +2141,11 @@ static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+- { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
++ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_PCI_DEV_IS_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+- { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
++ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_PCI_DEV_IS_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -711,6 +711,10 @@ struct mlx4_steer {
+ struct list_head steer_entries[MLX4_NUM_STEERS];
+ };
+
++enum {
++ MLX4_PCI_DEV_IS_VF = 1 << 0,
++};
++
+ struct mlx4_priv {
+ struct mlx4_dev dev;
+
+@@ -718,6 +722,8 @@ struct mlx4_priv {
+ struct list_head ctx_list;
+ spinlock_t ctx_lock;
+
++ int pci_dev_data;
++
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
+
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 29 May 2014 08:45:14 -0700
+Subject: net: fix inet_getid() and ipv6_select_ident() bugs
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 39c36094d78c39e038c1e499b2364e13bce36f54 ]
+
+I noticed we were sending wrong IPv4 ID in TCP flows when MTU discovery
+is disabled.
+Note how GSO/TSO packets do not have monotonically incrementing ID.
+
+06:37:41.575531 IP (id 14227, proto: TCP (6), length: 4396)
+06:37:41.575534 IP (id 14272, proto: TCP (6), length: 65212)
+06:37:41.575544 IP (id 14312, proto: TCP (6), length: 57972)
+06:37:41.575678 IP (id 14317, proto: TCP (6), length: 7292)
+06:37:41.575683 IP (id 14361, proto: TCP (6), length: 63764)
+
+It appears I introduced this bug in linux-3.1.
+
+inet_getid() must return the old value of peer->ip_id_count,
+not the new one.
+
+Let's revert this part, and remove the prevention of
+a null identification field in IPv6 Fragment Extension Header,
+which is dubious and not even done properly.
+
+Fixes: 87c48fa3b463 ("ipv6: make fragment identifications less predictable")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inetpeer.h | 9 +--------
+ net/ipv6/ip6_output.c | 11 +++--------
+ 2 files changed, 4 insertions(+), 16 deletions(-)
+
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -114,16 +114,9 @@ static inline void inet_peer_refcheck(co
+ /* can be called with or without local BH being disabled */
+ static inline int inet_getid(struct inet_peer *p, int more)
+ {
+- int old, new;
+ more++;
+ inet_peer_refcheck(p);
+- do {
+- old = atomic_read(&p->ip_id_count);
+- new = old + more;
+- if (!new)
+- new = 1;
+- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
+- return new;
++ return atomic_add_return(more, &p->ip_id_count) - more;
+ }
+
+ #endif /* _NET_INETPEER_H */
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -611,7 +611,7 @@ int ip6_find_1stfragopt(struct sk_buff *
+ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
+ {
+ static atomic_t ipv6_fragmentation_id;
+- int old, new;
++ int ident;
+
+ if (rt && !(rt->dst.flags & DST_NOPEER)) {
+ struct inet_peer *peer;
+@@ -624,13 +624,8 @@ void ipv6_select_ident(struct frag_hdr *
+ return;
+ }
+ }
+- do {
+- old = atomic_read(&ipv6_fragmentation_id);
+- new = old + 1;
+- if (!new)
+- new = 1;
+- } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
+- fhdr->identification = htonl(new);
++ ident = atomic_inc_return(&ipv6_fragmentation_id);
++ fhdr->identification = htonl(ident);
+ }
+
+ int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Wei Yang <weiyang@linux.vnet.ibm.com>
+Date: Sun, 1 Jun 2014 15:25:20 +0800
+Subject: net/mlx4_core: Preserve pci_dev_data after __mlx4_remove_one()
+
+From: Wei Yang <weiyang@linux.vnet.ibm.com>
+
+[ Upstream commit befdf8978accecac2e0739e6b5075afc62db37fe ]
+
+This patch wraps up a helper function __mlx4_remove_one() which does the
+teardown but preserves the drv_data. Functions like
+mlx4_pci_err_detected() and mlx4_restart_one() will call this one without
+releasing drvdata.
+
+Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/mellanox/mlx4/main.c | 145 ++++++++++++++++--------------
+ drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
+ 2 files changed, 83 insertions(+), 63 deletions(-)
+
+--- a/drivers/net/ethernet/mellanox/mlx4/main.c
++++ b/drivers/net/ethernet/mellanox/mlx4/main.c
+@@ -1798,15 +1798,8 @@ static int __mlx4_init_one(struct pci_de
+ /* Allow large DMA segments, up to the firmware limit of 1 GB */
+ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
+
+- priv = kzalloc(sizeof *priv, GFP_KERNEL);
+- if (!priv) {
+- dev_err(&pdev->dev, "Device struct alloc failed, "
+- "aborting.\n");
+- err = -ENOMEM;
+- goto err_release_regions;
+- }
+-
+- dev = &priv->dev;
++ dev = pci_get_drvdata(pdev);
++ priv = mlx4_priv(dev);
+ dev->pdev = pdev;
+ INIT_LIST_HEAD(&priv->ctx_list);
+ spin_lock_init(&priv->ctx_lock);
+@@ -1967,8 +1960,7 @@ slave_start:
+ mlx4_sense_init(dev);
+ mlx4_start_sense(dev);
+
+- priv->pci_dev_data = pci_dev_data;
+- pci_set_drvdata(pdev, dev);
++ priv->removed = 0;
+
+ return 0;
+
+@@ -2035,73 +2027,100 @@ err_disable_pdev:
+ static int __devinit mlx4_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+ {
++ struct mlx4_priv *priv;
++ struct mlx4_dev *dev;
++
+ printk_once(KERN_INFO "%s", mlx4_version);
+
++ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
++ if (!priv)
++ return -ENOMEM;
++
++ dev = &priv->dev;
++ pci_set_drvdata(pdev, dev);
++ priv->pci_dev_data = id->driver_data;
++
+ return __mlx4_init_one(pdev, id->driver_data);
+ }
+
+-static void mlx4_remove_one(struct pci_dev *pdev)
++static void __mlx4_remove_one(struct pci_dev *pdev)
+ {
+ struct mlx4_dev *dev = pci_get_drvdata(pdev);
+ struct mlx4_priv *priv = mlx4_priv(dev);
++ int pci_dev_data;
+ int p;
+
+- if (dev) {
+- /* in SRIOV it is not allowed to unload the pf's
+- * driver while there are alive vf's */
+- if (mlx4_is_master(dev)) {
+- if (mlx4_how_many_lives_vf(dev))
+- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+- }
+- mlx4_stop_sense(dev);
+- mlx4_unregister_device(dev);
++ if (priv->removed)
++ return;
+
+- for (p = 1; p <= dev->caps.num_ports; p++) {
+- mlx4_cleanup_port_info(&priv->port[p]);
+- mlx4_CLOSE_PORT(dev, p);
+- }
++ pci_dev_data = priv->pci_dev_data;
+
+- mlx4_cleanup_counters_table(dev);
+- mlx4_cleanup_mcg_table(dev);
+- mlx4_cleanup_qp_table(dev);
+- mlx4_cleanup_srq_table(dev);
+- mlx4_cleanup_cq_table(dev);
+- mlx4_cmd_use_polling(dev);
+- mlx4_cleanup_eq_table(dev);
+- mlx4_cleanup_mr_table(dev);
+- mlx4_cleanup_xrcd_table(dev);
+- mlx4_cleanup_pd_table(dev);
++ /* in SRIOV it is not allowed to unload the pf's
++ * driver while there are alive vf's */
++ if (mlx4_is_master(dev)) {
++ if (mlx4_how_many_lives_vf(dev))
++ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
++ }
++ mlx4_stop_sense(dev);
++ mlx4_unregister_device(dev);
+
+- if (mlx4_is_master(dev))
+- mlx4_free_resource_tracker(dev);
++ for (p = 1; p <= dev->caps.num_ports; p++) {
++ mlx4_cleanup_port_info(&priv->port[p]);
++ mlx4_CLOSE_PORT(dev, p);
++ }
+
+- iounmap(priv->kar);
+- mlx4_uar_free(dev, &priv->driver_uar);
+- mlx4_cleanup_uar_table(dev);
+- if (!mlx4_is_slave(dev))
+- mlx4_clear_steering(dev);
+- mlx4_free_eq_table(dev);
+- if (mlx4_is_master(dev))
+- mlx4_multi_func_cleanup(dev);
+- mlx4_close_hca(dev);
+- if (mlx4_is_slave(dev))
+- mlx4_multi_func_cleanup(dev);
+- mlx4_cmd_cleanup(dev);
+-
+- if (dev->flags & MLX4_FLAG_MSI_X)
+- pci_disable_msix(pdev);
+- if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+- mlx4_warn(dev, "Disabling sriov\n");
+- pci_disable_sriov(pdev);
+- }
++ mlx4_cleanup_counters_table(dev);
++ mlx4_cleanup_mcg_table(dev);
++ mlx4_cleanup_qp_table(dev);
++ mlx4_cleanup_srq_table(dev);
++ mlx4_cleanup_cq_table(dev);
++ mlx4_cmd_use_polling(dev);
++ mlx4_cleanup_eq_table(dev);
++ mlx4_cleanup_mr_table(dev);
++ mlx4_cleanup_xrcd_table(dev);
++ mlx4_cleanup_pd_table(dev);
++
++ if (mlx4_is_master(dev))
++ mlx4_free_resource_tracker(dev);
++
++ iounmap(priv->kar);
++ mlx4_uar_free(dev, &priv->driver_uar);
++ mlx4_cleanup_uar_table(dev);
++ if (!mlx4_is_slave(dev))
++ mlx4_clear_steering(dev);
++ mlx4_free_eq_table(dev);
++ if (mlx4_is_master(dev))
++ mlx4_multi_func_cleanup(dev);
++ mlx4_close_hca(dev);
++ if (mlx4_is_slave(dev))
++ mlx4_multi_func_cleanup(dev);
++ mlx4_cmd_cleanup(dev);
+
+- if (!mlx4_is_slave(dev))
+- mlx4_free_ownership(dev);
+- kfree(priv);
+- pci_release_regions(pdev);
+- pci_disable_device(pdev);
+- pci_set_drvdata(pdev, NULL);
++ if (dev->flags & MLX4_FLAG_MSI_X)
++ pci_disable_msix(pdev);
++ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
++ mlx4_warn(dev, "Disabling sriov\n");
++ pci_disable_sriov(pdev);
+ }
++
++ if (!mlx4_is_slave(dev))
++ mlx4_free_ownership(dev);
++
++ pci_release_regions(pdev);
++ pci_disable_device(pdev);
++ memset(priv, 0, sizeof(*priv));
++ priv->pci_dev_data = pci_dev_data;
++ priv->removed = 1;
++}
++
++static void mlx4_remove_one(struct pci_dev *pdev)
++{
++ struct mlx4_dev *dev = pci_get_drvdata(pdev);
++ struct mlx4_priv *priv = mlx4_priv(dev);
++
++ __mlx4_remove_one(pdev);
++ kfree(priv);
++ pci_set_drvdata(pdev, NULL);
+ }
+
+ int mlx4_restart_one(struct pci_dev *pdev)
+@@ -2111,7 +2130,7 @@ int mlx4_restart_one(struct pci_dev *pde
+ int pci_dev_data;
+
+ pci_dev_data = priv->pci_dev_data;
+- mlx4_remove_one(pdev);
++ __mlx4_remove_one(pdev);
+ return __mlx4_init_one(pdev, pci_dev_data);
+ }
+
+--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
++++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+@@ -723,6 +723,7 @@ struct mlx4_priv {
+ spinlock_t ctx_lock;
+
+ int pci_dev_data;
++ int removed;
+
+ struct list_head pgdir_list;
+ struct mutex pgdir_mutex;
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Tom Gundersen <teg@jklm.no>
+Date: Thu, 15 May 2014 23:21:30 +0200
+Subject: net: tunnels - enable module autoloading
+
+From: Tom Gundersen <teg@jklm.no>
+
+[ Upstream commit f98f89a0104454f35a62d681683c844f6dbf4043 ]
+
+Enable the module alias hookup to allow tunnel modules to be autoloaded on demand.
+
+This is in line with how most other netdev kinds work, and will allow userspace
+to create tunnels without having CAP_SYS_MODULE.
+
+Signed-off-by: Tom Gundersen <teg@jklm.no>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ipip.c | 1 +
+ net/ipv6/ip6_tunnel.c | 1 +
+ net/ipv6/sit.c | 1 +
+ 3 files changed, 3 insertions(+)
+
+--- a/net/ipv4/ipip.c
++++ b/net/ipv4/ipip.c
+@@ -908,4 +908,5 @@ static void __exit ipip_fini(void)
+ module_init(ipip_init);
+ module_exit(ipip_fini);
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("ipip");
+ MODULE_ALIAS_NETDEV("tunl0");
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -57,6 +57,7 @@
+ MODULE_AUTHOR("Ville Nuorvala");
+ MODULE_DESCRIPTION("IPv6 tunneling device");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("ip6tnl");
+ MODULE_ALIAS_NETDEV("ip6tnl0");
+
+ #ifdef IP6_TNL_DEBUG
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -1303,4 +1303,5 @@ static int __init sit_init(void)
+ module_init(sit_init);
+ module_exit(sit_cleanup);
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS_RTNL_LINK("sit");
+ MODULE_ALIAS_NETDEV("sit0");
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Michal Schmidt <mschmidt@redhat.com>
+Date: Mon, 2 Jun 2014 18:25:02 +0200
+Subject: netlink: rate-limit leftover bytes warning and print process name
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit bfc5184b69cf9eeb286137640351c650c27f118a ]
+
+Any process is able to send netlink messages with leftover bytes.
+Make the warning rate-limited to prevent too much log spam.
+
+The warning is supposed to help find userspace bugs, so print the
+triggering command name to implicate the buggy program.
+
+[v2: Use pr_warn_ratelimited instead of printk_ratelimited.]
+
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/nlattr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/lib/nlattr.c
++++ b/lib/nlattr.c
+@@ -197,8 +197,8 @@ int nla_parse(struct nlattr **tb, int ma
+ }
+
+ if (unlikely(rem > 0))
+- printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+- "attributes.\n", rem);
++ pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
++ rem, current->comm);
+
+ err = 0;
+ errout:
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Xufeng Zhang <xufeng.zhang@windriver.com>
+Date: Thu, 12 Jun 2014 10:53:36 +0800
+Subject: sctp: Fix sk_ack_backlog wrap-around problem
+
+From: Xufeng Zhang <xufeng.zhang@windriver.com>
+
+[ Upstream commit d3217b15a19a4779c39b212358a5c71d725822ee ]
+
+Consider the scenario:
+For a TCP-style socket, while processing the COOKIE_ECHO chunk in
+sctp_sf_do_5_1D_ce(), after it has passed a series of sanity check,
+a new association would be created in sctp_unpack_cookie(), but afterwards,
+some processing maybe failed, and sctp_association_free() will be called to
+free the previously allocated association, in sctp_association_free(),
+sk_ack_backlog value is decremented for this socket, since the initial
+value for sk_ack_backlog is 0, after the decrement, it will be 65535,
+a wrap-around problem happens, and if we want to establish new associations
+afterward in the same socket, ABORT would be triggered since sctp deems the
+accept queue as full.
+Fix this issue by only decrementing sk_ack_backlog for associations in
+the endpoint's list.
+
+Fix-suggested-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: Xufeng Zhang <xufeng.zhang@windriver.com>
+Acked-by: Daniel Borkmann <dborkman@redhat.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/associola.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -389,7 +389,7 @@ void sctp_association_free(struct sctp_a
+ /* Only real associations count against the endpoint, so
+ * don't bother for if this is a temporary association.
+ */
+- if (!asoc->temp) {
++ if (!list_empty(&asoc->asocs)) {
+ list_del(&asoc->asocs);
+
+ /* Decrement the backlog value for a TCP-style listening
iscsi-target-reject-mutual-authentication-with-reflected-chap_c.patch
evm-prohibit-userspace-writing-security.evm-hmac-value.patch
+netlink-rate-limit-leftover-bytes-warning-and-print-process-name.patch
+net-tunnels-enable-module-autoloading.patch
+net-fix-inet_getid-and-ipv6_select_ident-bugs.patch
+team-fix-mtu-setting.patch
+sctp-fix-sk_ack_backlog-wrap-around-problem.patch
+mlx4_core-stash-pci-id-driver_data-in-mlx4_priv-structure.patch
+net-mlx4_core-preserve-pci_dev_data-after-__mlx4_remove_one.patch
--- /dev/null
+From foo@baz Wed Jun 18 20:03:44 PDT 2014
+From: Jiri Pirko <jiri@resnulli.us>
+Date: Thu, 29 May 2014 20:46:17 +0200
+Subject: team: fix mtu setting
+
+From: Jiri Pirko <jiri@resnulli.us>
+
+[ Upstream commit 9d0d68faea6962d62dd501cd6e71ce5cc8ed262b ]
+
+Now it is not possible to set mtu to team device which has a port
+enslaved to it. The reason is that when team_change_mtu() calls
+dev_set_mtu() for port device, the notifier for the NETDEV_PRECHANGEMTU
+event is called and team_device_event() returns NOTIFY_BAD forbidding
+the change. So fix this by returning NOTIFY_DONE here in case team is
+changing mtu in team_change_mtu().
+
+Introduced-by: 3d249d4c "net: introduce ethernet teaming device"
+Signed-off-by: Jiri Pirko <jiri@resnulli.us>
+Acked-by: Flavio Leitner <fbl@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/team/team.c | 7 ++++++-
+ include/linux/if_team.h | 1 +
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -889,6 +889,7 @@ static int team_change_mtu(struct net_de
+ * to traverse list in reverse under rcu_read_lock
+ */
+ mutex_lock(&team->lock);
++ team->port_mtu_change_allowed = true;
+ list_for_each_entry(port, &team->port_list, list) {
+ err = dev_set_mtu(port->dev, new_mtu);
+ if (err) {
+@@ -897,6 +898,7 @@ static int team_change_mtu(struct net_de
+ goto unwind;
+ }
+ }
++ team->port_mtu_change_allowed = false;
+ mutex_unlock(&team->lock);
+
+ dev->mtu = new_mtu;
+@@ -906,6 +908,7 @@ static int team_change_mtu(struct net_de
+ unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ dev_set_mtu(port->dev, dev->mtu);
++ team->port_mtu_change_allowed = false;
+ mutex_unlock(&team->lock);
+
+ return err;
+@@ -1671,7 +1674,9 @@ static int team_device_event(struct noti
+ break;
+ case NETDEV_CHANGEMTU:
+ /* Forbid to change mtu of underlaying device */
+- return NOTIFY_BAD;
++ if (!port->team->port_mtu_change_allowed)
++ return NOTIFY_BAD;
++ break;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid to change type of underlaying device */
+ return NOTIFY_BAD;
+--- a/include/linux/if_team.h
++++ b/include/linux/if_team.h
+@@ -113,6 +113,7 @@ struct team {
+
+ const struct team_mode *mode;
+ struct team_mode_ops ops;
++ bool port_mtu_change_allowed;
+ long mode_priv[TEAM_MODE_PRIV_LONGS];
+ };
+