--- /dev/null
+From 8ad67ae1d532940db7bc0fef7cc804c43e8bc903 Mon Sep 17 00:00:00 2001
+From: Maciej Żenczykowski <maze@google.com>
+Date: Fri, 15 Mar 2013 11:56:17 +0000
+Subject: bnx2x: fix occasional statistics off-by-4GB error
+
+
+From: Maciej Żenczykowski <maze@google.com>
+
+[ Upstream commit b009aac12cd0fe34293c68af8ac48b85be3bd858 ]
+
+The UPDATE_QSTAT macro, introduced on February 15, 2012
+in commit 1355b704b9ba "bnx2x: consistent statistics after
+internal driver reload", fails to handle overflow during
+addition of the lower 32-bit half of a stat, so the carry
+into the upper half is lost.
+
+This bug has been present since 3.4-rc1 and should thus be considered
+a candidate for stable 3.4+ releases.
+
+Google-Bug-Id: 8374428
+Signed-off-by: Maciej Żenczykowski <maze@google.com>
+Cc: Mintz Yuval <yuvalmin@broadcom.com>
+Acked-by: Eilon Greenstein <eilong@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+@@ -439,8 +439,9 @@ struct bnx2x_fw_port_stats_old {
+
+ #define UPDATE_QSTAT(s, t) \
+ do { \
+- qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
+ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
++ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
++ + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
+ } while (0)
+
+ #define UPDATE_QSTAT_OLD(f) \
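
[Illustrative sketch, not part of the upstream patch: the carry handling above
boils down to one test. If the new low word is smaller than the value it
started from, the 32-bit addition wrapped and a carry must be propagated into
the high word. A minimal standalone C version, with illustrative names:]

#include <stdint.h>
#include <stdio.h>

/* Accumulate a 32-bit (hi, lo) delta into a counter kept as two 32-bit
 * halves, propagating the carry out of the low half into the high half.
 */
static void update_qstat(uint32_t old_hi, uint32_t old_lo,
                         uint32_t delta_hi, uint32_t delta_lo,
                         uint32_t *new_hi, uint32_t *new_lo)
{
        *new_lo = old_lo + delta_lo;
        /* An unsigned sum that wrapped is smaller than either operand. */
        *new_hi = old_hi + delta_hi + ((*new_lo < old_lo) ? 1 : 0);
}

int main(void)
{
        uint32_t hi, lo;

        /* 0x00000000fffffff0 + 0x20 wraps the low word: expect 00000001:00000010 */
        update_qstat(0, 0xfffffff0u, 0, 0x20u, &hi, &lo);
        printf("%08x:%08x\n", hi, lo);
        return 0;
}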
--- /dev/null
+From cada51dba45a8553f9eac3e3ee7b2b09e363835c Mon Sep 17 00:00:00 2001
+From: Veaceslav Falico <vfalico@redhat.com>
+Date: Tue, 12 Mar 2013 06:31:32 +0000
+Subject: bonding: don't call update_speed_duplex() under spinlocks
+
+
+From: Veaceslav Falico <vfalico@redhat.com>
+
+[ Upstream commit 876254ae2758d50dcb08c7bd00caf6a806571178 ]
+
+bond_update_speed_duplex() might sleep while calling the underlying slave's
+routines. Move it out of atomic context in bond_enslave() and remove it
+from bond_miimon_commit(); the call there was introduced by commit 546add79,
+but when slave interfaces go up or change state it is their responsibility
+to fire NETDEV_UP/NETDEV_CHANGE events so that bonding can properly update
+their speed.
+
+I've tested it on all combinations of ifup/ifdown, autoneg/speed/duplex
+changes, remote-controlled and local, on (not) MII-based cards. All changes
+are visible.
+
+Signed-off-by: Veaceslav Falico <vfalico@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_main.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -1737,6 +1737,8 @@ int bond_enslave(struct net_device *bond
+
+ bond_compute_features(bond);
+
++ bond_update_speed_duplex(new_slave);
++
+ read_lock(&bond->lock);
+
+ new_slave->last_arp_rx = jiffies;
+@@ -1780,8 +1782,6 @@ int bond_enslave(struct net_device *bond
+ new_slave->link = BOND_LINK_DOWN;
+ }
+
+- bond_update_speed_duplex(new_slave);
+-
+ if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
+ /* if there is a primary slave, remember it */
+ if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
+@@ -2462,8 +2462,6 @@ static void bond_miimon_commit(struct bo
+ bond_set_backup_slave(slave);
+ }
+
+- bond_update_speed_duplex(slave);
+-
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ bond->dev->name, slave->dev->name,
+ slave->speed, slave->duplex ? "full" : "half");
--- /dev/null
+From b6e9ca78d850eff01aef0d93763a44d0f5f980d9 Mon Sep 17 00:00:00 2001
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Date: Fri, 15 Mar 2013 11:32:30 +0000
+Subject: inet: limit length of fragment queue hash table bucket lists
+
+
+From: Hannes Frederic Sowa <hannes@stressinduktion.org>
+
+[ Upstream commit 5a3da1fe9561828d0ca7eca664b16ec2b9bf0055 ]
+
+This patch introduces a constant limit on the length of the fragment
+queue hash table bucket lists. Currently the limit of 128 is chosen
+somewhat arbitrarily and just ensures that we can fill up the fragment
+cache with empty packets up to the default ip_frag_high_thresh limits.
+It should just protect against list iteration eating considerable
+amounts of cpu.
+
+If we reach the maximum length in one hash bucket, a warning is printed.
+This is implemented on the caller side of inet_frag_find to distinguish
+between the different users of inet_fragment.c.
+
+I dropped the out of memory warning in the ipv4 fragment lookup path,
+because we already get a warning by the slab allocator.
+
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Jesper Dangaard Brouer <jbrouer@redhat.com>
+Signed-off-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inet_frag.h | 9 +++++++++
+ net/ipv4/inet_fragment.c | 20 +++++++++++++++++++-
+ net/ipv4/ip_fragment.c | 11 ++++-------
+ net/ipv6/netfilter/nf_conntrack_reasm.c | 11 ++++++-----
+ net/ipv6/reassembly.c | 8 ++++++--
+ 5 files changed, 44 insertions(+), 15 deletions(-)
+
+--- a/include/net/inet_frag.h
++++ b/include/net/inet_frag.h
+@@ -33,6 +33,13 @@ struct inet_frag_queue {
+
+ #define INETFRAGS_HASHSZ 64
+
++/* averaged:
++ * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
++ * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
++ * struct frag_queue))
++ */
++#define INETFRAGS_MAXDEPTH 128
++
+ struct inet_frags {
+ struct hlist_head hash[INETFRAGS_HASHSZ];
+ rwlock_t lock;
+@@ -64,6 +71,8 @@ int inet_frag_evictor(struct netns_frags
+ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
+ struct inet_frags *f, void *key, unsigned int hash)
+ __releases(&f->lock);
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix);
+
+ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+ {
+--- a/net/ipv4/inet_fragment.c
++++ b/net/ipv4/inet_fragment.c
+@@ -21,6 +21,7 @@
+ #include <linux/rtnetlink.h>
+ #include <linux/slab.h>
+
++#include <net/sock.h>
+ #include <net/inet_frag.h>
+
+ static void inet_frag_secret_rebuild(unsigned long dummy)
+@@ -271,6 +272,7 @@ struct inet_frag_queue *inet_frag_find(s
+ {
+ struct inet_frag_queue *q;
+ struct hlist_node *n;
++ int depth = 0;
+
+ hlist_for_each_entry(q, n, &f->hash[hash], list) {
+ if (q->net == nf && f->match(q, key)) {
+@@ -278,9 +280,25 @@ struct inet_frag_queue *inet_frag_find(s
+ read_unlock(&f->lock);
+ return q;
+ }
++ depth++;
+ }
+ read_unlock(&f->lock);
+
+- return inet_frag_create(nf, f, key);
++ if (depth <= INETFRAGS_MAXDEPTH)
++ return inet_frag_create(nf, f, key);
++ else
++ return ERR_PTR(-ENOBUFS);
+ }
+ EXPORT_SYMBOL(inet_frag_find);
++
++void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
++ const char *prefix)
++{
++ static const char msg[] = "inet_frag_find: Fragment hash bucket"
++ " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
++ ". Dropping fragment.\n";
++
++ if (PTR_ERR(q) == -ENOBUFS)
++ LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg);
++}
++EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
+--- a/net/ipv4/ip_fragment.c
++++ b/net/ipv4/ip_fragment.c
+@@ -295,14 +295,11 @@ static inline struct ipq *ip_find(struct
+ hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
+
+ q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
+- if (q == NULL)
+- goto out_nomem;
+-
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+ return container_of(q, struct ipq, q);
+-
+-out_nomem:
+- LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
+- return NULL;
+ }
+
+ /* Is the fragment too far ahead to be part of ipq? */
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -14,6 +14,8 @@
+ * 2 of the License, or (at your option) any later version.
+ */
+
++#define pr_fmt(fmt) "IPv6-nf: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -176,13 +178,12 @@ fq_find(__be32 id, u32 user, struct in6_
+
+ q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
+ local_bh_enable();
+- if (q == NULL)
+- goto oom;
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
++ return NULL;
++ }
+
+ return container_of(q, struct nf_ct_frag6_queue, q);
+-
+-oom:
+- return NULL;
+ }
+
+
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -26,6 +26,9 @@
+ * YOSHIFUJI,H. @USAGI Always remove fragment header to
+ * calculate ICV correctly.
+ */
++
++#define pr_fmt(fmt) "IPv6: " fmt
++
+ #include <linux/errno.h>
+ #include <linux/types.h>
+ #include <linux/string.h>
+@@ -240,9 +243,10 @@ fq_find(struct net *net, __be32 id, cons
+ hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
+
+ q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
+- if (q == NULL)
++ if (IS_ERR_OR_NULL(q)) {
++ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+-
++ }
+ return container_of(q, struct frag_queue, q);
+ }
+
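
[Illustrative sketch, not part of the patch: the callers above distinguish an
allocation failure (NULL) from an overlong hash bucket (ERR_PTR(-ENOBUFS)).
A simplified userspace approximation of the kernel's ERR_PTR convention and
of the caller-side check, with illustrative names only:]

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR_OR_NULL():
 * a small negative errno is encoded in the topmost page of pointer space.
 */
#define MAX_ERRNO            4095
#define ERR_PTR(err)         ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)         ((long)(intptr_t)(ptr))
#define IS_ERR_OR_NULL(ptr)  ((ptr) == NULL || \
                              (uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

#define MAXDEPTH 128                      /* INETFRAGS_MAXDEPTH analogue */

static void *frag_find(int depth)
{
        if (depth > MAXDEPTH)
                return ERR_PTR(-ENOBUFS); /* bucket list over the limit */
        return NULL;                      /* pretend the allocation failed */
}

int main(void)
{
        void *q = frag_find(200);

        if (IS_ERR_OR_NULL(q)) {
                if (PTR_ERR(q) == -ENOBUFS)
                        fprintf(stderr, "fragment hash bucket over limit\n");
                return 1;
        }
        return 0;
}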
--- /dev/null
+From 56234a34a108f56e5e7fdc4bfeb5eb629e24d9d4 Mon Sep 17 00:00:00 2001
+From: "Denis V. Lunev" <den@openvz.org>
+Date: Wed, 13 Mar 2013 00:24:15 +0000
+Subject: ipv4: fix definition of FIB_TABLE_HASHSZ
+
+
+From: "Denis V. Lunev" <den@openvz.org>
+
+[ Upstream commit 5b9e12dbf92b441b37136ea71dac59f05f2673a9 ]
+
+A long time ago, in the commit
+
+ commit 93456b6d7753def8760b423ac6b986eb9d5a4a95
+ Author: Denis V. Lunev <den@openvz.org>
+ Date: Thu Jan 10 03:23:38 2008 -0800
+
+ [IPV4]: Unify access to the routing tables.
+
+the definition of FIB_TABLE_HASHSZ obtained a wrong dependency:
+it should depend upon CONFIG_IP_MULTIPLE_TABLES (as it did in the
+original code), but it was made to depend on CONFIG_IP_ROUTE_MULTIPATH
+instead.
+
+This patch returns the situation to the original state.
+
+The problem was spotted by Tingwei Liu.
+
+Signed-off-by: Denis V. Lunev <den@openvz.org>
+CC: Tingwei Liu <tingw.liu@gmail.com>
+CC: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/ip_fib.h | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -129,18 +129,16 @@ struct fib_result_nl {
+ };
+
+ #ifdef CONFIG_IP_ROUTE_MULTIPATH
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[(res).nh_sel])
+-
+-#define FIB_TABLE_HASHSZ 2
+-
+ #else /* CONFIG_IP_ROUTE_MULTIPATH */
+-
+ #define FIB_RES_NH(res) ((res).fi->fib_nh[0])
++#endif /* CONFIG_IP_ROUTE_MULTIPATH */
+
++#ifdef CONFIG_IP_MULTIPLE_TABLES
+ #define FIB_TABLE_HASHSZ 256
+-
+-#endif /* CONFIG_IP_ROUTE_MULTIPATH */
++#else
++#define FIB_TABLE_HASHSZ 2
++#endif
+
+ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
+
--- /dev/null
+From 3a5faa2edaecb2f83f3a86d06a7fc4ca4250a141 Mon Sep 17 00:00:00 2001
+From: David Ward <david.ward@ll.mit.edu>
+Date: Mon, 11 Mar 2013 10:43:39 +0000
+Subject: net/ipv4: Ensure that location of timestamp option is stored
+
+
+From: David Ward <david.ward@ll.mit.edu>
+
+[ Upstream commit 4660c7f498c07c43173142ea95145e9dac5a6d14 ]
+
+This is needed in order to detect if the timestamp option appears
+more than once in a packet, to remove the option if the packet is
+fragmented, etc. My previous change neglected to store the option
+location when the router addresses were prespecified and Pointer >
+Length. But now the option location is also stored when Flag is an
+unrecognized value, to ensure these option handling behaviors are
+still performed.
+
+Signed-off-by: David Ward <david.ward@ll.mit.edu>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/ip_options.c | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+--- a/net/ipv4/ip_options.c
++++ b/net/ipv4/ip_options.c
+@@ -360,7 +360,6 @@ int ip_options_compile(struct net *net,
+ }
+ switch (optptr[3]&0xF) {
+ case IPOPT_TS_TSONLY:
+- opt->ts = optptr - iph;
+ if (skb)
+ timeptr = &optptr[optptr[2]-1];
+ opt->ts_needtime = 1;
+@@ -371,7 +370,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (rt) {
+ memcpy(&optptr[optptr[2]-1], &rt->rt_spec_dst, 4);
+ timeptr = &optptr[optptr[2]+3];
+@@ -385,7 +383,6 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 2;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ {
+ __be32 addr;
+ memcpy(&addr, &optptr[optptr[2]-1], 4);
+@@ -418,12 +415,12 @@ int ip_options_compile(struct net *net,
+ pp_ptr = optptr + 3;
+ goto error;
+ }
+- opt->ts = optptr - iph;
+ if (skb) {
+ optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);
+ opt->is_changed = 1;
+ }
+ }
++ opt->ts = optptr - iph;
+ break;
+ case IPOPT_RA:
+ if (optlen < 4) {
--- /dev/null
+From 3fdcc817dc6afa466f594bf232db8a44bc9d01c0 Mon Sep 17 00:00:00 2001
+From: Veaceslav Falico <vfalico@redhat.com>
+Date: Mon, 11 Mar 2013 00:21:48 +0000
+Subject: netconsole: don't call __netpoll_cleanup() while atomic
+
+
+From: Veaceslav Falico <vfalico@redhat.com>
+
+[ Upstream commit 3f315bef23075ea8a98a6fe4221a83b83456d970 ]
+
+__netpoll_cleanup() is called in netconsole_netdev_event() while holding a
+spinlock. Release/acquire the spinlock before/after it and restart the
+loop. Also, disable the netconsole target completely, because we won't have
+a chance to do so after the loop restarts, and we might otherwise end up in
+a situation where
+
+Signed-off-by: Veaceslav Falico <vfalico@redhat.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/netconsole.c | 20 +++++++++-----------
+ 1 file changed, 9 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/netconsole.c
++++ b/drivers/net/netconsole.c
+@@ -626,6 +626,7 @@ static int netconsole_netdev_event(struc
+ goto done;
+
+ spin_lock_irqsave(&target_list_lock, flags);
++restart:
+ list_for_each_entry(nt, &target_list, list) {
+ netconsole_target_get(nt);
+ if (nt->np.dev == dev) {
+@@ -637,21 +638,18 @@ static int netconsole_netdev_event(struc
+ case NETDEV_JOIN:
+ case NETDEV_UNREGISTER:
+ /*
++ * we might sleep in __netpoll_cleanup()
+ * rtnl_lock already held
+ */
+- if (nt->np.dev) {
+- spin_unlock_irqrestore(
+- &target_list_lock,
+- flags);
+- __netpoll_cleanup(&nt->np);
+- spin_lock_irqsave(&target_list_lock,
+- flags);
+- dev_put(nt->np.dev);
+- nt->np.dev = NULL;
+- }
++ spin_unlock_irqrestore(&target_list_lock, flags);
++ __netpoll_cleanup(&nt->np);
++ spin_lock_irqsave(&target_list_lock, flags);
++ dev_put(nt->np.dev);
++ nt->np.dev = NULL;
+ nt->enabled = 0;
+ stopped = true;
+- break;
++ netconsole_target_put(nt);
++ goto restart;
+ }
+ }
+ netconsole_target_put(nt);
--- /dev/null
+From 2ebeed58f161c60327322261208d6095e76b3d24 Mon Sep 17 00:00:00 2001
+From: Vlad Yasevich <vyasevic@redhat.com>
+Date: Wed, 13 Mar 2013 04:18:58 +0000
+Subject: rtnetlink: Mask the rta_type when range checking
+
+
+From: Vlad Yasevich <vyasevic@redhat.com>
+
+[ Upstream commit a5b8db91442fce9c9713fcd656c3698f1adde1d6 ]
+
+Range/validity checks on rta_type in rtnetlink_rcv_msg() do
+not account for flags that may be set. This causes the function
+to return -EINVAL when flags are set on the type (for example
+NLA_F_NESTED).
+
+Signed-off-by: Vlad Yasevich <vyasevic@redhat.com>
+Acked-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/rtnetlink.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -2050,7 +2050,7 @@ static int rtnetlink_rcv_msg(struct sk_b
+ struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
+
+ while (RTA_OK(attr, attrlen)) {
+- unsigned flavor = attr->rta_type;
++ unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
+ if (flavor) {
+ if (flavor > rta_max[sz_idx])
+ return -EINVAL;
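
[Illustrative sketch, not part of the patch: NLA_F_NESTED and
NLA_F_NET_BYTEORDER occupy the top two bits of the 16-bit attribute type, so
the type has to be masked with NLA_TYPE_MASK before it is range-checked
against rta_max[]. Constants as in the kernel's netlink.h header:]

#include <stdio.h>

#define NLA_F_NESTED         (1 << 15)
#define NLA_F_NET_BYTEORDER  (1 << 14)
#define NLA_TYPE_MASK        ~(NLA_F_NESTED | NLA_F_NET_BYTEORDER)

int main(void)
{
        unsigned int rta_type = NLA_F_NESTED | 10;  /* nested attribute, type 10 */

        printf("raw type:    %u\n", rta_type);                  /* 32778 */
        printf("masked type: %u\n", rta_type & NLA_TYPE_MASK);  /* 10    */
        return 0;
}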
--- /dev/null
+From f9e89f9d166d5e7293af01ae6f07dff3d0999b6a Mon Sep 17 00:00:00 2001
+From: Xufeng Zhang <xufeng.zhang@windriver.com>
+Date: Thu, 7 Mar 2013 21:39:37 +0000
+Subject: sctp: don't break the loop while meeting the active_path so as to find the matched transport
+
+
+From: Xufeng Zhang <xufeng.zhang@windriver.com>
+
+[ Upstream commit 2317f449af30073cfa6ec8352e4a65a89e357bdd ]
+
+The sctp_assoc_lookup_tsn() function searches for the transport a certain
+TSN was sent on; if it is not found on the active_path transport, it then
+searches all the other transports in the peer's transport_addr_list.
+However, we should continue to the next entry rather than break out of
+the loop when we meet the active_path transport.
+
+Signed-off-by: Xufeng Zhang <xufeng.zhang@windriver.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Vlad Yasevich <vyasevich@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/associola.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -1050,7 +1050,7 @@ struct sctp_transport *sctp_assoc_lookup
+ transports) {
+
+ if (transport == active)
+- break;
++ continue;
+ list_for_each_entry(chunk, &transport->transmitted,
+ transmitted_list) {
+ if (key == chunk->subh.data_hdr->tsn) {
--- /dev/null
+From d4f76fc180376f2cddc250e2d2f7f1ce6babc82f Mon Sep 17 00:00:00 2001
+From: Vlad Yasevich <vyasevich@gmail.com>
+Date: Tue, 12 Mar 2013 15:53:23 +0000
+Subject: sctp: Use correct sideffect command in duplicate cookie handling
+
+
+From: Vlad Yasevich <vyasevich@gmail.com>
+
+[ Upstream commit f2815633504b442ca0b0605c16bf3d88a3a0fcea ]
+
+When SCTP is done processing a duplicate cookie chunk, it tries
+to delete a newly created association. For that, it has to set
+the right association for the side-effect processing to work.
+However, when it uses the SCTP_CMD_NEW_ASOC command, that performs
+more work than is really needed (like hashing the association and
+assigning it an id), and there is no point in doing that only to
+delete the association in the next step. In fact, it also creates
+an impossible condition where an association may be found by
+the getsockopt() call, and that association is empty. This
+causes a crash in some sctp getsockopts.
+
+The solution is rather simple. We simply use the SCTP_CMD_SET_ASOC
+command, which doesn't have all that overhead and does exactly
+what we need.
+
+Reported-by: Karl Heiss <kheiss@gmail.com>
+Tested-by: Karl Heiss <kheiss@gmail.com>
+CC: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: Vlad Yasevich <vyasevich@gmail.com>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sctp/sm_statefuns.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/sctp/sm_statefuns.c
++++ b/net/sctp/sm_statefuns.c
+@@ -2044,7 +2044,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupc
+ }
+
+ /* Delete the tempory new association. */
+- sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
++ sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
+ sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
+
+ /* Restore association pointer to provide SCTP command interpeter
revert-usb-ehci-don-t-check-dma-values-in-qh-overlays.patch
sunsu-fix-panic-in-case-of-nonexistent-port-at-console-ttysy-cmdline-option.patch
+net-ipv4-ensure-that-location-of-timestamp-option-is-stored.patch
+netconsole-don-t-call-__netpoll_cleanup-while-atomic.patch
+bonding-don-t-call-update_speed_duplex-under-spinlocks.patch
+tg3-5715-does-not-link-up-when-autoneg-off.patch
+sctp-use-correct-sideffect-command-in-duplicate-cookie-handling.patch
+sctp-don-t-break-the-loop-while-meeting-the-active_path-so-as-to-find-the-matched-transport.patch
+ipv4-fix-definition-of-fib_table_hashsz.patch
+tcp-fix-skb_availroom.patch
+rtnetlink-mask-the-rta_type-when-range-checking.patch
+vhost-net-fix-heads-usage-of-ubuf_info.patch
+bnx2x-fix-occasional-statistics-off-by-4gb-error.patch
+inet-limit-length-of-fragment-queue-hash-table-bucket-lists.patch
+sfc-do-not-attempt-to-flush-queues-if-dma-is-disabled.patch
+sfc-convert-firmware-subtypes-to-native-byte-order-in-efx_mcdi_get_board_cfg.patch
+sfc-add-parentheses-around-use-of-bitfield-macro-arguments.patch
+sfc-fix-mcdi-structure-field-lookup.patch
+sfc-really-disable-flow-control-while-flushing.patch
+sfc-work-around-flush-timeout-when-flushes-have-completed.patch
+sfc-lock-tx-queues-when-calling-netif_device_detach.patch
+sfc-fix-timekeeping-in-efx_mcdi_poll.patch
+sfc-disable-vf-queues-during-register-self-test.patch
+sfc-avoid-generating-over-length-mc_cmd_flush_rx_queues-request.patch
+sfc-correctly-initialise-reset_method-in-siena_test_chip.patch
+sfc-properly-sync-rx-dma-buffer-when-it-is-not-the-last-in-the-page.patch
+sfc-fix-efx_rx_buf_offset-in-the-presence-of-swiotlb.patch
+sfc-detach-net-device-when-stopping-queues-for-reconfiguration.patch
+sfc-disable-soft-interrupt-handling-during-efx_device_detach_sync.patch
+sfc-only-use-tx-push-if-a-single-descriptor-is-to-be-written.patch
--- /dev/null
+From 2fce85818e342e7315f9295472e5129a33e4f666 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Tue, 11 Sep 2012 21:37:36 +0100
+Subject: sfc: Add parentheses around use of bitfield macro arguments
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 9724a8504c875145f5a513bb8eca50671cee23b4 ]
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/bitfield.h | 22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/bitfield.h
++++ b/drivers/net/ethernet/sfc/bitfield.h
+@@ -120,10 +120,10 @@ typedef union efx_oword {
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+ #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
+- (((low > max) || (high < min)) ? 0 : \
+- ((low > min) ? \
+- ((native_element) >> (low - min)) : \
+- ((native_element) << (min - low))))
++ ((low) > (max) || (high) < (min) ? 0 : \
++ (low) > (min) ? \
++ (native_element) >> ((low) - (min)) : \
++ (native_element) << ((min) - (low)))
+
+ /*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+@@ -142,27 +142,27 @@ typedef union efx_oword {
+ #define EFX_EXTRACT_OWORD64(oword, low, high) \
+ ((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
+ EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
+- EFX_MASK64(high + 1 - low))
++ EFX_MASK64((high) + 1 - (low)))
+
+ #define EFX_EXTRACT_QWORD64(qword, low, high) \
+ (EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
+- EFX_MASK64(high + 1 - low))
++ EFX_MASK64((high) + 1 - (low)))
+
+ #define EFX_EXTRACT_OWORD32(oword, low, high) \
+ ((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
+ EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
+ EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
+- EFX_MASK32(high + 1 - low))
++ EFX_MASK32((high) + 1 - (low)))
+
+ #define EFX_EXTRACT_QWORD32(qword, low, high) \
+ ((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
+ EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
+- EFX_MASK32(high + 1 - low))
++ EFX_MASK32((high) + 1 - (low)))
+
+ #define EFX_EXTRACT_DWORD(dword, low, high) \
+ (EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
+- EFX_MASK32(high + 1 - low))
++ EFX_MASK32((high) + 1 - (low)))
+
+ #define EFX_OWORD_FIELD64(oword, field) \
+ EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
+@@ -442,10 +442,10 @@ typedef union efx_oword {
+ cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
+
+ #define EFX_INPLACE_MASK64(min, max, low, high) \
+- EFX_INSERT64(min, max, low, high, EFX_MASK64(high + 1 - low))
++ EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
+
+ #define EFX_INPLACE_MASK32(min, max, low, high) \
+- EFX_INSERT32(min, max, low, high, EFX_MASK32(high + 1 - low))
++ EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
+
+ #define EFX_SET_OWORD64(oword, low, high, value) do { \
+ (oword).u64[0] = (((oword).u64[0] \
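
[Illustrative sketch, not part of the patch: without the added parentheses,
passing an expression such as "lbn & 0x1f" as a macro argument can bind to
neighbouring operators in the macro body. A hypothetical width macro, not the
sfc one, shows the failure mode:]

#include <stdio.h>

#define WIDTH_BAD(low, high)   (high - low + 1)          /* argument unparenthesised */
#define WIDTH_GOOD(low, high)  ((high) - (low) + 1)

int main(void)
{
        int lbn = 37;

        /* WIDTH_BAD(lbn & 0x1f, 12) expands to (12 - lbn & 0x1f + 1); since
         * '&' binds more loosely than '-' and '+', that is
         * (12 - lbn) & (0x1f + 1), not 12 - (lbn & 0x1f) + 1.
         */
        printf("bad:  %d\n", WIDTH_BAD(lbn & 0x1f, 12));    /* 32 */
        printf("good: %d\n", WIDTH_GOOD(lbn & 0x1f, 12));   /* 8  */
        return 0;
}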
--- /dev/null
+From 7b669343a777076b3a6e7be7a602abdb15746701 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Wed, 19 Sep 2012 02:53:34 +0100
+Subject: sfc: Avoid generating over-length MC_CMD_FLUSH_RX_QUEUES request
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 450783747f42dfa3883920acfad4acdd93ce69af ]
+
+MCDI supports requests up to 252 bytes long, which is only enough to
+pass 63 RX queue IDs to MC_CMD_FLUSH_RX_QUEUES. However a VF may have
+up to 64 RX queues, and if we try to flush them all we will generate
+an over-length request and BUG() in efx_mcdi_copyin(). Currently
+all VF drivers limit themselves to 32 RX queues, so reducing the
+limit to 63 does no harm.
+
+Also add a BUILD_BUG_ON in efx_mcdi_flush_rxqs() so we remember to
+deal with the same problem there if EFX_MAX_CHANNELS is increased.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/mcdi.c | 3 +++
+ drivers/net/ethernet/sfc/siena_sriov.c | 7 +++++++
+ 2 files changed, 10 insertions(+)
+
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -1168,6 +1168,9 @@ int efx_mcdi_flush_rxqs(struct efx_nic *
+ __le32 *qid;
+ int rc, count;
+
++ BUILD_BUG_ON(EFX_MAX_CHANNELS >
++ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
++
+ qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
+ if (qid == NULL)
+ return -ENOMEM;
+--- a/drivers/net/ethernet/sfc/siena_sriov.c
++++ b/drivers/net/ethernet/sfc/siena_sriov.c
+@@ -21,6 +21,9 @@
+ /* Number of longs required to track all the VIs in a VF */
+ #define VI_MASK_LENGTH BITS_TO_LONGS(1 << EFX_VI_SCALE_MAX)
+
++/* Maximum number of RX queues supported */
++#define VF_MAX_RX_QUEUES 63
++
+ /**
+ * enum efx_vf_tx_filter_mode - TX MAC filtering behaviour
+ * @VF_TX_FILTER_OFF: Disabled
+@@ -578,6 +581,7 @@ static int efx_vfdi_init_rxq(struct efx_
+ efx_oword_t reg;
+
+ if (bad_vf_index(efx, vf_evq) || bad_vf_index(efx, vf_rxq) ||
++ vf_rxq >= VF_MAX_RX_QUEUES ||
+ bad_buf_count(buf_count, EFX_MAX_DMAQ_SIZE)) {
+ if (net_ratelimit())
+ netif_err(efx, hw, efx->net_dev,
+@@ -683,6 +687,9 @@ static int efx_vfdi_fini_all_queues(stru
+ __le32 *rxqs;
+ int rc;
+
++ BUILD_BUG_ON(VF_MAX_RX_QUEUES >
++ MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
++
+ rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
+ if (rxqs == NULL)
+ return VFDI_RC_ENOMEM;
--- /dev/null
+From d7650258ea286c1c56b90984ec99774ec6ef83d3 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Fri, 7 Sep 2012 00:58:10 +0100
+Subject: sfc: Convert firmware subtypes to native byte order in efx_mcdi_get_board_cfg()
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit bfeed902946a31692e7a24ed355b6d13ac37d014 ]
+
+On big-endian systems the MTD partition names currently have mangled
+subtype numbers and are not recognised by the firmware update tool
+(sfupdate).
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/mcdi.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -641,9 +641,8 @@ int efx_mcdi_get_board_cfg(struct efx_ni
+ u16 *fw_subtype_list, u32 *capabilities)
+ {
+ uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMIN];
+- size_t outlen;
++ size_t outlen, offset, i;
+ int port_num = efx_port_num(efx);
+- int offset;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_BOARD_CFG_IN_LEN != 0);
+@@ -663,11 +662,16 @@ int efx_mcdi_get_board_cfg(struct efx_ni
+ : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
+ if (mac_address)
+ memcpy(mac_address, outbuf + offset, ETH_ALEN);
+- if (fw_subtype_list)
+- memcpy(fw_subtype_list,
+- outbuf + MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST,
+- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM *
+- sizeof(fw_subtype_list[0]));
++ if (fw_subtype_list) {
++ offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
++ for (i = 0;
++ i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM;
++ i++) {
++ fw_subtype_list[i] =
++ le16_to_cpup((__le16 *)(outbuf + offset));
++ offset += 2;
++ }
++ }
+ if (capabilities) {
+ if (port_num)
+ *capabilities = MCDI_DWORD(outbuf,
--- /dev/null
+From 9caaee61715aba71454b6b8edc504eb5bf837b1c Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Sat, 1 Dec 2012 01:55:27 +0000
+Subject: sfc: Correctly initialise reset_method in siena_test_chip()
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit ef492f11efed9a6a1686bf914fb74468df59385c ]
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/siena.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/sfc/siena.c
++++ b/drivers/net/ethernet/sfc/siena.c
+@@ -170,7 +170,7 @@ static const struct efx_nic_register_tes
+
+ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+ {
+- enum reset_type reset_method = reset_method;
++ enum reset_type reset_method = RESET_TYPE_ALL;
+ int rc, rc2;
+
+ efx_reset_down(efx, reset_method);
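
[Illustrative sketch, not part of the patch: the removed line initialised
reset_method with its own indeterminate value. That idiom only silences the
compiler's uninitialised-variable warning; the variable still holds garbage,
which is why the explicit RESET_TYPE_ALL initialiser is needed. A trivial
standalone demonstration (the read is undefined behaviour):]

#include <stdio.h>

int main(void)
{
        /* Self-initialisation: reads an indeterminate value. Historically it
         * was used to suppress -Wuninitialized, which is how such bugs hide.
         */
        int reset_method = reset_method;

        printf("reset_method = %d\n", reset_method);   /* prints garbage */
        return 0;
}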
--- /dev/null
+From 3d57633248df4061b9faef9215856b2285fbf103 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Mon, 28 Jan 2013 19:01:06 +0000
+Subject: sfc: Detach net device when stopping queues for reconfiguration
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 29c69a4882641285a854d6d03ca5adbba68c0034 ]
+
+We must only ever stop TX queues when they are full or the net device
+is not 'ready' so far as the net core, and specifically the watchdog,
+is concerned. Otherwise, the watchdog may fire *immediately* if no
+packets have been added to the queue in the last 5 seconds.
+
+The device is ready if all the following are true:
+
+(a) It has a qdisc
+(b) It is marked present
+(c) It is running
+(d) The link is reported up
+
+(a) and (c) are normally true, and must not be changed by a driver.
+(d) is under our control, but fake link changes may disturb userland.
+This leaves (b). We already mark the device absent during reset
+and self-test, but we need to do the same during MTU changes and ring
+reallocation. We don't need to do this when the device is brought
+down because then (c) is already false.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/efx.c | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -754,6 +754,7 @@ efx_realloc_channels(struct efx_nic *efx
+ tx_queue->txd.entries);
+ }
+
++ efx_device_detach_sync(efx);
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, true);
+
+@@ -807,6 +808,7 @@ out:
+
+ efx_start_interrupts(efx, true);
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return rc;
+
+ rollback:
+@@ -1601,8 +1603,12 @@ static void efx_stop_all(struct efx_nic
+ /* Flush efx_mac_work(), refill_workqueue, monitor_work */
+ efx_flush_all(efx);
+
+- /* Stop the kernel transmit interface late, so the watchdog
+- * timer isn't ticking over the flush */
++ /* Stop the kernel transmit interface. This is only valid if
++ * the device is stopped or detached; otherwise the watchdog
++ * may fire immediately.
++ */
++ WARN_ON(netif_running(efx->net_dev) &&
++ netif_device_present(efx->net_dev));
+ netif_tx_disable(efx->net_dev);
+
+ efx_stop_datapath(efx);
+@@ -1921,10 +1927,11 @@ static int efx_change_mtu(struct net_dev
+ if (new_mtu > EFX_MAX_MTU)
+ return -EINVAL;
+
+- efx_stop_all(efx);
+-
+ netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
+
++ efx_device_detach_sync(efx);
++ efx_stop_all(efx);
++
+ mutex_lock(&efx->mac_lock);
+ /* Reconfigure the MAC before enabling the dma queues so that
+ * the RX buffers don't overflow */
+@@ -1933,6 +1940,7 @@ static int efx_change_mtu(struct net_dev
+ mutex_unlock(&efx->mac_lock);
+
+ efx_start_all(efx);
++ netif_device_attach(efx->net_dev);
+ return 0;
+ }
+
--- /dev/null
+From d2ed8371fec73887b521f2fd4b367472fb93100c Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Tue, 5 Mar 2013 01:03:47 +0000
+Subject: sfc: Disable soft interrupt handling during efx_device_detach_sync()
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 35205b211c8d17a8a0b5e8926cb7c73e9a7ef1ad ]
+
+efx_device_detach_sync() locks all TX queues before marking the device
+detached and thus disabling further TX scheduling. But it can still
+be interrupted by TX completions which then result in TX scheduling in
+soft interrupt context. This will deadlock when it tries to acquire
+a TX queue lock that efx_device_detach_sync() already acquired.
+
+To avoid deadlock, we must use netif_tx_{,un}lock_bh().
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/efx.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -170,9 +170,9 @@ static inline void efx_device_detach_syn
+ * TX scheduler is stopped when we're done and before
+ * netif_device_present() becomes false.
+ */
+- netif_tx_lock(dev);
++ netif_tx_lock_bh(dev);
+ netif_device_detach(dev);
+- netif_tx_unlock(dev);
++ netif_tx_unlock_bh(dev);
+ }
+
+ #endif /* EFX_EFX_H */
--- /dev/null
+From 746e5144c226bb084278e91346bb805c0740e48c Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Wed, 4 Jul 2012 03:58:33 +0100
+Subject: sfc: Disable VF queues during register self-test
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit d4f2cecce138c34960c467d0ae38a6d4bcd6af7b ]
+
+Currently VF queues and drivers may remain active during this test.
+This could cause memory corruption or spurious test failures.
+Therefore we reset the port/function before running these tests on
+Siena.
+
+On Falcon this doesn't work: we have to do some additional
+initialisation before some blocks will work again. So refactor the
+reset/register-test sequence into an efx_nic_type method so
+efx_selftest() doesn't have to consider such quirks.
+
+In the process, fix another minor bug: Siena does not have an
+'invisible' reset and the self-test currently fails to push the PHY
+configuration after resetting. Passing RESET_TYPE_ALL to
+efx_reset_{down,up}() fixes this.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/falcon.c | 35 +++++++++++++++++--
+ drivers/net/ethernet/sfc/net_driver.h | 7 ++-
+ drivers/net/ethernet/sfc/nic.c | 3 -
+ drivers/net/ethernet/sfc/selftest.c | 62 ++++++++--------------------------
+ drivers/net/ethernet/sfc/siena.c | 29 +++++++++++++--
+ 5 files changed, 76 insertions(+), 60 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/falcon.c
++++ b/drivers/net/ethernet/sfc/falcon.c
+@@ -25,9 +25,12 @@
+ #include "io.h"
+ #include "phy.h"
+ #include "workarounds.h"
++#include "selftest.h"
+
+ /* Hardware control for SFC4000 (aka Falcon). */
+
++static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
++
+ static const unsigned int
+ /* "Large" EEPROM device: Atmel AT25640 or similar
+ * 8 KB, 16-bit address, 32 B write block */
+@@ -1034,10 +1037,34 @@ static const struct efx_nic_register_tes
+ EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
+ };
+
+-static int falcon_b0_test_registers(struct efx_nic *efx)
++static int
++falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+ {
+- return efx_nic_test_registers(efx, falcon_b0_register_tests,
+- ARRAY_SIZE(falcon_b0_register_tests));
++ enum reset_type reset_method = RESET_TYPE_INVISIBLE;
++ int rc, rc2;
++
++ mutex_lock(&efx->mac_lock);
++ if (efx->loopback_modes) {
++ /* We need the 312 clock from the PHY to test the XMAC
++ * registers, so move into XGMII loopback if available */
++ if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
++ efx->loopback_mode = LOOPBACK_XGMII;
++ else
++ efx->loopback_mode = __ffs(efx->loopback_modes);
++ }
++ __efx_reconfigure_port(efx);
++ mutex_unlock(&efx->mac_lock);
++
++ efx_reset_down(efx, reset_method);
++
++ tests->registers =
++ efx_nic_test_registers(efx, falcon_b0_register_tests,
++ ARRAY_SIZE(falcon_b0_register_tests))
++ ? -1 : 1;
++
++ rc = falcon_reset_hw(efx, reset_method);
++ rc2 = efx_reset_up(efx, reset_method, rc == 0);
++ return rc ? rc : rc2;
+ }
+
+ /**************************************************************************
+@@ -1820,7 +1847,7 @@ const struct efx_nic_type falcon_b0_nic_
+ .get_wol = falcon_get_wol,
+ .set_wol = falcon_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+- .test_registers = falcon_b0_test_registers,
++ .test_chip = falcon_b0_test_chip,
+ .test_nvram = falcon_test_nvram,
+
+ .revision = EFX_REV_FALCON_B0,
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -68,6 +68,8 @@
+ #define EFX_TXQ_TYPES 4
+ #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
+
++struct efx_self_tests;
++
+ /**
+ * struct efx_special_buffer - An Efx special buffer
+ * @addr: CPU base address of the buffer
+@@ -901,7 +903,8 @@ static inline unsigned int efx_port_num(
+ * @get_wol: Get WoL configuration from driver state
+ * @set_wol: Push WoL configuration to the NIC
+ * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
+- * @test_registers: Test read/write functionality of control registers
++ * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
++ * expected to reset the NIC.
+ * @test_nvram: Test validity of NVRAM contents
+ * @revision: Hardware architecture revision
+ * @mem_map_size: Memory BAR mapped size
+@@ -947,7 +950,7 @@ struct efx_nic_type {
+ void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
+ int (*set_wol)(struct efx_nic *efx, u32 type);
+ void (*resume_wol)(struct efx_nic *efx);
+- int (*test_registers)(struct efx_nic *efx);
++ int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
+ int (*test_nvram)(struct efx_nic *efx);
+
+ int revision;
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -126,9 +126,6 @@ int efx_nic_test_registers(struct efx_ni
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+- /* Falcon should be in loopback to isolate the XMAC from the PHY */
+- WARN_ON(!LOOPBACK_INTERNAL(efx));
+-
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -120,19 +120,6 @@ static int efx_test_nvram(struct efx_nic
+ return rc;
+ }
+
+-static int efx_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+-{
+- int rc = 0;
+-
+- /* Test register access */
+- if (efx->type->test_registers) {
+- rc = efx->type->test_registers(efx);
+- tests->registers = rc ? -1 : 1;
+- }
+-
+- return rc;
+-}
+-
+ /**************************************************************************
+ *
+ * Interrupt and event queue testing
+@@ -699,8 +686,7 @@ int efx_selftest(struct efx_nic *efx, st
+ {
+ enum efx_loopback_mode loopback_mode = efx->loopback_mode;
+ int phy_mode = efx->phy_mode;
+- enum reset_type reset_method = RESET_TYPE_INVISIBLE;
+- int rc_test = 0, rc_reset = 0, rc;
++ int rc_test = 0, rc_reset, rc;
+
+ efx_selftest_async_cancel(efx);
+
+@@ -737,44 +723,26 @@ int efx_selftest(struct efx_nic *efx, st
+ */
+ efx_device_detach_sync(efx);
+
+- mutex_lock(&efx->mac_lock);
+- if (efx->loopback_modes) {
+- /* We need the 312 clock from the PHY to test the XMAC
+- * registers, so move into XGMII loopback if available */
+- if (efx->loopback_modes & (1 << LOOPBACK_XGMII))
+- efx->loopback_mode = LOOPBACK_XGMII;
+- else
+- efx->loopback_mode = __ffs(efx->loopback_modes);
+- }
+-
+- __efx_reconfigure_port(efx);
+- mutex_unlock(&efx->mac_lock);
+-
+- /* free up all consumers of SRAM (including all the queues) */
+- efx_reset_down(efx, reset_method);
+-
+- rc = efx_test_chip(efx, tests);
+- if (rc && !rc_test)
+- rc_test = rc;
++ if (efx->type->test_chip) {
++ rc_reset = efx->type->test_chip(efx, tests);
++ if (rc_reset) {
++ netif_err(efx, hw, efx->net_dev,
++ "Unable to recover from chip test\n");
++ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
++ return rc_reset;
++ }
+
+- /* reset the chip to recover from the register test */
+- rc_reset = efx->type->reset(efx, reset_method);
++ if ((tests->registers < 0) && !rc_test)
++ rc_test = -EIO;
++ }
+
+ /* Ensure that the phy is powered and out of loopback
+ * for the bist and loopback tests */
++ mutex_lock(&efx->mac_lock);
+ efx->phy_mode &= ~PHY_MODE_LOW_POWER;
+ efx->loopback_mode = LOOPBACK_NONE;
+-
+- rc = efx_reset_up(efx, reset_method, rc_reset == 0);
+- if (rc && !rc_reset)
+- rc_reset = rc;
+-
+- if (rc_reset) {
+- netif_err(efx, drv, efx->net_dev,
+- "Unable to recover from chip test\n");
+- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+- return rc_reset;
+- }
++ __efx_reconfigure_port(efx);
++ mutex_unlock(&efx->mac_lock);
+
+ rc = efx_test_phy(efx, tests, flags);
+ if (rc && !rc_test)
+--- a/drivers/net/ethernet/sfc/siena.c
++++ b/drivers/net/ethernet/sfc/siena.c
+@@ -25,10 +25,12 @@
+ #include "workarounds.h"
+ #include "mcdi.h"
+ #include "mcdi_pcol.h"
++#include "selftest.h"
+
+ /* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
+
+ static void siena_init_wol(struct efx_nic *efx);
++static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
+
+
+ static void siena_push_irq_moderation(struct efx_channel *channel)
+@@ -166,10 +168,29 @@ static const struct efx_nic_register_tes
+ EFX_OWORD32(0xFFFFFFFF, 0xFFFFFFFF, 0x00000007, 0x00000000) },
+ };
+
+-static int siena_test_registers(struct efx_nic *efx)
++static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
+ {
+- return efx_nic_test_registers(efx, siena_register_tests,
+- ARRAY_SIZE(siena_register_tests));
++ enum reset_type reset_method = reset_method;
++ int rc, rc2;
++
++ efx_reset_down(efx, reset_method);
++
++ /* Reset the chip immediately so that it is completely
++ * quiescent regardless of what any VF driver does.
++ */
++ rc = siena_reset_hw(efx, reset_method);
++ if (rc)
++ goto out;
++
++ tests->registers =
++ efx_nic_test_registers(efx, siena_register_tests,
++ ARRAY_SIZE(siena_register_tests))
++ ? -1 : 1;
++
++ rc = siena_reset_hw(efx, reset_method);
++out:
++ rc2 = efx_reset_up(efx, reset_method, rc == 0);
++ return rc ? rc : rc2;
+ }
+
+ /**************************************************************************
+@@ -662,7 +683,7 @@ const struct efx_nic_type siena_a0_nic_t
+ .get_wol = siena_get_wol,
+ .set_wol = siena_set_wol,
+ .resume_wol = siena_init_wol,
+- .test_registers = siena_test_registers,
++ .test_chip = siena_test_chip,
+ .test_nvram = efx_mcdi_nvram_test_all,
+
+ .revision = EFX_REV_SIENA_A0,
--- /dev/null
+From eca4273c008c413fbf288317a2403cab44541b9d Mon Sep 17 00:00:00 2001
+From: Stuart Hodgson <smhodgson@solarflare.com>
+Date: Fri, 30 Mar 2012 13:04:51 +0100
+Subject: sfc: Do not attempt to flush queues if DMA is disabled
+
+
+From: Stuart Hodgson <smhodgson@solarflare.com>
+
+[ Upstream commit 3dca9d2dc285faf1910d405b65df845cab061356 ]
+
+efx_nic_fatal_interrupt() disables DMA before scheduling a reset.
+After this, we need not and *cannot* flush queues.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/efx.c | 33 +++++++++++++++++++--------------
+ 1 file changed, 19 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -656,25 +656,30 @@ static void efx_stop_datapath(struct efx
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
++ struct pci_dev *dev = efx->pci_dev;
+ int rc;
+
+ EFX_ASSERT_RESET_SERIALISED(efx);
+ BUG_ON(efx->port_enabled);
+
+- rc = efx_nic_flush_queues(efx);
+- if (rc && EFX_WORKAROUND_7803(efx)) {
+- /* Schedule a reset to recover from the flush failure. The
+- * descriptor caches reference memory we're about to free,
+- * but falcon_reconfigure_mac_wrapper() won't reconnect
+- * the MACs because of the pending reset. */
+- netif_err(efx, drv, efx->net_dev,
+- "Resetting to recover from flush failure\n");
+- efx_schedule_reset(efx, RESET_TYPE_ALL);
+- } else if (rc) {
+- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+- } else {
+- netif_dbg(efx, drv, efx->net_dev,
+- "successfully flushed all queues\n");
++ /* Only perform flush if dma is enabled */
++ if (dev->is_busmaster) {
++ rc = efx_nic_flush_queues(efx);
++
++ if (rc && EFX_WORKAROUND_7803(efx)) {
++ /* Schedule a reset to recover from the flush failure. The
++ * descriptor caches reference memory we're about to free,
++ * but falcon_reconfigure_mac_wrapper() won't reconnect
++ * the MACs because of the pending reset. */
++ netif_err(efx, drv, efx->net_dev,
++ "Resetting to recover from flush failure\n");
++ efx_schedule_reset(efx, RESET_TYPE_ALL);
++ } else if (rc) {
++ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
++ } else {
++ netif_dbg(efx, drv, efx->net_dev,
++ "successfully flushed all queues\n");
++ }
+ }
+
+ efx_for_each_channel(channel, efx) {
--- /dev/null
+From f527594ce2c6d94cab407e4cb02cb0eca80ad571 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Thu, 10 Jan 2013 23:51:54 +0000
+Subject: sfc: Fix efx_rx_buf_offset() in the presence of swiotlb
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commits b590ace09d51cd39744e0f7662c5e4a0d1b5d952 and
+ c73e787a8db9117d59b5180baf83203a42ecadca ]
+
+We assume that the mapping between DMA and virtual addresses is done
+on whole pages, so we can find the page offset of an RX buffer using
+the lower bits of the DMA address. However, swiotlb maps in units of
+2K, breaking this assumption.
+
+Add an explicit page_offset field to struct efx_rx_buffer.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/net_driver.h | 4 +++-
+ drivers/net/ethernet/sfc/rx.c | 10 +++++-----
+ 2 files changed, 8 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -206,6 +206,7 @@ struct efx_tx_queue {
+ * Will be %NULL if the buffer slot is currently free.
+ * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * Will be %NULL if the buffer slot is currently free.
++ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
+ * @len: Buffer length, in bytes.
+ * @flags: Flags for buffer and packet state.
+ */
+@@ -215,7 +216,8 @@ struct efx_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
+ } u;
+- unsigned int len;
++ u16 page_offset;
++ u16 len;
+ u16 flags;
+ };
+ #define EFX_RX_BUF_PAGE 0x0001
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -95,11 +95,7 @@ static unsigned int rx_refill_limit = 95
+ static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
+ struct efx_rx_buffer *buf)
+ {
+- /* Offset is always within one page, so we don't need to consider
+- * the page order.
+- */
+- return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
+- efx->type->rx_buffer_hash_size;
++ return buf->page_offset + efx->type->rx_buffer_hash_size;
+ }
+ static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+ {
+@@ -193,6 +189,7 @@ static int efx_init_rx_buffers_page(stru
+ struct efx_rx_buffer *rx_buf;
+ struct page *page;
+ void *page_addr;
++ unsigned int page_offset;
+ struct efx_rx_page_state *state;
+ dma_addr_t dma_addr;
+ unsigned index, count;
+@@ -219,12 +216,14 @@ static int efx_init_rx_buffers_page(stru
+
+ page_addr += sizeof(struct efx_rx_page_state);
+ dma_addr += sizeof(struct efx_rx_page_state);
++ page_offset = sizeof(struct efx_rx_page_state);
+
+ split:
+ index = rx_queue->added_count & rx_queue->ptr_mask;
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+ rx_buf->u.page = page;
++ rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+ rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+ rx_buf->flags = EFX_RX_BUF_PAGE;
+ ++rx_queue->added_count;
+@@ -236,6 +235,7 @@ static int efx_init_rx_buffers_page(stru
+ get_page(page);
+ dma_addr += (PAGE_SIZE >> 1);
+ page_addr += (PAGE_SIZE >> 1);
++ page_offset += (PAGE_SIZE >> 1);
+ ++count;
+ goto split;
+ }
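
[Illustrative sketch, not part of the patch: the old code recovered the
within-page offset from the low bits of the DMA address, which assumes DMA
mapping is page-granular. swiotlb bounce buffers are carved out of 2 KB
slots, so those bits can disagree with the buffer's real offset in its page;
hence the explicit page_offset field. Illustrative numbers only:]

#include <stdio.h>

#define PAGE_SIZE     4096u
#define IO_TLB_SLOT   2048u   /* swiotlb maps in 2 KB units */

int main(void)
{
        unsigned int page_offset = 3072;   /* real offset of the RX buffer */
        /* A bounce buffer only preserves the offset within a 2 KB slot, so
         * the DMA address handed back carries different low-order bits.
         */
        unsigned int dma_addr = 0x10000 + (page_offset % IO_TLB_SLOT);

        printf("offset from dma_addr: %u\n", dma_addr & (PAGE_SIZE - 1)); /* 1024 */
        printf("real page offset:     %u\n", page_offset);                /* 3072 */
        return 0;
}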
--- /dev/null
+From 43cd9f8b21f55b2210d627ba27789a7ec586ded6 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Tue, 11 Sep 2012 21:46:41 +0100
+Subject: sfc: Fix MCDI structure field lookup
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 0a6e5008a9df678b48f8d4e57601aa4270df6c14 ]
+
+The least significant bit number (LBN) of a field within an MCDI
+structure is counted from the start of the structure, not the
+containing dword. In MCDI_ARRAY_FIELD() we need to mask it rather
+than using the usual EFX_DWORD_FIELD() macro.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/mcdi.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/mcdi.h
++++ b/drivers/net/ethernet/sfc/mcdi.h
+@@ -107,11 +107,13 @@ extern void efx_mcdi_sensor_event(struct
+ #define MCDI_EVENT_FIELD(_ev, _field) \
+ EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
+ #define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+- EFX_DWORD_FIELD( \
++ EFX_EXTRACT_DWORD( \
+ *((efx_dword_t *) \
+ (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
+ (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
+- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2)
++ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
++ (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
++ MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
+
+ extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
+ extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
--- /dev/null
+From 6967d4846947cf5dfe8cca164c12d80d35d94367 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Sat, 1 Dec 2012 02:21:17 +0000
+Subject: sfc: Fix timekeeping in efx_mcdi_poll()
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit ebf98e797b4e26ad52ace1511a0b503ee60a6cd4 ]
+
+efx_mcdi_poll() uses get_seconds() to read the current time and to
+implement a polling timeout. The use of this function was chosen
+partly because it could easily be replaced in a co-sim environment
+with a macro that read the simulated time.
+
+Unfortunately the real get_seconds() returns the system time (real
+time) which is subject to adjustment by e.g. ntpd. If the system time
+is adjusted forward during a polled MCDI operation, the effective
+timeout can be shorter than the intended 10 seconds, resulting in a
+spurious failure. It is also possible for a backward adjustment to
+delay detection of a real failure.
+
+Use jiffies instead, and change MCDI_RPC_TIMEOUT to be denominated in
+jiffies. Also correct rounding of the timeout: check time > finish
+(or rather time_after(time, finish)) and not time >= finish.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/mcdi.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/mcdi.c
++++ b/drivers/net/ethernet/sfc/mcdi.c
+@@ -22,7 +22,7 @@
+ **************************************************************************
+ */
+
+-#define MCDI_RPC_TIMEOUT 10 /*seconds */
++#define MCDI_RPC_TIMEOUT (10 * HZ)
+
+ #define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+@@ -120,7 +120,7 @@ static void efx_mcdi_copyout(struct efx_
+ static int efx_mcdi_poll(struct efx_nic *efx)
+ {
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+- unsigned int time, finish;
++ unsigned long time, finish;
+ unsigned int respseq, respcmd, error;
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int rc, spins;
+@@ -136,7 +136,7 @@ static int efx_mcdi_poll(struct efx_nic
+ * and poll once a jiffy (approximately)
+ */
+ spins = TICK_USEC;
+- finish = get_seconds() + MCDI_RPC_TIMEOUT;
++ finish = jiffies + MCDI_RPC_TIMEOUT;
+
+ while (1) {
+ if (spins != 0) {
+@@ -146,7 +146,7 @@ static int efx_mcdi_poll(struct efx_nic
+ schedule_timeout_uninterruptible(1);
+ }
+
+- time = get_seconds();
++ time = jiffies;
+
+ rmb();
+ efx_readd(efx, &reg, pdu);
+@@ -158,7 +158,7 @@ static int efx_mcdi_poll(struct efx_nic
+ EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ break;
+
+- if (time >= finish)
++ if (time_after(time, finish))
+ return -ETIMEDOUT;
+ }
+
+@@ -250,7 +250,7 @@ static int efx_mcdi_await_completion(str
+ if (wait_event_timeout(
+ mcdi->wq,
+ atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
+- msecs_to_jiffies(MCDI_RPC_TIMEOUT * 1000)) == 0)
++ MCDI_RPC_TIMEOUT) == 0)
+ return -ETIMEDOUT;
+
+ /* Check if efx_mcdi_set_mode() switched us back to polled completions.
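
[Illustrative sketch, not part of the patch: time_after() compares tick
counters through a signed difference, so it stays correct across counter
wrap-around, unlike a plain ">=". Simplified from the kernel's jiffies
helpers, shown here for a 32-bit counter:]

#include <stdint.h>
#include <stdio.h>

/* True if 'a' is after 'b', even when the counter has wrapped. */
#define time_after32(a, b)  ((int32_t)((b) - (a)) < 0)

int main(void)
{
        uint32_t finish = UINT32_MAX - 5;  /* deadline just before the wrap */
        uint32_t now    = 10;              /* counter has already wrapped   */

        printf("now >= finish: %d\n", now >= finish);              /* 0 */
        printf("time_after32 : %d\n", time_after32(now, finish));  /* 1 */
        return 0;
}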
--- /dev/null
+From 6ea7ff1378aeff3d33d3a91180b8d1f3d0869350 Mon Sep 17 00:00:00 2001
+From: Daniel Pieczko <dpieczko@solarflare.com>
+Date: Wed, 17 Oct 2012 13:21:23 +0100
+Subject: sfc: lock TX queues when calling netif_device_detach()
+
+
+From: Daniel Pieczko <dpieczko@solarflare.com>
+
+[ Upstream commit c2f3b8e3a44b6fe9e36704e30157ebe1a88c08b1 ]
+
+The assertion of netif_device_present() at the top of
+efx_hard_start_xmit() may fail if we don't do this.
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+[bwh: Backported to 3.4: adjust context]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/efx.c | 4 ++--
+ drivers/net/ethernet/sfc/efx.h | 13 +++++++++++++
+ drivers/net/ethernet/sfc/selftest.c | 2 +-
+ 3 files changed, 16 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/efx.c
++++ b/drivers/net/ethernet/sfc/efx.c
+@@ -2224,7 +2224,7 @@ int efx_reset(struct efx_nic *efx, enum
+ netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
+ RESET_TYPE(method));
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+ efx_reset_down(efx, method);
+
+ rc = efx->type->reset(efx, method);
+@@ -2718,7 +2718,7 @@ static int efx_pm_freeze(struct device *
+
+ efx->state = STATE_FINI;
+
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ efx_stop_all(efx);
+ efx_stop_interrupts(efx, false);
+--- a/drivers/net/ethernet/sfc/efx.h
++++ b/drivers/net/ethernet/sfc/efx.h
+@@ -162,4 +162,17 @@ extern void efx_link_status_changed(stru
+ extern void efx_link_set_advertising(struct efx_nic *efx, u32);
+ extern void efx_link_set_wanted_fc(struct efx_nic *efx, u8);
+
++static inline void efx_device_detach_sync(struct efx_nic *efx)
++{
++ struct net_device *dev = efx->net_dev;
++
++ /* Lock/freeze all TX queues so that we can be sure the
++ * TX scheduler is stopped when we're done and before
++ * netif_device_present() becomes false.
++ */
++ netif_tx_lock(dev);
++ netif_device_detach(dev);
++ netif_tx_unlock(dev);
++}
++
+ #endif /* EFX_EFX_H */
+--- a/drivers/net/ethernet/sfc/selftest.c
++++ b/drivers/net/ethernet/sfc/selftest.c
+@@ -735,7 +735,7 @@ int efx_selftest(struct efx_nic *efx, st
+ /* Detach the device so the kernel doesn't transmit during the
+ * loopback test and the watchdog timeout doesn't fire.
+ */
+- netif_device_detach(efx->net_dev);
++ efx_device_detach_sync(efx);
+
+ mutex_lock(&efx->mac_lock);
+ if (efx->loopback_modes) {
--- /dev/null
+From 3d281540c7cb031c597a74a8f2373f16a9740cfd Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Wed, 27 Feb 2013 16:50:38 +0000
+Subject: sfc: Only use TX push if a single descriptor is to be written
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit fae8563b25f73dc584a07bcda7a82750ff4f7672 ]
+
+Using TX push when notifying the NIC of multiple new descriptors in
+the ring will very occasionally cause the TX DMA engine to re-use an
+old descriptor. This can result in a duplicated or partly duplicated
+packet (new headers with old data), or an IOMMU page fault. This does
+not happen when the pushed descriptor is the only one written.
+
+TX push also provides little latency benefit when a packet requires
+more than one descriptor.
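+
+In other words, the push criterion becomes "the queue was seen empty and
+exactly one descriptor has been written since".  A simplified sketch of that
+predicate follows; the real check below also validates the empty_read_count
+snapshot via EFX_EMPTY_COUNT_VALID, and the helper name and parameters here
+are illustrative only.
+
+        #include <linux/types.h>
+
+        static bool may_push_single_desc(unsigned int old_write_count,
+                                         unsigned int new_write_count,
+                                         bool queue_was_empty)
+        {
+                /* Push only if this write adds exactly one descriptor to a
+                 * queue the hardware had already seen as empty.
+                 */
+                return queue_was_empty &&
+                       (new_write_count - old_write_count == 1);
+        }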
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/nic.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -379,7 +379,8 @@ efx_may_push_tx_desc(struct efx_tx_queue
+ return false;
+
+ tx_queue->empty_read_count = 0;
+- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
++ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
++ && tx_queue->write_count - write_count == 1;
+ }
+
+ /* For each entry inserted into the software descriptor ring, create a
--- /dev/null
+From 5c642c59d8774dc6d965917ec672d96f82dbfc59 Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Thu, 20 Dec 2012 18:48:20 +0000
+Subject: sfc: Properly sync RX DMA buffer when it is not the last in the page
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit 3a68f19d7afb80f548d016effbc6ed52643a8085 ]
+
+We may currently allocate two RX DMA buffers to a page, and only unmap
+the page when the second is completed. We do not sync the first RX
+buffer to be completed; this can result in packet loss or corruption
+if the last RX buffer completed in a NAPI poll is the first in a page
+and is not DMA-coherent. (In the middle of a NAPI poll, we will
+handle the following RX completion and unmap the page *before* looking
+at the content of the first buffer.)
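+
+The fix is to sync just the bytes the NIC wrote whenever a buffer is
+completed but its page mapping must stay alive.  A minimal sketch of that
+call using the generic DMA API (the wrapper and parameter names are
+illustrative; the driver change below instead threads a used_len argument
+through efx_unmap_rx_buffer()):
+
+        #include <linux/dma-mapping.h>
+
+        static void sync_rx_buf_for_cpu(struct device *dev, dma_addr_t addr,
+                                        unsigned int used_len)
+        {
+                /* Hand only the bytes the NIC actually wrote back to the
+                 * CPU; the page itself remains mapped for the other buffer.
+                 */
+                dma_sync_single_for_cpu(dev, addr, used_len, DMA_FROM_DEVICE);
+        }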
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+[bwh: Backported to 3.4: adjust context]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/rx.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/rx.c
++++ b/drivers/net/ethernet/sfc/rx.c
+@@ -245,7 +245,8 @@ static int efx_init_rx_buffers_page(stru
+ }
+
+ static void efx_unmap_rx_buffer(struct efx_nic *efx,
+- struct efx_rx_buffer *rx_buf)
++ struct efx_rx_buffer *rx_buf,
++ unsigned int used_len)
+ {
+ if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
+ struct efx_rx_page_state *state;
+@@ -256,6 +257,10 @@ static void efx_unmap_rx_buffer(struct e
+ state->dma_addr,
+ efx_rx_buf_size(efx),
+ PCI_DMA_FROMDEVICE);
++ } else if (used_len) {
++ dma_sync_single_for_cpu(&efx->pci_dev->dev,
++ rx_buf->dma_addr, used_len,
++ DMA_FROM_DEVICE);
+ }
+ } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
+ pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
+@@ -278,7 +283,7 @@ static void efx_free_rx_buffer(struct ef
+ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf)
+ {
+- efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
++ efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
+ efx_free_rx_buffer(rx_queue->efx, rx_buf);
+ }
+
+@@ -544,10 +549,10 @@ void efx_rx_packet(struct efx_rx_queue *
+ goto out;
+ }
+
+- /* Release card resources - assumes all RX buffers consumed in-order
+- * per RX queue
++ /* Release and/or sync DMA mapping - assumes all RX buffers
++ * consumed in-order per RX queue
+ */
+- efx_unmap_rx_buffer(efx, rx_buf);
++ efx_unmap_rx_buffer(efx, rx_buf, len);
+
+ /* Prefetch nice and early so data will (hopefully) be in cache by
+ * the time we look at it.
--- /dev/null
+From 2ac86920c09eecdc5dad1810358d5b81e02f8e7e Mon Sep 17 00:00:00 2001
+From: Ben Hutchings <bhutchings@solarflare.com>
+Date: Thu, 6 Sep 2012 16:52:31 +0100
+Subject: sfc: Really disable flow control while flushing
+
+
+From: Ben Hutchings <bhutchings@solarflare.com>
+
+[ Upstream commit d5e8cc6c946e0857826dcfbb3585068858445bfe ]
+
+Receiving pause frames can block TX queue flushes. Earlier changes
+work around this by reconfiguring the MAC during flushes for VFs, but
+during flushes for the PF we would only change the fc_disable counter.
+Unless the MAC is reconfigured for some other reason during the flush
+(which I would not expect to happen), this had no effect at all.
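+
+The intended pairing looks like the sketch below, which simply restates the
+siena_prepare_flush()/siena_finish_flush() helpers added by this patch
+(efx_nic and efx_mcdi_set_mac are the driver's own): bumping the fc_disable
+counter only has an effect if the MAC is actually reprogrammed on its
+0<->1 transitions.
+
+        void prepare_flush(struct efx_nic *efx)
+        {
+                /* First flusher: push "flow control off" to the MAC */
+                if (efx->fc_disable++ == 0)
+                        efx_mcdi_set_mac(efx);
+        }
+
+        void finish_flush(struct efx_nic *efx)
+        {
+                /* Last flusher done: restore the configured flow control */
+                if (--efx->fc_disable == 0)
+                        efx_mcdi_set_mac(efx);
+        }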
+
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/falcon.c | 2 ++
+ drivers/net/ethernet/sfc/net_driver.h | 2 ++
+ drivers/net/ethernet/sfc/nic.c | 3 +--
+ drivers/net/ethernet/sfc/nic.h | 2 ++
+ drivers/net/ethernet/sfc/siena.c | 15 ++++++++++++++-
+ drivers/net/ethernet/sfc/siena_sriov.c | 6 ++----
+ 6 files changed, 23 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/falcon.c
++++ b/drivers/net/ethernet/sfc/falcon.c
+@@ -1765,6 +1765,7 @@ const struct efx_nic_type falcon_a1_nic_
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+@@ -1807,6 +1808,7 @@ const struct efx_nic_type falcon_b0_nic_
+ .remove_port = falcon_remove_port,
+ .handle_global_event = falcon_handle_global_event,
+ .prepare_flush = falcon_prepare_flush,
++ .finish_flush = efx_port_dummy_op_void,
+ .update_stats = falcon_update_nic_stats,
+ .start_stats = falcon_start_nic_stats,
+ .stop_stats = falcon_stop_nic_stats,
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -887,6 +887,7 @@ static inline unsigned int efx_port_num(
+ * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
+ * @prepare_flush: Prepare the hardware for flushing the DMA queues
++ * @finish_flush: Clean up after flushing the DMA queues
+ * @update_stats: Update statistics not provided by event handling
+ * @start_stats: Start the regular fetching of statistics
+ * @stop_stats: Stop the regular fetching of statistics
+@@ -933,6 +934,7 @@ struct efx_nic_type {
+ void (*remove_port)(struct efx_nic *efx);
+ bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ void (*prepare_flush)(struct efx_nic *efx);
++ void (*finish_flush)(struct efx_nic *efx);
+ void (*update_stats)(struct efx_nic *efx);
+ void (*start_stats)(struct efx_nic *efx);
+ void (*stop_stats)(struct efx_nic *efx);
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -681,7 +681,6 @@ int efx_nic_flush_queues(struct efx_nic
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+- efx->fc_disable++;
+ efx->type->prepare_flush(efx);
+
+ efx_for_each_channel(channel, efx) {
+@@ -743,7 +742,7 @@ int efx_nic_flush_queues(struct efx_nic
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+- efx->fc_disable--;
++ efx->type->finish_flush(efx);
+
+ return rc;
+ }
+--- a/drivers/net/ethernet/sfc/nic.h
++++ b/drivers/net/ethernet/sfc/nic.h
+@@ -316,6 +316,8 @@ static inline int efx_nic_irq_test_irq_c
+
+ /* Global Resources */
+ extern int efx_nic_flush_queues(struct efx_nic *efx);
++extern void siena_prepare_flush(struct efx_nic *efx);
++extern void siena_finish_flush(struct efx_nic *efx);
+ extern void falcon_start_nic_stats(struct efx_nic *efx);
+ extern void falcon_stop_nic_stats(struct efx_nic *efx);
+ extern void falcon_setup_xaui(struct efx_nic *efx);
+--- a/drivers/net/ethernet/sfc/siena.c
++++ b/drivers/net/ethernet/sfc/siena.c
+@@ -125,6 +125,18 @@ static void siena_remove_port(struct efx
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+ }
+
++void siena_prepare_flush(struct efx_nic *efx)
++{
++ if (efx->fc_disable++ == 0)
++ efx_mcdi_set_mac(efx);
++}
++
++void siena_finish_flush(struct efx_nic *efx)
++{
++ if (--efx->fc_disable == 0)
++ efx_mcdi_set_mac(efx);
++}
++
+ static const struct efx_nic_register_test siena_register_tests[] = {
+ { FR_AZ_ADR_REGION,
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
+@@ -637,7 +649,8 @@ const struct efx_nic_type siena_a0_nic_t
+ .reset = siena_reset_hw,
+ .probe_port = siena_probe_port,
+ .remove_port = siena_remove_port,
+- .prepare_flush = efx_port_dummy_op_void,
++ .prepare_flush = siena_prepare_flush,
++ .finish_flush = siena_finish_flush,
+ .update_stats = siena_update_nic_stats,
+ .start_stats = siena_start_nic_stats,
+ .stop_stats = siena_stop_nic_stats,
+--- a/drivers/net/ethernet/sfc/siena_sriov.c
++++ b/drivers/net/ethernet/sfc/siena_sriov.c
+@@ -688,8 +688,7 @@ static int efx_vfdi_fini_all_queues(stru
+ return VFDI_RC_ENOMEM;
+
+ rtnl_lock();
+- if (efx->fc_disable++ == 0)
+- efx_mcdi_set_mac(efx);
++ siena_prepare_flush(efx);
+ rtnl_unlock();
+
+ /* Flush all the initialized queues */
+@@ -726,8 +725,7 @@ static int efx_vfdi_fini_all_queues(stru
+ }
+
+ rtnl_lock();
+- if (--efx->fc_disable == 0)
+- efx_mcdi_set_mac(efx);
++ siena_finish_flush(efx);
+ rtnl_unlock();
+
+ /* Irrespective of success/failure, fini the queues */
--- /dev/null
+From 2ec2d70045a3b7354c8039262140fdb20ce51270 Mon Sep 17 00:00:00 2001
+From: Daniel Pieczko <dpieczko@solarflare.com>
+Date: Tue, 2 Oct 2012 13:36:18 +0100
+Subject: sfc: Work-around flush timeout when flushes have completed
+
+
+From: Daniel Pieczko <dpieczko@solarflare.com>
+
+[ Upstream commit 525d9e824018cd7cc8d8d44832ddcd363abfe6e1 ]
+
+We sometimes hit a "failed to flush" timeout on some TX queues, but the
+flushes have completed and the flush completion events seem to go missing.
+In this case, we can check the TX_DESC_PTR_TBL register and drain the
+queues if the flushes have finished.
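+
+The per-queue test boils down to reading the hardware queue state: if the
+TX_DESC_PTR_TBL entry shows the queue neither enabled nor still flushing,
+the flush finished even though its completion event was lost.  A sketch of
+that predicate (register and field names are the driver's; the helper name
+is illustrative, and the full efx_check_tx_flush_complete() below also
+injects the missing drain event):
+
+        static bool tx_flush_done(efx_oword_t txd_ptr_tbl)
+        {
+                return !EFX_OWORD_FIELD(txd_ptr_tbl, FRF_AZ_TX_DESCQ_FLUSH) &&
+                       !EFX_OWORD_FIELD(txd_ptr_tbl, FRF_AZ_TX_DESCQ_EN);
+        }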
+
+[bwh: Minor fixes to coding style]
+Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sfc/net_driver.h | 1
+ drivers/net/ethernet/sfc/nic.c | 56 +++++++++++++++++++++++++++++++---
+ 2 files changed, 53 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/sfc/net_driver.h
++++ b/drivers/net/ethernet/sfc/net_driver.h
+@@ -194,6 +194,7 @@ struct efx_tx_queue {
+ /* Members shared between paths and sometimes updated */
+ unsigned int empty_read_count ____cacheline_aligned_in_smp;
+ #define EFX_EMPTY_COUNT_VALID 0x80000000
++ atomic_t flush_outstanding;
+ };
+
+ /**
+--- a/drivers/net/ethernet/sfc/nic.c
++++ b/drivers/net/ethernet/sfc/nic.c
+@@ -73,6 +73,8 @@
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
++static void efx_magic_event(struct efx_channel *channel, u32 magic);
++
+ /**************************************************************************
+ *
+ * Solarstorm hardware access
+@@ -495,6 +497,9 @@ static void efx_flush_tx_queue(struct ef
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
++ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
++ atomic_set(&tx_queue->flush_outstanding, 1);
++
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+@@ -670,6 +675,47 @@ static bool efx_flush_wake(struct efx_ni
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+ }
+
++static bool efx_check_tx_flush_complete(struct efx_nic *efx)
++{
++ bool i = true;
++ efx_oword_t txd_ptr_tbl;
++ struct efx_channel *channel;
++ struct efx_tx_queue *tx_queue;
++
++ efx_for_each_channel(channel, efx) {
++ efx_for_each_channel_tx_queue(tx_queue, channel) {
++ efx_reado_table(efx, &txd_ptr_tbl,
++ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
++ if (EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_FLUSH) ||
++ EFX_OWORD_FIELD(txd_ptr_tbl,
++ FRF_AZ_TX_DESCQ_EN)) {
++ netif_dbg(efx, hw, efx->net_dev,
++ "flush did not complete on TXQ %d\n",
++ tx_queue->queue);
++ i = false;
++ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
++ 1, 0)) {
++ /* The flush is complete, but we didn't
++ * receive a flush completion event
++ */
++ netif_dbg(efx, hw, efx->net_dev,
++ "flush complete on TXQ %d, so drain "
++ "the queue\n", tx_queue->queue);
++ /* Don't need to increment drain_pending as it
++ * has already been incremented for the queues
++ * which did not drain
++ */
++ efx_magic_event(channel,
++ EFX_CHANNEL_MAGIC_TX_DRAIN(
++ tx_queue));
++ }
++ }
++ }
++
++ return i;
++}
++
+ /* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be recieved so that there
+ * are no more RX and TX events left on any channel. */
+@@ -730,7 +776,8 @@ int efx_nic_flush_queues(struct efx_nic
+ timeout);
+ }
+
+- if (atomic_read(&efx->drain_pending)) {
++ if (atomic_read(&efx->drain_pending) &&
++ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
+ atomic_read(&efx->rxq_flush_outstanding),
+@@ -1017,9 +1064,10 @@ efx_handle_tx_flush_done(struct efx_nic
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+-
+- efx_magic_event(tx_queue->channel,
+- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
++ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
++ efx_magic_event(tx_queue->channel,
++ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
++ }
+ }
+ }
+
--- /dev/null
+From 6e86bb28f56c55a2c1cb97fc4f081df46d7511cc Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 14 Mar 2013 05:40:32 +0000
+Subject: tcp: fix skb_availroom()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 16fad69cfe4adbbfa813de516757b87bcae36d93 ]
+
+The Chrome OS team reported a crash in the TCP stack on a Pixel Chromebook:
+
+https://code.google.com/p/chromium/issues/detail?id=182056
+
+commit a21d45726acac (tcp: avoid order-1 allocations on wifi and tx
+path) made a poor choice by adding an 'avail_size' field to skb, when
+what we really needed was a 'reserved_tailroom' one.
+
+It would have avoided commit 22b4a4f22da (tcp: fix retransmit of
+partially acked frames) and this commit.
+
+The crash occurs because skb_split() is not aware of the 'avail_size'
+management (and should not be aware).
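+
+The relationship between the old and new accounting, roughly (this just
+restates the skb_availroom() hunk below using the existing skb_tailroom()
+helper, and assumes the new reserved_tailroom field from this patch):
+instead of storing the usable size directly, we store how many tail bytes
+are reserved and subtract them from the real tailroom.
+
+        #include <linux/skbuff.h>
+
+        static inline int availroom(const struct sk_buff *skb)
+        {
+                if (skb_is_nonlinear(skb))
+                        return 0;
+                /* tailroom (end - tail) minus the bytes promised to stay free */
+                return skb_tailroom(skb) - skb->reserved_tailroom;
+        }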
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Mukesh Agrawal <quiche@chromium.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 7 +++++--
+ net/ipv4/tcp.c | 2 +-
+ net/ipv4/tcp_output.c | 1 -
+ 3 files changed, 6 insertions(+), 4 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -479,7 +479,7 @@ struct sk_buff {
+ union {
+ __u32 mark;
+ __u32 dropcount;
+- __u32 avail_size;
++ __u32 reserved_tailroom;
+ };
+
+ sk_buff_data_t transport_header;
+@@ -1373,7 +1373,10 @@ static inline int skb_tailroom(const str
+ */
+ static inline int skb_availroom(const struct sk_buff *skb)
+ {
+- return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
++ if (skb_is_nonlinear(skb))
++ return 0;
++
++ return skb->end - skb->tail - skb->reserved_tailroom;
+ }
+
+ /**
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -704,7 +704,7 @@ struct sk_buff *sk_stream_alloc_skb(stru
+ * Make sure that we have exactly size bytes
+ * available to the caller, no more, no less.
+ */
+- skb->avail_size = size;
++ skb->reserved_tailroom = skb->end - skb->tail - size;
+ return skb;
+ }
+ __kfree_skb(skb);
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1096,7 +1096,6 @@ static void __pskb_trim_head(struct sk_b
+ eat = min_t(int, len, skb_headlen(skb));
+ if (eat) {
+ __skb_pull(skb, eat);
+- skb->avail_size -= eat;
+ len -= eat;
+ if (!len)
+ return;
--- /dev/null
+From 68b9b172ac60fbff9c60f3dba720f529383ac1c5 Mon Sep 17 00:00:00 2001
+From: Nithin Sujir <nsujir@broadcom.com>
+Date: Tue, 12 Mar 2013 15:32:48 +0000
+Subject: tg3: 5715 does not link up when autoneg off
+
+
+From: Nithin Sujir <nsujir@broadcom.com>
+
+[ Upstream commit 7c6cdead7cc9a99650d15497aae47d7472217eb1 ]
+
+Commit d13ba512cbba7de5d55d7a3b2aae7d83c8921457 ("tg3: Remove
+SPEED_UNKNOWN checks") cleaned up the autoneg advertisement by
+removing some dead code. One effect of this change was that the
+advertisement register would not be updated if autoneg is turned off.
+
+This exposed a bug on the 5715 device w.r.t. linking. The 5715 defaults
+to advertise only 10Mb Full duplex. But with autoneg disabled, it needs
+the configured speed enabled in the advertisement register to link up.
+
+This patch adds the workaround to advertise all speeds on the 5715 when
+autoneg is disabled.
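+
+For reference, "all speeds" here means the full 10/100 advertisement mask;
+ADVERTISE_ALL is defined in <linux/mii.h> as:
+
+        #define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+                               ADVERTISE_100HALF | ADVERTISE_100FULL)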
+
+Reported-by: Marcin Miotk <marcinmiotk81@gmail.com>
+Reviewed-by: Benjamin Li <benli@broadcom.com>
+Signed-off-by: Nithin Nayak Sujir <nsujir@broadcom.com>
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -3946,6 +3946,14 @@ static void tg3_phy_copper_begin(struct
+ tp->link_config.active_speed = tp->link_config.speed;
+ tp->link_config.active_duplex = tp->link_config.duplex;
+
++ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
++ /* With autoneg disabled, 5715 only links up when the
++ * advertisement register has the configured speed
++ * enabled.
++ */
++ tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
++ }
++
+ bmcr = 0;
+ switch (tp->link_config.speed) {
+ default:
--- /dev/null
+From b98bfdbdb1e65e1ef1193794b3228628e47648c7 Mon Sep 17 00:00:00 2001
+From: "Michael S. Tsirkin" <mst@redhat.com>
+Date: Sun, 17 Mar 2013 02:46:09 +0000
+Subject: vhost/net: fix heads usage of ubuf_info
+
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit 46aa92d1ba162b4b3d6b7102440e459d4e4ee255 ]
+
+The ubuf info allocator uses the guest-controlled head as an index,
+so a malicious guest could put the same head entry in the ring twice,
+and we would get two callbacks on the same value.
+To fix this, use upend_idx, which is guaranteed to be unique.
+
+Reported-by: Rusty Russell <rusty@rustcorp.com.au>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Cc: stable@kernel.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/vhost/net.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/vhost/net.c
++++ b/drivers/vhost/net.c
+@@ -235,7 +235,8 @@ static void handle_tx(struct vhost_net *
+ msg.msg_controllen = 0;
+ ubufs = NULL;
+ } else {
+- struct ubuf_info *ubuf = &vq->ubuf_info[head];
++ struct ubuf_info *ubuf;
++ ubuf = vq->ubuf_info + vq->upend_idx;
+
+ vq->heads[vq->upend_idx].len = len;
+ ubuf->callback = vhost_zerocopy_callback;