--- /dev/null
+From 03e05d90cdf2aaebb67594c45af839de52531c07 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Jun 2012 22:59:59 +0000
+Subject: be2net: fix a race in be_xmit()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit cd8f76c0a0c6fce0b2cf23c9bd0123f91453f46d ]
+
+As soon as the hardware is notified of a transmit, we can no longer
+assume the skb may be dereferenced, as TX completion might already have
+freed the packet.
+
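+A minimal sketch of the resulting pattern (names taken from the driver):
+
+	/* cache anything we still need _before_ ringing the doorbell;
+	 * after be_txq_notify() the TX completion path may free skb
+	 */
+	int gso_segs = skb_shinfo(skb)->gso_segs;
+
+	be_txq_notify(adapter, txq->id, wrb_cnt);
+	/* skb must not be dereferenced past this point */
+	be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
+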
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Sathya Perla <sathya.perla@emulex.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/emulex/benet/be_main.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -731,6 +731,8 @@ static netdev_tx_t be_xmit(struct sk_buf
+
+ copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
+ if (copied) {
++ int gso_segs = skb_shinfo(skb)->gso_segs;
++
+ /* record the sent skb in the sent_skb table */
+ BUG_ON(txo->sent_skb_list[start]);
+ txo->sent_skb_list[start] = skb;
+@@ -748,8 +750,7 @@ static netdev_tx_t be_xmit(struct sk_buf
+
+ be_txq_notify(adapter, txq->id, wrb_cnt);
+
+- be_tx_stats_update(txo, wrb_cnt, copied,
+- skb_shinfo(skb)->gso_segs, stopped);
++ be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
+ } else {
+ txq->head = start;
+ dev_kfree_skb_any(skb);
--- /dev/null
+From 2cd0a72b38f9ddc83cef62243e1a188150d60d27 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 12 Jun 2012 23:50:04 +0000
+Subject: bnx2x: fix checksum validation
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d6cb3e41386f20fb0777d0b59a2def82c65d37f7 ]
+
+The bnx2x driver incorrectly sets ip_summed to CHECKSUM_UNNECESSARY on
+encapsulated segments, so the TCP stack happily accepts frames with bad
+checksums if they are inside a GRE or IPIP encapsulation.
+
+Our understanding is that if no IP or L4 csum validation was done by the
+hardware, we should leave ip_summed as is (CHECKSUM_NONE), since the
+hardware doesn't provide CHECKSUM_COMPLETE support in its cqe.
+
+Then, if IP/L4 checksumming was done by the hardware, set
+CHECKSUM_UNNECESSARY if no error was flagged.
+
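+In table form, the intended handling of a cqe (a restatement of the
+logic above, not additional driver behavior):
+
+	HW validated csum?   error flagged?   resulting ip_summed
+	no                   -                CHECKSUM_NONE (left as is)
+	yes                  yes              CHECKSUM_NONE, hw_csum_err++
+	yes                  no               CHECKSUM_UNNECESSARY
+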
+Patch based on findings and analysis from Robert Evans.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Eilon Greenstein <eilong@broadcom.com>
+Cc: Yaniv Rosner <yanivr@broadcom.com>
+Cc: Merav Sicron <meravs@broadcom.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Robert Evans <evansr@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Acked-by: Eilon Greenstein <eilong@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 15 -------------
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 27 ++++++++++++++++++------
+ 2 files changed, 21 insertions(+), 21 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -744,21 +744,6 @@ struct bnx2x_fastpath {
+
+ #define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+-#define BNX2X_IP_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+-
+-#define BNX2X_L4_CSUM_ERR(cqe) \
+- (!((cqe)->fast_path_cqe.status_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+- ((cqe)->fast_path_cqe.type_error_flags & \
+- ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+-
+-#define BNX2X_RX_CSUM_OK(cqe) \
+- (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+-
+ #define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+ (((le16_to_cpu(flags) & \
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -568,6 +568,25 @@ drop:
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ }
+
++static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
++ struct bnx2x_fastpath *fp)
++{
++ /* Do nothing if no IP/L4 csum validation was done */
++
++ if (cqe->fast_path_cqe.status_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
++ return;
++
++ /* If both IP/L4 validation were done, check if an error was found. */
++
++ if (cqe->fast_path_cqe.type_error_flags &
++ (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
++ ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
++ fp->eth_q_stats.hw_csum_err++;
++ else
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++}
+
+ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+ {
+@@ -757,13 +776,9 @@ reuse_rx:
+
+ skb_checksum_none_assert(skb);
+
+- if (bp->dev->features & NETIF_F_RXCSUM) {
++ if (bp->dev->features & NETIF_F_RXCSUM)
++ bnx2x_csum_validate(skb, cqe, fp);
+
+- if (likely(BNX2X_RX_CSUM_OK(cqe)))
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- else
+- fp->eth_q_stats.hw_csum_err++;
+- }
+
+ skb_record_rx_queue(skb, fp->rx_queue);
+
--- /dev/null
+From c2ef397e0290d9b56a4568c0f47d0cf9626bdb19 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 13 Jun 2012 09:45:16 +0000
+Subject: bnx2x: fix panic when TX ring is full
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit bc14786a100cc6a81cd060e8031ec481241b418c ]
+
+There is an off-by-one error in the minimal number of BDs required in
+bnx2x_start_xmit() and bnx2x_tx_int() before stopping/resuming the tx
+queue.
+
+A full-size GSO packet, with data included in skb->head, really needs
+(MAX_SKB_FRAGS + 4) BDs because of bnx2x_tx_split().
+
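+One possible accounting that reaches this total (an assumption for
+illustration; the changelog only states the sum):
+
+	1 start BD + 1 parsing BD
+	+ 1 header BD and 1 data BD after bnx2x_tx_split()
+	+ up to MAX_SKB_FRAGS BDs for the page fragments
+	= MAX_SKB_FRAGS + 4 BDs for a single packet
+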
+This error triggers if BQL is disabled and heavy TCP transmit traffic
+occurs.
+
+bnx2x_tx_split() definitely can be called, so remove the wrong comment.
+
+Reported-by: Tomas Hruby <thruby@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Eilon Greenstein <eilong@broadcom.com>
+Cc: Yaniv Rosner <yanivr@broadcom.com>
+Cc: Merav Sicron <meravs@broadcom.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Robert Evans <evansr@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -191,7 +191,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struc
+
+ if ((netif_tx_queue_stopped(txq)) &&
+ (bp->state == BNX2X_STATE_OPEN) &&
+- (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
++ (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4))
+ netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
+@@ -2349,8 +2349,6 @@ int bnx2x_poll(struct napi_struct *napi,
+ /* we split the first BD into headers and data BDs
+ * to ease the pain of our fellow microcode engineers
+ * we use one mapping for both BDs
+- * So far this has only been observed to happen
+- * in Other Operating Systems(TM)
+ */
+ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+ struct bnx2x_fp_txdata *txdata,
+@@ -3002,7 +3000,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_b
+
+ txdata->tx_bd_prod += nbd;
+
+- if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
++ if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 4)) {
+ netif_tx_stop_queue(txq);
+
+ /* paired memory barrier is in bnx2x_tx_int(), we have to keep
+@@ -3011,7 +3009,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_b
+ smp_mb();
+
+ fp->eth_q_stats.driver_xoff++;
+- if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
++ if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4)
+ netif_tx_wake_queue(txq);
+ }
+ txdata->tx_pkt++;
--- /dev/null
+From 064505ba06570b605059f0bc781bade239291d49 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 12 Jun 2012 06:03:51 +0000
+Subject: bonding: Fix corrupted queue_mapping
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 5ee31c6898ea5537fcea160999d60dc63bc0c305 ]
+
+In the transmit path of the bonding driver, skb->cb is used to
+stash the skb->queue_mapping so that the bonding device can set its
+own queue mapping. This value becomes corrupted since the skb->cb is
+also used in __dev_xmit_skb.
+
+When transmitting through bonding driver, bond_select_queue is
+called from dev_queue_xmit. In bond_select_queue the original
+skb->queue_mapping is copied into skb->cb (via bond_queue_mapping)
+and skb->queue_mapping is overwritten with the bond driver queue.
+
+Subsequently in dev_queue_xmit, __dev_xmit_skb is called, which writes
+the packet length into skb->cb, thereby overwriting the stashed
+queue mapping. In bond_dev_queue_xmit (called from hard_start_xmit),
+the queue mapping for the skb is set to the stashed value, which is now
+the skb length and hence an invalid queue for the slave device.
+
+If we want to save skb->queue_mapping into skb->cb[], the best place is
+to add a field in struct qdisc_skb_cb, to make sure it won't conflict
+with other layers (e.g. qdisc, Infiniband...)
+
+This patch also makes sure (struct qdisc_skb_cb)->data is aligned on 8
+bytes: the netem qdisc, for example, assumes it can store a u64 in it
+without a misalignment penalty.
+
+Note: we only have 20 bytes left in (struct qdisc_skb_cb)->data[].
+The largest user is CHOKe, and it fills it.
+
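+The resulting skb->cb layout, sketched from the struct change below:
+
+	offset 0..3    qdisc_skb_cb.pkt_len  (written by the qdisc layer)
+	offset 4..5    bond_queue_mapping    (now safely reserved)
+	offset 6..7    _pad                  (keeps data 8-byte aligned)
+	offset 8..27   qdisc_skb_cb.data[20] (private qdisc state)
+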
+Based on a previous patch from Tom Herbert.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Tom Herbert <therbert@google.com>
+Cc: John Fastabend <john.r.fastabend@intel.com>
+Cc: Roland Dreier <roland@kernel.org>
+Acked-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_main.c | 9 +++++----
+ include/net/sch_generic.h | 7 +++++--
+ 2 files changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -76,6 +76,7 @@
+ #include <net/route.h>
+ #include <net/net_namespace.h>
+ #include <net/netns/generic.h>
++#include <net/pkt_sched.h>
+ #include "bonding.h"
+ #include "bond_3ad.h"
+ #include "bond_alb.h"
+@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct
+ return next;
+ }
+
+-#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
+-
+ /**
+ * bond_dev_queue_xmit - Prepare skb for xmit.
+ *
+@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *
+ {
+ skb->dev = slave_dev;
+
+- skb->queue_mapping = bond_queue_mapping(skb);
++ BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
++ sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
++ skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+
+ if (unlikely(netpoll_tx_running(slave_dev)))
+ bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
+@@ -4162,7 +4163,7 @@ static u16 bond_select_queue(struct net_
+ /*
+ * Save the original txq to restore before passing to the driver
+ */
+- bond_queue_mapping(skb) = skb->queue_mapping;
++ qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+
+ if (unlikely(txq >= dev->real_num_tx_queues)) {
+ do {
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -220,13 +220,16 @@ struct tcf_proto {
+
+ struct qdisc_skb_cb {
+ unsigned int pkt_len;
+- unsigned char data[24];
++ u16 bond_queue_mapping;
++ u16 _pad;
++ unsigned char data[20];
+ };
+
+ static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
+ {
+ struct qdisc_skb_cb *qcb;
+- BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz);
++
++ BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
+ BUILD_BUG_ON(sizeof(qcb->data) < sz);
+ }
+
--- /dev/null
+From 3cf098894a2fb3d8ec74103ed29f8fadfc727c73 Mon Sep 17 00:00:00 2001
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Date: Wed, 30 May 2012 12:25:37 +0000
+Subject: bql: Avoid possible inconsistent calculation.
+
+
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+
+[ Upstream commit 914bec1011a25f65cdc94988a6f974bfb9a3c10d ]
+
+dql->num_queued could change while dql_completed() is running. To keep
+the calculation consistent, take a snapshot in an on-stack variable.
+
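+The pattern, sketched (ACCESS_ONCE() is the 3.4-era primitive; newer
+kernels spell it READ_ONCE()):
+
+	unsigned int num_queued = ACCESS_ONCE(dql->num_queued);
+	/* all uses below see one stable value even if dql_queued()
+	 * runs concurrently on the producer side */
+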
+Signed-off-by: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Denys Fedoryshchenko <denys@visp.net.lb>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/dynamic_queue_limits.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/lib/dynamic_queue_limits.c
++++ b/lib/dynamic_queue_limits.c
+@@ -17,16 +17,18 @@
+ void dql_completed(struct dql *dql, unsigned int count)
+ {
+ unsigned int inprogress, prev_inprogress, limit;
+- unsigned int ovlimit, completed;
++ unsigned int ovlimit, completed, num_queued;
+ bool all_prev_completed;
+
++ num_queued = ACCESS_ONCE(dql->num_queued);
++
+ /* Can't complete more than what's in queue */
+- BUG_ON(count > dql->num_queued - dql->num_completed);
++ BUG_ON(count > num_queued - dql->num_completed);
+
+ completed = dql->num_completed + count;
+ limit = dql->limit;
+- ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+- inprogress = dql->num_queued - completed;
++ ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
++ inprogress = num_queued - completed;
+ prev_inprogress = dql->prev_num_queued - dql->num_completed;
+ all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
+
+@@ -106,7 +108,7 @@ void dql_completed(struct dql *dql, unsi
+ dql->prev_ovlimit = ovlimit;
+ dql->prev_last_obj_cnt = dql->last_obj_cnt;
+ dql->num_completed = completed;
+- dql->prev_num_queued = dql->num_queued;
++ dql->prev_num_queued = num_queued;
+ }
+ EXPORT_SYMBOL(dql_completed);
+
--- /dev/null
+From 666d9dab33f6e93fd3be8992b9b7453252f700f3 Mon Sep 17 00:00:00 2001
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Date: Wed, 30 May 2012 12:25:19 +0000
+Subject: bql: Avoid unneeded limit decrement.
+
+
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+
+[ Upstream commit 25426b794efdc70dde7fd3134dc56fac3e7d562d ]
+
+When the pattern below is observed,
+
+ TIME
+ dql_queued() dql_completed() |
+ a) initial state |
+ |
+ b) X bytes queued V
+
+ c) Y bytes queued
+ d) X bytes completed
+ e) Z bytes queued
+ f) Y bytes completed
+
+a) dql->limit already has some value and there is no in-flight packet.
+b) X bytes queued.
+c) Y bytes queued, exceeding the limit.
+d) X bytes completed; dql->prev_ovlimit is set and dql->prev_num_queued
+   is set to Y.
+e) Z bytes queued.
+f) Y bytes completed. inprogress and prev_inprogress are true.
+
+At f), according to the comment, all_prev_completed becomes true and
+the limit should be increased. But POSDIFF() ignores the
+(completed == dql->prev_num_queued) case, so the limit is decreased.
+
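+Spelled out for the boundary case hit at f):
+
+	completed == dql->prev_num_queued
+	POSDIFF(completed, prev_num_queued)  -> 0     (treated as false)
+	AFTER_EQ(completed, prev_num_queued) -> true  (limit may grow)
+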
+Signed-off-by: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Denys Fedoryshchenko <denys@visp.net.lb>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/dynamic_queue_limits.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/lib/dynamic_queue_limits.c
++++ b/lib/dynamic_queue_limits.c
+@@ -11,12 +11,14 @@
+ #include <linux/dynamic_queue_limits.h>
+
+ #define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
++#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
+
+ /* Records completed count and recalculates the queue limit */
+ void dql_completed(struct dql *dql, unsigned int count)
+ {
+ unsigned int inprogress, prev_inprogress, limit;
+- unsigned int ovlimit, all_prev_completed, completed;
++ unsigned int ovlimit, completed;
++ bool all_prev_completed;
+
+ /* Can't complete more than what's in queue */
+ BUG_ON(count > dql->num_queued - dql->num_completed);
+@@ -26,7 +28,7 @@ void dql_completed(struct dql *dql, unsi
+ ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit);
+ inprogress = dql->num_queued - completed;
+ prev_inprogress = dql->prev_num_queued - dql->num_completed;
+- all_prev_completed = POSDIFF(completed, dql->prev_num_queued);
++ all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
+
+ if ((ovlimit && !inprogress) ||
+ (dql->prev_ovlimit && all_prev_completed)) {
--- /dev/null
+From 910573d82ca9be8638b92c644544ba6f92772103 Mon Sep 17 00:00:00 2001
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Date: Wed, 30 May 2012 12:24:39 +0000
+Subject: bql: Fix POSDIFF() to integer overflow aware.
+
+
+From: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+
+[ Upstream commit 0cfd32b736ae0c36b42697584811042726c07cba ]
+
+POSDIFF() fails to take the integer overflow case into account.
+
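+A worked example of the wrap-around (values chosen for illustration;
+the counters are free-running unsigned ints):
+
+	A = 3           (num_queued, after wrapping past UINT_MAX)
+	B = 0xfffffffe  (num_completed)
+	old POSDIFF: A > B is false            -> 0  (difference lost)
+	new POSDIFF: (int)(A - B) = 5, 5 > 0   -> 5  (correct)
+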
+Signed-off-by: Hiroaki SHIMODA <shimoda.hiroaki@gmail.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Eric Dumazet <eric.dumazet@gmail.com>
+Cc: Denys Fedoryshchenko <denys@visp.net.lb>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/dynamic_queue_limits.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/dynamic_queue_limits.c
++++ b/lib/dynamic_queue_limits.c
+@@ -10,7 +10,7 @@
+ #include <linux/jiffies.h>
+ #include <linux/dynamic_queue_limits.h>
+
+-#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
++#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
+
+ /* Records completed count and recalculates the queue limit */
+ void dql_completed(struct dql *dql, unsigned int count)
--- /dev/null
+From acb421fa993fe70010b916728d87db53493850d5 Mon Sep 17 00:00:00 2001
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Tue, 26 Jun 2012 05:48:45 +0000
+Subject: bridge: Assign rtnl_link_ops to bridge devices created via ioctl (v2)
+
+
+From: stephen hemminger <shemminger@vyatta.com>
+
+[ Upstream commit 149ddd83a92b02c658d6c61f3276eb6500d585e8 ]
+
+This ensures that bridges created with brctl(8) or directly via
+ioctl(2) also carry IFLA_LINKINFO when dumped over netlink. It also
+allows creating a bridge with ioctl(2) and deleting it with
+RTM_DELLINK.
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bridge/br_if.c | 1 +
+ net/bridge/br_netlink.c | 2 +-
+ net/bridge/br_private.h | 1 +
+ 3 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/net/bridge/br_if.c
++++ b/net/bridge/br_if.c
+@@ -240,6 +240,7 @@ int br_add_bridge(struct net *net, const
+ return -ENOMEM;
+
+ dev_net_set(dev, net);
++ dev->rtnl_link_ops = &br_link_ops;
+
+ res = register_netdev(dev);
+ if (res)
+--- a/net/bridge/br_netlink.c
++++ b/net/bridge/br_netlink.c
+@@ -211,7 +211,7 @@ static int br_validate(struct nlattr *tb
+ return 0;
+ }
+
+-static struct rtnl_link_ops br_link_ops __read_mostly = {
++struct rtnl_link_ops br_link_ops __read_mostly = {
+ .kind = "bridge",
+ .priv_size = sizeof(struct net_bridge),
+ .setup = br_dev_setup,
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -538,6 +538,7 @@ extern int (*br_fdb_test_addr_hook)(stru
+ #endif
+
+ /* br_netlink.c */
++extern struct rtnl_link_ops br_link_ops;
+ extern int br_netlink_init(void);
+ extern void br_netlink_fini(void);
+ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
--- /dev/null
+From facea13ad2ee85d268bff23b100fcbeff5af4f92 Mon Sep 17 00:00:00 2001
+From: Paul Moore <pmoore@redhat.com>
+Date: Fri, 1 Jun 2012 05:54:56 +0000
+Subject: cipso: handle CIPSO options correctly when NetLabel is disabled
+
+
+From: Paul Moore <pmoore@redhat.com>
+
+[ Upstream commit 20e2a86485967c385d7c7befc1646e4d1d39362e ]
+
+When NetLabel is not enabled, e.g. CONFIG_NETLABEL=n, and the system
+receives a CIPSO tagged packet, it is dropped (cipso_v4_validate()
+returns non-zero). In most cases this is the correct and desired
+behavior; however, in the case where we are simply forwarding the
+traffic, e.g. acting as a network bridge, this becomes a problem.
+
+This patch fixes the forwarding problem by providing the basic CIPSO
+validation code directly in ip_options_compile() without the need for
+the NetLabel or CIPSO code. The new validation code cannot perform
+any of the CIPSO option label/value verification that
+cipso_v4_validate() does, but it can verify the basic CIPSO option
+format.
+
+The behavior when NetLabel is enabled is unchanged.
+
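+For reference, the option layout the new checks walk (a sketch of the
+basic CIPSO format, matching the validation below):
+
+	opt[0]     option type (CIPSO)
+	opt[1]     option length, must be at least 8
+	opt[2..5]  DOI, 32-bit big endian, must be non-zero
+	opt[6..]   tags: { tag type, tag length, ... } repeated; each
+	           tag length must fit in the remaining option bytes
+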
+Signed-off-by: Paul Moore <pmoore@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/cipso_ipv4.h | 29 ++++++++++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+--- a/include/net/cipso_ipv4.h
++++ b/include/net/cipso_ipv4.h
+@@ -42,6 +42,7 @@
+ #include <net/netlabel.h>
+ #include <net/request_sock.h>
+ #include <linux/atomic.h>
++#include <asm/unaligned.h>
+
+ /* known doi values */
+ #define CIPSO_V4_DOI_UNKNOWN 0x00000000
+@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getatt
+ static inline int cipso_v4_validate(const struct sk_buff *skb,
+ unsigned char **option)
+ {
+- return -ENOSYS;
++ unsigned char *opt = *option;
++ unsigned char err_offset = 0;
++ u8 opt_len = opt[1];
++ u8 opt_iter;
++
++ if (opt_len < 8) {
++ err_offset = 1;
++ goto out;
++ }
++
++ if (get_unaligned_be32(&opt[2]) == 0) {
++ err_offset = 2;
++ goto out;
++ }
++
++ for (opt_iter = 6; opt_iter < opt_len;) {
++ if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
++ err_offset = opt_iter + 1;
++ goto out;
++ }
++ opt_iter += opt[opt_iter + 1];
++ }
++
++out:
++ *option = opt + err_offset;
++ return err_offset;
++
+ }
+ #endif /* CONFIG_NETLABEL */
+
--- /dev/null
+From ad06508e40b82961ac5f478c2ad6bbc2c9b3b75f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 4 Jun 2012 00:18:19 +0000
+Subject: drop_monitor: dont sleep in atomic context
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit bec4596b4e6770c7037f21f6bd27567b152dc0d6 ]
+
+drop_monitor calls several sleeping functions while in atomic context.
+
+ BUG: sleeping function called from invalid context at mm/slub.c:943
+ in_atomic(): 1, irqs_disabled(): 0, pid: 2103, name: kworker/0:2
+ Pid: 2103, comm: kworker/0:2 Not tainted 3.5.0-rc1+ #55
+ Call Trace:
+ [<ffffffff810697ca>] __might_sleep+0xca/0xf0
+ [<ffffffff811345a3>] kmem_cache_alloc_node+0x1b3/0x1c0
+ [<ffffffff8105578c>] ? queue_delayed_work_on+0x11c/0x130
+ [<ffffffff815343fb>] __alloc_skb+0x4b/0x230
+ [<ffffffffa00b0360>] ? reset_per_cpu_data+0x160/0x160 [drop_monitor]
+ [<ffffffffa00b022f>] reset_per_cpu_data+0x2f/0x160 [drop_monitor]
+ [<ffffffffa00b03ab>] send_dm_alert+0x4b/0xb0 [drop_monitor]
+ [<ffffffff810568e0>] process_one_work+0x130/0x4c0
+ [<ffffffff81058249>] worker_thread+0x159/0x360
+ [<ffffffff810580f0>] ? manage_workers.isra.27+0x240/0x240
+ [<ffffffff8105d403>] kthread+0x93/0xa0
+ [<ffffffff816be6d4>] kernel_thread_helper+0x4/0x10
+ [<ffffffff8105d370>] ? kthread_freezable_should_stop+0x80/0x80
+ [<ffffffff816be6d0>] ? gs_change+0xb/0xb
+
+Rework the logic to call the sleeping functions in the right context.
+
+Use the standard timer/workqueue APIs to let the system choose any CPU
+to perform the allocation and the netlink send.
+
+Also avoid a loop if reset_per_cpu_data() cannot allocate memory:
+use mod_timer() to wait 1/10 of a second before the next try.
+
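+The resulting flow, with the sleeping calls in process context (sketch):
+
+	trace_drop_common()  [atomic]        -> add_timer()/mod_timer()
+	sched_send_work()    [timer, atomic] -> schedule_work()
+	send_dm_alert()      [worker, may sleep]
+	    -> reset_per_cpu_data()          (GFP_KERNEL allocation)
+	    -> genlmsg_multicast(..., GFP_KERNEL)
+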
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Neil Horman <nhorman@tuxdriver.com>
+Reviewed-by: Neil Horman <nhorman@tuxdriver.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/drop_monitor.c | 102 +++++++++++++++---------------------------------
+ 1 file changed, 33 insertions(+), 69 deletions(-)
+
+--- a/net/core/drop_monitor.c
++++ b/net/core/drop_monitor.c
+@@ -33,9 +33,6 @@
+ #define TRACE_ON 1
+ #define TRACE_OFF 0
+
+-static void send_dm_alert(struct work_struct *unused);
+-
+-
+ /*
+ * Globals, our netlink socket pointer
+ * and the work handle that will send up
+@@ -45,11 +42,10 @@ static int trace_state = TRACE_OFF;
+ static DEFINE_MUTEX(trace_state_mutex);
+
+ struct per_cpu_dm_data {
+- struct work_struct dm_alert_work;
+- struct sk_buff __rcu *skb;
+- atomic_t dm_hit_count;
+- struct timer_list send_timer;
+- int cpu;
++ spinlock_t lock;
++ struct sk_buff *skb;
++ struct work_struct dm_alert_work;
++ struct timer_list send_timer;
+ };
+
+ struct dm_hw_stat_delta {
+@@ -75,13 +71,13 @@ static int dm_delay = 1;
+ static unsigned long dm_hw_check_delta = 2*HZ;
+ static LIST_HEAD(hw_stats_list);
+
+-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
++static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
+ {
+ size_t al;
+ struct net_dm_alert_msg *msg;
+ struct nlattr *nla;
+ struct sk_buff *skb;
+- struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
++ unsigned long flags;
+
+ al = sizeof(struct net_dm_alert_msg);
+ al += dm_hit_limit * sizeof(struct net_dm_drop_point);
+@@ -96,65 +92,40 @@ static void reset_per_cpu_data(struct pe
+ sizeof(struct net_dm_alert_msg));
+ msg = nla_data(nla);
+ memset(msg, 0, al);
+- } else
+- schedule_work_on(data->cpu, &data->dm_alert_work);
+-
+- /*
+- * Don't need to lock this, since we are guaranteed to only
+- * run this on a single cpu at a time.
+- * Note also that we only update data->skb if the old and new skb
+- * pointers don't match. This ensures that we don't continually call
+- * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
+- */
+- if (skb != oskb) {
+- rcu_assign_pointer(data->skb, skb);
+-
+- synchronize_rcu();
+-
+- atomic_set(&data->dm_hit_count, dm_hit_limit);
++ } else {
++ mod_timer(&data->send_timer, jiffies + HZ / 10);
+ }
+
++ spin_lock_irqsave(&data->lock, flags);
++ swap(data->skb, skb);
++ spin_unlock_irqrestore(&data->lock, flags);
++
++ return skb;
+ }
+
+-static void send_dm_alert(struct work_struct *unused)
++static void send_dm_alert(struct work_struct *work)
+ {
+ struct sk_buff *skb;
+- struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data;
+
+- WARN_ON_ONCE(data->cpu != smp_processor_id());
++ data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
+
+- /*
+- * Grab the skb we're about to send
+- */
+- skb = rcu_dereference_protected(data->skb, 1);
++ skb = reset_per_cpu_data(data);
+
+- /*
+- * Replace it with a new one
+- */
+- reset_per_cpu_data(data);
+-
+- /*
+- * Ship it!
+- */
+ if (skb)
+ genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
+-
+- put_cpu_var(dm_cpu_data);
+ }
+
+ /*
+ * This is the timer function to delay the sending of an alert
+ * in the event that more drops will arrive during the
+- * hysteresis period. Note that it operates under the timer interrupt
+- * so we don't need to disable preemption here
++ * hysteresis period.
+ */
+-static void sched_send_work(unsigned long unused)
++static void sched_send_work(unsigned long _data)
+ {
+- struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
++ struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+
+- schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+-
+- put_cpu_var(dm_cpu_data);
++ schedule_work(&data->dm_alert_work);
+ }
+
+ static void trace_drop_common(struct sk_buff *skb, void *location)
+@@ -164,33 +135,28 @@ static void trace_drop_common(struct sk_
+ struct nlattr *nla;
+ int i;
+ struct sk_buff *dskb;
+- struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+-
++ struct per_cpu_dm_data *data;
++ unsigned long flags;
+
+- rcu_read_lock();
+- dskb = rcu_dereference(data->skb);
++ local_irq_save(flags);
++ data = &__get_cpu_var(dm_cpu_data);
++ spin_lock(&data->lock);
++ dskb = data->skb;
+
+ if (!dskb)
+ goto out;
+
+- if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
+- /*
+- * we're already at zero, discard this hit
+- */
+- goto out;
+- }
+-
+ nlh = (struct nlmsghdr *)dskb->data;
+ nla = genlmsg_data(nlmsg_data(nlh));
+ msg = nla_data(nla);
+ for (i = 0; i < msg->entries; i++) {
+ if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
+ msg->points[i].count++;
+- atomic_inc(&data->dm_hit_count);
+ goto out;
+ }
+ }
+-
++ if (msg->entries == dm_hit_limit)
++ goto out;
+ /*
+ * We need to create a new entry
+ */
+@@ -202,13 +168,11 @@ static void trace_drop_common(struct sk_
+
+ if (!timer_pending(&data->send_timer)) {
+ data->send_timer.expires = jiffies + dm_delay * HZ;
+- add_timer_on(&data->send_timer, smp_processor_id());
++ add_timer(&data->send_timer);
+ }
+
+ out:
+- rcu_read_unlock();
+- put_cpu_var(dm_cpu_data);
+- return;
++ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
+@@ -406,11 +370,11 @@ static int __init init_net_drop_monitor(
+
+ for_each_present_cpu(cpu) {
+ data = &per_cpu(dm_cpu_data, cpu);
+- data->cpu = cpu;
+ INIT_WORK(&data->dm_alert_work, send_dm_alert);
+ init_timer(&data->send_timer);
+- data->send_timer.data = cpu;
++ data->send_timer.data = (unsigned long)data;
+ data->send_timer.function = sched_send_work;
++ spin_lock_init(&data->lock);
+ reset_per_cpu_data(data);
+ }
+
--- /dev/null
+From 72dff13e46276dd443b4a6efd225fa84dd46b7c9 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Jun 2012 21:11:57 +0000
+Subject: dummy: fix rcu_sched self-detected stalls
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 16b0dc29c1af9df341428f4c49ada4f626258082 ]
+
+Trying to "modprobe dummy numdummies=30000" triggers :
+
+INFO: rcu_sched self-detected stall on CPU { 8} (t=60000 jiffies)
+
+After this splat, RTNL is locked and reboot is needed.
+
+We must call cond_resched() to avoid this, even holding RTNL.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/dummy.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/dummy.c
++++ b/drivers/net/dummy.c
+@@ -187,8 +187,10 @@ static int __init dummy_init_module(void
+ rtnl_lock();
+ err = __rtnl_link_register(&dummy_link_ops);
+
+- for (i = 0; i < numdummies && !err; i++)
++ for (i = 0; i < numdummies && !err; i++) {
+ err = dummy_init_one();
++ cond_resched();
++ }
+ if (err < 0)
+ __rtnl_link_unregister(&dummy_link_ops);
+ rtnl_unlock();
--- /dev/null
+From 71f9c079e7d11d8641590c44f30cc50fe36f0e3f Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 5 Jun 2012 03:00:18 +0000
+Subject: inetpeer: fix a race in inetpeer_gc_worker()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 55432d2b543a4b6dfae54f5c432a566877a85d90 ]
+
+commit 5faa5df1fa2024 (inetpeer: Invalidate the inetpeer tree along with
+the routing cache) added a race:
+
+Before freeing an inetpeer, we must respect an RCU grace period and make
+sure no user will attempt to increase the refcnt.
+
+inetpeer_invalidate_tree() waits for an RCU grace period before inserting
+the inetpeer tree into gc_list and waking the worker. At that time, no
+concurrent lookup can find an inetpeer in this tree.
+
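+The ordering the fix enforces, sketched from the code below:
+
+	inetpeer_invalidate_tree()
+	    cmpxchg(&base->root, old, new) /* tree hidden from new lookups */
+	    call_rcu(&prev->gc_rcu, inetpeer_inval_rcu)
+	        /* ... RCU grace period: in-flight lookups drain ... */
+	inetpeer_inval_rcu()
+	    list_add_tail(&p->gc_list, &gc_list)  /* now safe to queue */
+	    schedule_delayed_work(&gc_work, gc_delay)
+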
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/net/inetpeer.h | 5 ++++-
+ net/ipv4/inetpeer.c | 16 ++++++++++++----
+ 2 files changed, 16 insertions(+), 5 deletions(-)
+
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -40,7 +40,10 @@ struct inet_peer {
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
+- struct list_head gc_list;
++ union {
++ struct list_head gc_list;
++ struct rcu_head gc_rcu;
++ };
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_p
+ }
+ EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
++static void inetpeer_inval_rcu(struct rcu_head *head)
++{
++ struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
++
++ spin_lock_bh(&gc_lock);
++ list_add_tail(&p->gc_list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
++
+ void inetpeer_invalidate_tree(int family)
+ {
+ struct inet_peer *old, *new, *prev;
+@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family
+ prev = cmpxchg(&base->root, old, new);
+ if (prev == old) {
+ base->total = 0;
+- spin_lock(&gc_lock);
+- list_add_tail(&prev->gc_list, &gc_list);
+- spin_unlock(&gc_lock);
+- schedule_delayed_work(&gc_work, gc_delay);
++ call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
+ }
+
+ out:
--- /dev/null
+From 1a1bdcde369378f24d5c4983138640ee4de9d881 Mon Sep 17 00:00:00 2001
+From: Thomas Graf <tgraf@suug.ch>
+Date: Thu, 7 Jun 2012 06:51:04 +0000
+Subject: ipv6: fib: Restore NTF_ROUTER exception in fib6_age()
+
+
+From: Thomas Graf <tgraf@suug.ch>
+
+[ Upstream commit 8bd74516b1bd9308c17f67583134d93f777203ca ]
+
+Commit 5339ab8b1dd82 (ipv6: fib: Convert fib6_age() to
+dst_neigh_lookup().) seems to have mistakenly inverted the
+exception for cached NTF_ROUTER routes.
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/ip6_fib.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1560,7 +1560,7 @@ static int fib6_age(struct rt6_info *rt,
+ neigh_flags = neigh->flags;
+ neigh_release(neigh);
+ }
+- if (neigh_flags & NTF_ROUTER) {
++ if (!(neigh_flags & NTF_ROUTER)) {
+ RT6_TRACE("purging route %p via non-router but gateway\n",
+ rt);
+ return -1;
--- /dev/null
+From 5413590847cdbd348b79c5d642fe782c0b9bcdc4 Mon Sep 17 00:00:00 2001
+From: Thomas Graf <tgraf@suug.ch>
+Date: Mon, 18 Jun 2012 12:08:33 +0000
+Subject: ipv6: Move ipv6 proc file registration to end of init order
+
+
+From: Thomas Graf <tgraf@suug.ch>
+
+[ Upstream commit d189634ecab947c10f6f832258b103d0bbfe73cc ]
+
+/proc/net/ipv6_route reflects the contents of fib_table_hash. The proc
+handler is installed in ip6_route_net_init() whereas fib_table_hash is
+allocated in fib6_net_init() _after_ the proc handler has been installed.
+
+This opens up a short time frame to access fib_table_hash with its pants
+down.
+
+Move the registration of the proc files to a later point in the init
+order to avoid the race.
+
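+The init order before and after, sketched:
+
+	before: ip6_route_net_init()       -> proc files registered
+	        fib6_net_init()            -> fib_table_hash allocated
+	        (window where the proc handler sees no hash table)
+	after:  ip6_route_net_init()
+	        fib6_net_init()
+	        ip6_route_net_init_late()  -> proc files registered
+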
+Tested :-)
+
+Signed-off-by: Thomas Graf <tgraf@suug.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv6/route.c | 41 +++++++++++++++++++++++++++++++----------
+ 1 file changed, 31 insertions(+), 10 deletions(-)
+
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -2953,10 +2953,6 @@ static int __net_init ip6_route_net_init
+ net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
+ net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
+
+-#ifdef CONFIG_PROC_FS
+- proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
+- proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
+-#endif
+ net->ipv6.ip6_rt_gc_expire = 30*HZ;
+
+ ret = 0;
+@@ -2977,10 +2973,6 @@ out_ip6_dst_ops:
+
+ static void __net_exit ip6_route_net_exit(struct net *net)
+ {
+-#ifdef CONFIG_PROC_FS
+- proc_net_remove(net, "ipv6_route");
+- proc_net_remove(net, "rt6_stats");
+-#endif
+ kfree(net->ipv6.ip6_null_entry);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ kfree(net->ipv6.ip6_prohibit_entry);
+@@ -2989,11 +2981,33 @@ static void __net_exit ip6_route_net_exi
+ dst_entries_destroy(&net->ipv6.ip6_dst_ops);
+ }
+
++static int __net_init ip6_route_net_init_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_fops_create(net, "ipv6_route", 0, &ipv6_route_proc_fops);
++ proc_net_fops_create(net, "rt6_stats", S_IRUGO, &rt6_stats_seq_fops);
++#endif
++ return 0;
++}
++
++static void __net_exit ip6_route_net_exit_late(struct net *net)
++{
++#ifdef CONFIG_PROC_FS
++ proc_net_remove(net, "ipv6_route");
++ proc_net_remove(net, "rt6_stats");
++#endif
++}
++
+ static struct pernet_operations ip6_route_net_ops = {
+ .init = ip6_route_net_init,
+ .exit = ip6_route_net_exit,
+ };
+
++static struct pernet_operations ip6_route_net_late_ops = {
++ .init = ip6_route_net_init_late,
++ .exit = ip6_route_net_exit_late,
++};
++
+ static struct notifier_block ip6_route_dev_notifier = {
+ .notifier_call = ip6_route_dev_notify,
+ .priority = 0,
+@@ -3043,19 +3057,25 @@ int __init ip6_route_init(void)
+ if (ret)
+ goto xfrm6_init;
+
++ ret = register_pernet_subsys(&ip6_route_net_late_ops);
++ if (ret)
++ goto fib6_rules_init;
++
+ ret = -ENOBUFS;
+ if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
+ __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ ret = register_netdevice_notifier(&ip6_route_dev_notifier);
+ if (ret)
+- goto fib6_rules_init;
++ goto out_register_late_subsys;
+
+ out:
+ return ret;
+
++out_register_late_subsys:
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_init:
+ fib6_rules_cleanup();
+ xfrm6_init:
+@@ -3074,6 +3094,7 @@ out_kmem_cache:
+ void ip6_route_cleanup(void)
+ {
+ unregister_netdevice_notifier(&ip6_route_dev_notifier);
++ unregister_pernet_subsys(&ip6_route_net_late_ops);
+ fib6_rules_cleanup();
+ xfrm6_fini();
+ fib6_gc_cleanup();
--- /dev/null
+From ca49ae5eccf6e744a6df0615f6cd4bcc3622d785 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 8 Jun 2012 06:25:00 +0000
+Subject: l2tp: fix a race in l2tp_ip_sendmsg()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4399a4df98a63e30fd16e9d0cecc46ea92269e8f ]
+
+Commit 081b1b1bb27f (l2tp: fix l2tp_ip_sendmsg() route handling) added
+a race in case the IP route cache is disabled.
+
+In this case, we should not do the dst_release(&rt->dst), since it
+would free the dst immediately instead of waiting for an RCU grace
+period.
+
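+The two paths after the fix, sketched:
+
+	connected:    sk_setup_caps(sk, &rt->dst);
+	              /* the socket keeps the reference */
+	              skb_dst_set_noref(skb, &rt->dst);
+	unconnected:  skb_dst_set(skb, &rt->dst);
+	              /* hand the reference to the skb instead of
+	               * releasing it under rcu_read_lock() */
+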
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: James Chapman <jchapman@katalix.com>
+Cc: Denys Fedoryshchenko <denys@visp.net.lb>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_ip.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -516,10 +516,12 @@ static int l2tp_ip_sendmsg(struct kiocb
+ sk->sk_bound_dev_if);
+ if (IS_ERR(rt))
+ goto no_route;
+- if (connected)
++ if (connected) {
+ sk_setup_caps(sk, &rt->dst);
+- else
+- dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
++ } else {
++ skb_dst_set(skb, &rt->dst);
++ goto xmit;
++ }
+ }
+
+ /* We dont need to clone dst here, it is guaranteed to not disappear.
+@@ -527,6 +529,7 @@ static int l2tp_ip_sendmsg(struct kiocb
+ */
+ skb_dst_set_noref(skb, &rt->dst);
+
++xmit:
+ /* Queue the packet to IP for output */
+ rc = ip_queue_xmit(skb, &inet->cork.fl);
+ rcu_read_unlock();
--- /dev/null
+From dbaead2646f355699f636de9005e6e7576007947 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Jun 2012 23:24:00 +0000
+Subject: lpc_eth: add missing ndo_change_mtu()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit e30478598a8476d02e3b00caa89ce1a3b1dad54b ]
+
+lpc_eth copies transmitted skbs to a DMA area without checking skb
+lengths, so it can trigger buffer overflows:
+
+memcpy(pldat->tx_buff_v + txidx * ENET_MAXF_SIZE, skb->data, len);
+
+One way to get bigger skbs is to allow MTU changes above the 1500 limit.
+
+Calling eth_change_mtu() in ndo_change_mtu() makes sure this cannot
+happen.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Roland Stigge <stigge@antcom.de>
+Cc: Kevin Wells <kevin.wells@nxp.com>
+Acked-by: Roland Stigge <stigge@antcom.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/nxp/lpc_eth.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -1310,6 +1310,7 @@ static const struct net_device_ops lpc_n
+ .ndo_set_rx_mode = lpc_eth_set_multicast_list,
+ .ndo_do_ioctl = lpc_eth_ioctl,
+ .ndo_set_mac_address = lpc_set_mac_address,
++ .ndo_change_mtu = eth_change_mtu,
+ };
+
+ static int lpc_eth_drv_probe(struct platform_device *pdev)
--- /dev/null
+From 6dfaf84c13b4b573a125ce54f1d70b556fe0dd76 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Mon, 11 Jun 2012 07:21:36 +0000
+Subject: lpc_eth: fix tx completion
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 3f16da51b0e533871d22a29682f3c3969d4f7e22 ]
+
+__lpc_handle_xmit() has two bugs:
+
+1) It can leak skbs if TXSTATUS_ERROR is set.
+
+2) It can wake up the tx queue while no slot was freed.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Roland Stigge <stigge@antcom.de>
+Tested-by: Roland Stigge <stigge@antcom.de>
+Cc: Kevin Wells <kevin.wells@nxp.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/nxp/lpc_eth.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/ethernet/nxp/lpc_eth.c
++++ b/drivers/net/ethernet/nxp/lpc_eth.c
+@@ -936,16 +936,16 @@ static void __lpc_handle_xmit(struct net
+ /* Update stats */
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+-
+- /* Free buffer */
+- dev_kfree_skb_irq(skb);
+ }
++ dev_kfree_skb_irq(skb);
+
+ txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
+ }
+
+- if (netif_queue_stopped(ndev))
+- netif_wake_queue(ndev);
++ if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
++ if (netif_queue_stopped(ndev))
++ netif_wake_queue(ndev);
++ }
+ }
+
+ static int __lpc_handle_recv(struct net_device *ndev, int budget)
--- /dev/null
+From ad17d622580fc388627e843cc6776ec38d1d00e3 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Jun 2012 00:07:20 +0000
+Subject: net: l2tp_eth: fix kernel panic on rmmod l2tp_eth
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a06998b88b1651c5f71c0e35f528bf2057188ead ]
+
+We must prevent module unloading if some devices are still attached to
+the l2tp_eth driver.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
+Tested-by: Denys Fedoryshchenko <denys@visp.net.lb>
+Cc: James Chapman <jchapman@katalix.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_eth.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/net/l2tp/l2tp_eth.c
++++ b/net/l2tp/l2tp_eth.c
+@@ -167,6 +167,7 @@ static void l2tp_eth_delete(struct l2tp_
+ if (dev) {
+ unregister_netdev(dev);
+ spriv->dev = NULL;
++ module_put(THIS_MODULE);
+ }
+ }
+ }
+@@ -254,6 +255,7 @@ static int l2tp_eth_create(struct net *n
+ if (rc < 0)
+ goto out_del_dev;
+
++ __module_get(THIS_MODULE);
+ /* Must be done after register_netdev() */
+ strlcpy(session->ifname, dev->name, IFNAMSIZ);
+
--- /dev/null
+From 284363958a02ef4b53866572d0bd5588858eac8e Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 7 Jun 2012 04:58:35 +0000
+Subject: net: neighbour: fix neigh_dump_info()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 4bd6683bd400c8b1d2ad544bb155d86a5d10f91c ]
+
+Denys found out that "ip neigh" output was truncated to
+about 54 neighbours.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reported-by: Denys Fedoryshchenko <denys@visp.net.lb>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/neighbour.c | 14 ++++++--------
+ 1 file changed, 6 insertions(+), 8 deletions(-)
+
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2214,9 +2214,7 @@ static int neigh_dump_table(struct neigh
+ rcu_read_lock_bh();
+ nht = rcu_dereference_bh(tbl->nht);
+
+- for (h = 0; h < (1 << nht->hash_shift); h++) {
+- if (h < s_h)
+- continue;
++ for (h = s_h; h < (1 << nht->hash_shift); h++) {
+ if (h > s_h)
+ s_idx = 0;
+ for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
+@@ -2255,9 +2253,7 @@ static int pneigh_dump_table(struct neig
+
+ read_lock_bh(&tbl->lock);
+
+- for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+- if (h < s_h)
+- continue;
++ for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
+ if (h > s_h)
+ s_idx = 0;
+ for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
+@@ -2292,7 +2288,7 @@ static int neigh_dump_info(struct sk_buf
+ struct neigh_table *tbl;
+ int t, family, s_t;
+ int proxy = 0;
+- int err = 0;
++ int err;
+
+ read_lock(&neigh_tbl_lock);
+ family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+@@ -2306,7 +2302,7 @@ static int neigh_dump_info(struct sk_buf
+
+ s_t = cb->args[0];
+
+- for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
++ for (tbl = neigh_tables, t = 0; tbl;
+ tbl = tbl->next, t++) {
+ if (t < s_t || (family && tbl->family != family))
+ continue;
+@@ -2317,6 +2313,8 @@ static int neigh_dump_info(struct sk_buf
+ err = pneigh_dump_table(tbl, skb, cb);
+ else
+ err = neigh_dump_table(tbl, skb, cb);
++ if (err < 0)
++ break;
+ }
+ read_unlock(&neigh_tbl_lock);
+
--- /dev/null
+From f65ece907db282d2c0f1090ee65f909367862be1 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Thu, 14 Jun 2012 06:42:44 +0000
+Subject: net: remove skb_orphan_try()
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 62b1a8ab9b3660bb820d8dfe23148ed6cda38574 ]
+
+Orphaning the skb in dev_hard_start_xmit() makes bonding behavior
+unfriendly for applications sending big UDP bursts: once packets
+pass the bonding device and reach the real device, they might hit a
+full qdisc and be dropped. Without orphaning, the sender is
+automatically throttled because sk->sk_wmem_alloc reaches
+sk->sk_sndbuf (assuming sk_sndbuf is not too big).
+
+We could try to defer the orphaning adding another test in
+dev_hard_start_xmit(), but all this seems of little gain,
+now that BQL tends to make packets more likely to be parked
+in Qdisc queues instead of NIC TX ring, in cases where performance
+matters.
+
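+The backpressure being restored works roughly like this (a simplified
+sketch of the socket write-space accounting):
+
+	sendmsg() charges skb->truesize to sk->sk_wmem_alloc and blocks
+	    once it reaches sk->sk_sndbuf
+	kfree_skb() at TX completion -> sock_wfree() uncharges and
+	    wakes the sender
+	skb_orphan() in dev_hard_start_xmit() dropped the charge before
+	    the qdisc, so a full qdisc dropped packets instead of
+	    throttling the sender
+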
+This reverts commits:
+fc6055a5ba31 net: Introduce skb_orphan_try()
+87fd308cfc6b net: skb_tx_hash() fix relative to skb_orphan_try()
+and removes SKBTX_DRV_NEEDS_SK_REF flag
+
+Reported-and-bisected-by: Jean-Michel Hautbois <jhautbois@gmail.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Acked-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/skbuff.h | 7 ++-----
+ net/can/raw.c | 3 ---
+ net/core/dev.c | 23 +----------------------
+ net/iucv/af_iucv.c | 1 -
+ 4 files changed, 3 insertions(+), 31 deletions(-)
+
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -225,14 +225,11 @@ enum {
+ /* device driver is going to provide hardware time stamp */
+ SKBTX_IN_PROGRESS = 1 << 2,
+
+- /* ensure the originating sk reference is available on driver level */
+- SKBTX_DRV_NEEDS_SK_REF = 1 << 3,
+-
+ /* device driver supports TX zero-copy buffers */
+- SKBTX_DEV_ZEROCOPY = 1 << 4,
++ SKBTX_DEV_ZEROCOPY = 1 << 3,
+
+ /* generate wifi status information (where possible) */
+- SKBTX_WIFI_STATUS = 1 << 5,
++ SKBTX_WIFI_STATUS = 1 << 4,
+ };
+
+ /*
+--- a/net/can/raw.c
++++ b/net/can/raw.c
+@@ -681,9 +681,6 @@ static int raw_sendmsg(struct kiocb *ioc
+ if (err < 0)
+ goto free_skb;
+
+- /* to be able to check the received tx sock reference in raw_rcv() */
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+-
+ skb->dev = dev;
+ skb->sk = sk;
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2091,25 +2091,6 @@ static int dev_gso_segment(struct sk_buf
+ return 0;
+ }
+
+-/*
+- * Try to orphan skb early, right before transmission by the device.
+- * We cannot orphan skb if tx timestamp is requested or the sk-reference
+- * is needed on driver level for other reasons, e.g. see net/can/raw.c
+- */
+-static inline void skb_orphan_try(struct sk_buff *skb)
+-{
+- struct sock *sk = skb->sk;
+-
+- if (sk && !skb_shinfo(skb)->tx_flags) {
+- /* skb_tx_hash() wont be able to get sk.
+- * We copy sk_hash into skb->rxhash
+- */
+- if (!skb->rxhash)
+- skb->rxhash = sk->sk_hash;
+- skb_orphan(skb);
+- }
+-}
+-
+ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
+ {
+ return ((features & NETIF_F_GEN_CSUM) ||
+@@ -2195,8 +2176,6 @@ int dev_hard_start_xmit(struct sk_buff *
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+- skb_orphan_try(skb);
+-
+ features = netif_skb_features(skb);
+
+ if (vlan_tx_tag_present(skb) &&
+@@ -2306,7 +2285,7 @@ u16 __skb_tx_hash(const struct net_devic
+ if (skb->sk && skb->sk->sk_hash)
+ hash = skb->sk->sk_hash;
+ else
+- hash = (__force u16) skb->protocol ^ skb->rxhash;
++ hash = (__force u16) skb->protocol;
+ hash = jhash_1word(hash, hashrnd);
+
+ return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -372,7 +372,6 @@ static int afiucv_hs_send(struct iucv_me
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = ETH_P_AF_IUCV;
+- skb_shinfo(skb)->tx_flags |= SKBTX_DRV_NEEDS_SK_REF;
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return -ENOMEM;
--- /dev/null
+From 9c4257c379851be28c520051838f74eebfa43d7d Mon Sep 17 00:00:00 2001
+From: Jason Wang <jasowang@redhat.com>
+Date: Wed, 30 May 2012 21:18:10 +0000
+Subject: net: sock: validate data_len before allocating skb in sock_alloc_send_pskb()
+
+
+From: Jason Wang <jasowang@redhat.com>
+
+[ Upstream commit cc9b17ad29ecaa20bfe426a8d4dbfb94b13ff1cc ]
+
+We need to validate the number of pages consumed by data_len, otherwise
+the frags array could be overflowed by userspace. So this patch
+validates data_len and returns -EMSGSIZE when data_len would occupy
+more frags than MAX_SKB_FRAGS.
+
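+A worked example of the new check (assuming 4 KiB pages, where
+MAX_SKB_FRAGS = 65536/PAGE_SIZE + 2 = 18):
+
+	npages = (data_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	data_len = 73728 -> npages = 18 -> allowed
+	data_len = 73729 -> npages = 19 -> -EMSGSIZE, instead of
+	                    overflowing the 18-entry frags[] array
+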
+Signed-off-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/sock.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1600,6 +1600,11 @@ struct sk_buff *sock_alloc_send_pskb(str
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
++ int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
++
++ err = -EMSGSIZE;
++ if (npages > MAX_SKB_FRAGS)
++ goto failure;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+@@ -1618,14 +1623,12 @@ struct sk_buff *sock_alloc_send_pskb(str
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+- int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+- npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
--- /dev/null
+From ab1bb6e75fbbc50fc93e1bc777c5f8eaf534a4c6 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Tue, 12 Jun 2012 19:30:21 +0000
+Subject: netpoll: fix netpoll_send_udp() bugs
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 954fba0274058d27c7c07b5ea07c41b3b7477894 ]
+
+Bogdan Hamciuc diagnosed and fixed the following bug in netpoll_send_udp():
+
+"skb->len += len;" instead of "skb_put(skb, len);"
+
+Meaning that _if_ a network driver needs to call skb_realloc_headroom(),
+only packet headers would be copied, leaving garbage in the payload.
+
+However, skb_realloc_headroom() must be avoided as much as possible,
+since it requires memory and netpoll tries hard to work even if memory
+is exhausted (using a pool of preallocated skbs).
+
+It appears netpoll_send_udp() reserved 16 bytes for the ethernet header,
+which happens to work for typical drivers but not all.
+
+The right thing is to use LL_RESERVED_SPACE(dev)
+(and also add dev->needed_tailroom of tailroom).
+
+This patch combines both fixes.
+
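+A minimal sketch of the skb_put() difference (simplified from the
+skbuff helpers):
+
+	skb->len += len;    /* bumps the length accounting only */
+	skb_put(skb, len);  /* also advances skb->tail, so helpers that
+	                     * trust skb->tail (like the headroom
+	                     * reallocation copy) keep the payload */
+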
+Many thanks to Bogdan for raising this issue.
+
+Reported-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Bogdan Hamciuc <bogdan.hamciuc@freescale.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: Neil Horman <nhorman@tuxdriver.com>
+Reviewed-by: Neil Horman <nhorman@tuxdriver.com>
+Reviewed-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/netpoll.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
+
+ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
+ {
+- int total_len, eth_len, ip_len, udp_len;
++ int total_len, ip_len, udp_len;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ struct ethhdr *eth;
+
+ udp_len = len + sizeof(*udph);
+- ip_len = eth_len = udp_len + sizeof(*iph);
+- total_len = eth_len + ETH_HLEN + NET_IP_ALIGN;
++ ip_len = udp_len + sizeof(*iph);
++ total_len = ip_len + LL_RESERVED_SPACE(np->dev);
+
+- skb = find_skb(np, total_len, total_len - len);
++ skb = find_skb(np, total_len + np->dev->needed_tailroom,
++ total_len - len);
+ if (!skb)
+ return;
+
+ skb_copy_to_linear_data(skb, msg, len);
+- skb->len += len;
++ skb_put(skb, len);
+
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
--- /dev/null
+From 015fc314e55db8fe3f4ac4f995727948cb4f8b31 Mon Sep 17 00:00:00 2001
+From: Devendra Naga <devendra.aaru@gmail.com>
+Date: Thu, 31 May 2012 01:51:20 +0000
+Subject: r8169: call netif_napi_del at errpaths and at driver unload
+
+
+From: Devendra Naga <devendra.aaru@gmail.com>
+
+[ Upstream commit ad1be8d345416a794dea39761a374032aa471a76 ]
+
+when register_netdev fails, the init'ed NAPIs by netif_napi_add must be
+deleted with netif_napi_del, and also when driver unloads, it should
+delete the NAPI before unregistering netdevice using unregister_netdev.
+
+Signed-off-by: Devendra Naga <devendra.aaru@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/realtek/r8169.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5966,6 +5966,8 @@ static void __devexit rtl_remove_one(str
+
+ cancel_work_sync(&tp->wk.work);
+
++ netif_napi_del(&tp->napi);
++
+ unregister_netdev(dev);
+
+ rtl_release_firmware(tp);
+@@ -6288,6 +6290,7 @@ out:
+ return rc;
+
+ err_out_msi_4:
++ netif_napi_del(&tp->napi);
+ rtl_disable_msi(pdev, tp);
+ iounmap(ioaddr);
+ err_out_free_res_3:
--- /dev/null
+From 02c753a3b78f39b23defe33bcb37c9b6263dcb53 Mon Sep 17 00:00:00 2001
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 8 Jun 2012 00:28:16 -0700
+Subject: Revert "niu: Add support for byte queue limits."
+
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 6a2b28ef036ab5c66fdc606fe97d9e5cb34ea409 ]
+
+This reverts commit efa230f2c68abab817f13473077f8d0cc74f43f3.
+
+BQL doesn't work with how this driver currently only takes TX
+interrupts every 1/4 of the TX ring. That behavior needs to be fixed,
+but that's a larger non-trivial task and for now we have to revert
+BQL support as this makes the device currently completely unusable.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/niu.c | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+--- a/drivers/net/ethernet/sun/niu.c
++++ b/drivers/net/ethernet/sun/niu.c
+@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu
+ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
+ {
+ struct netdev_queue *txq;
+- unsigned int tx_bytes;
+ u16 pkt_cnt, tmp;
+ int cons, index;
+ u64 cs;
+@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np,
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
+
+- tx_bytes = 0;
+- tmp = pkt_cnt;
+- while (tmp--) {
+- tx_bytes += rp->tx_buffs[cons].skb->len;
++ while (pkt_cnt--)
+ cons = release_tx_packet(np, rp, cons);
+- }
+
+ rp->cons = cons;
+ smp_mb();
+
+- netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+-
+ out:
+ if (unlikely(netif_tx_queue_stopped(txq) &&
+ (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
+@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu
+ struct tx_ring_info *rp = &np->tx_rings[i];
+
+ niu_free_tx_ring_info(np, rp);
+- netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
+ }
+ kfree(np->tx_rings);
+ np->tx_rings = NULL;
+@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct
+ prod = NEXT_TX(rp, prod);
+ }
+
+- netdev_tx_sent_queue(txq, skb->len);
+-
+ if (prod < rp->prod)
+ rp->wrap_bit ^= TX_RING_KICK_WRAP;
+ rp->prod = prod;
powerpc-kvm-sldi-should-be-sld.patch
powerpc-xmon-use-cpumask-iterator-to-avoid-warning.patch
media-smsusb-add-autodetection-support-for-usb-id-2040-f5a0.patch
+bql-fix-posdiff-to-integer-overflow-aware.patch
+bql-avoid-unneeded-limit-decrement.patch
+bql-avoid-possible-inconsistent-calculation.patch
+net-sock-validate-data_len-before-allocating-skb-in-sock_alloc_send_pskb.patch
+cipso-handle-cipso-options-correctly-when-netlabel-is-disabled.patch
+r8169-call-netif_napi_del-at-errpaths-and-at-driver-unload.patch
+drop_monitor-dont-sleep-in-atomic-context.patch
+inetpeer-fix-a-race-in-inetpeer_gc_worker.patch
+net-l2tp_eth-fix-kernel-panic-on-rmmod-l2tp_eth.patch
+l2tp-fix-a-race-in-l2tp_ip_sendmsg.patch
+lpc_eth-add-missing-ndo_change_mtu.patch
+lpc_eth-fix-tx-completion.patch
+net-neighbour-fix-neigh_dump_info.patch
+ipv6-fib-restore-ntf_router-exception-in-fib6_age.patch
+ipv6-move-ipv6-proc-file-registration-to-end-of-init-order.patch
+sky2-fix-checksum-bit-management-on-some-chips.patch
+revert-niu-add-support-for-byte-queue-limits.patch
+be2net-fix-a-race-in-be_xmit.patch
+dummy-fix-rcu_sched-self-detected-stalls.patch
+bonding-fix-corrupted-queue_mapping.patch
+netpoll-fix-netpoll_send_udp-bugs.patch
+bnx2x-fix-checksum-validation.patch
+bnx2x-fix-panic-when-tx-ring-is-full.patch
+net-remove-skb_orphan_try.patch
+bridge-assign-rtnl_link_ops-to-bridge-devices-created-via-ioctl-v2.patch
+xen-netfront-teardown-the-device-before-unregistering-it.patch
--- /dev/null
+From 837f5fa245a35ce33b85d55d43a125000f598a97 Mon Sep 17 00:00:00 2001
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Wed, 6 Jun 2012 10:01:30 +0000
+Subject: sky2: fix checksum bit management on some chips
+
+
+From: stephen hemminger <shemminger@vyatta.com>
+
+[ Upstream commit 5ff0feac88ced864f44adb145142269196fa79d9 ]
+
+The newer flavors of Yukon II use a different method for receive
+checksum offload. This is indicated in the driver by the SKY2_HW_NEW_LE
+flag. On these newer chips, BMU_ENA_RX_CHKSUM should not be set.
+
+The driver would incorrectly toggle the bit if receive checksum offload
+was toggled via ethtool, enabling the old checksum logic on these chips
+and causing a BUG_ON() assertion.
+
+Reported-by: Kirill Smelkov <kirr@mns.spb.ru>
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/sky2.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_
+ struct sky2_port *sky2 = netdev_priv(dev);
+ netdev_features_t changed = dev->features ^ features;
+
+- if (changed & NETIF_F_RXCSUM) {
+- bool on = features & NETIF_F_RXCSUM;
+- sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+- on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
++ if ((changed & NETIF_F_RXCSUM) &&
++ !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
++ sky2_write32(sky2->hw,
++ Q_ADDR(rxqaddr[sky2->port], Q_CSR),
++ (features & NETIF_F_RXCSUM)
++ ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
+ }
+
+ if (changed & NETIF_F_RXHASH)
--- /dev/null
+From 84521b50a73f6ee6425f49389675a7641d73d90c Mon Sep 17 00:00:00 2001
+From: Ian Campbell <Ian.Campbell@citrix.com>
+Date: Mon, 25 Jun 2012 22:48:41 +0000
+Subject: xen/netfront: teardown the device before unregistering it.
+
+
+From: Ian Campbell <Ian.Campbell@citrix.com>
+
+[ Upstream commit 6bc96d047fe32d76ef79f3195c52a542edf7c705 ]
+
+Fixes:
+[ 15.470311] WARNING: at /local/scratch/ianc/devel/kernels/linux/fs/sysfs/file.c:498 sysfs_attr_ns+0x95/0xa0()
+[ 15.470326] sysfs: kobject eth0 without dirent
+[ 15.470333] Modules linked in:
+[ 15.470342] Pid: 12, comm: xenwatch Not tainted 3.4.0-x86_32p-xenU #93
+and
+[ 9.150554] BUG: unable to handle kernel paging request at 2b359000
+[ 9.150577] IP: [<c1279561>] linkwatch_do_dev+0x81/0xc0
+[ 9.150592] *pdpt = 000000002c3c9027 *pde = 0000000000000000
+[ 9.150604] Oops: 0002 [#1] SMP
+[ 9.150613] Modules linked in:
+
+This is http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=675190
+
+Reported-by: George Shuklin <george.shuklin@gmail.com>
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+Tested-by: William Dauchy <wdauchy@gmail.com>
+Cc: stable@kernel.org
+Cc: 675190@bugs.debian.org
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/xen-netfront.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/xen-netfront.c
++++ b/drivers/net/xen-netfront.c
+@@ -1935,14 +1935,14 @@ static int __devexit xennet_remove(struc
+
+ dev_dbg(&dev->dev, "%s\n", dev->nodename);
+
+- unregister_netdev(info->netdev);
+-
+ xennet_disconnect_backend(info);
+
+- del_timer_sync(&info->rx_refill_timer);
+-
+ xennet_sysfs_delif(info->netdev);
+
++ unregister_netdev(info->netdev);
++
++ del_timer_sync(&info->rx_refill_timer);
++
+ free_percpu(info->stats);
+
+ free_netdev(info->netdev);