--- /dev/null
+From 30813173c3add102a90d702f7345a537d55be5e1 Mon Sep 17 00:00:00 2001
+From: Ingo van Lil <inguin@gmx.de>
+Date: Mon, 23 Apr 2012 22:05:38 +0000
+Subject: asix: Fix tx transfer padding for full-speed USB
+
+
+From: Ingo van Lil <inguin@gmx.de>
+
+[ Upstream commit 2a5809499e35b53a6044fd34e72b242688b7a862 ]
+
+The asix.c USB Ethernet driver avoids ending a tx transfer with a zero-
+length packet by appending a four-byte padding to transfers whose length
+is a multiple of maxpacket. However, the hard-coded 512 byte maxpacket
+length is valid for high-speed USB only; full-speed USB uses 64 byte
+packets.
+
+Signed-off-by: Ingo van Lil <inguin@gmx.de>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/asix.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/usb/asix.c
++++ b/drivers/net/usb/asix.c
+@@ -403,7 +403,7 @@ static struct sk_buff *asix_tx_fixup(str
+ u32 packet_len;
+ u32 padbytes = 0xffff0000;
+
+- padlen = ((skb->len + 4) % 512) ? 0 : 4;
++ padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) &&
+ ((headroom + tailroom) >= (4 + padlen))) {
+@@ -425,7 +425,7 @@ static struct sk_buff *asix_tx_fixup(str
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+- if ((skb->len % 512) == 0) {
++ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
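
A minimal user-space sketch of the padding decision, assuming maxpacket is a
power of two (64 for full-speed, 512 for high-speed bulk endpoints), which is
what makes the mask form equivalent to the modulo:

    #include <stdio.h>

    /* Decide whether a 4-byte pad is needed so the transfer never ends
     * exactly on a maxpacket boundary (which would otherwise require a
     * zero-length packet to terminate it). */
    static unsigned int pad_needed(unsigned int skb_len, unsigned int maxpacket)
    {
            /* maxpacket is a power of two, so & (maxpacket - 1) == % maxpacket */
            return ((skb_len + 4) & (maxpacket - 1)) ? 0 : 4;
    }

    int main(void)
    {
            /* A 60-byte frame plus the 4-byte header lands exactly on the
             * 64-byte full-speed boundary, but not on the 512-byte one. */
            printf("full speed: %u\n", pad_needed(60, 64));  /* 4 */
            printf("high speed: %u\n", pad_needed(60, 512)); /* 0 */
            return 0;
    }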
--- /dev/null
+From c58e988584c4664bc1eb8d5efc70ebf007f51ce3 Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+Date: Tue, 7 Feb 2012 02:29:06 +0000
+Subject: net: Add memory barriers to prevent possible race in byte queue limits
+
+
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+
+[ Upstream commit b37c0fbe3f6dfba1f8ad2aed47fb40578a254635 ]
+
+This change adds memory barriers to the byte queue limit code to address a
+possible race of the kind seen in the past with the
+netif_stop_queue/netif_wake_queue logic.
+
+Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Tested-by: Stephen Ko <stephen.s.ko@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/netdevice.h | 49 ++++++++++++++++++++++++++++++----------------
+ 1 file changed, 33 insertions(+), 16 deletions(-)
+
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1898,12 +1898,22 @@ static inline void netdev_tx_sent_queue(
+ {
+ #ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+- if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
+- set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+- if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+- clear_bit(__QUEUE_STATE_STACK_XOFF,
+- &dev_queue->state);
+- }
++
++ if (likely(dql_avail(&dev_queue->dql) >= 0))
++ return;
++
++ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
++
++ /*
++ * The XOFF flag must be set before checking the dql_avail below,
++ * because in netdev_tx_completed_queue we update the dql_completed
++ * before checking the XOFF flag.
++ */
++ smp_mb();
++
++ /* check again in case another CPU has just made room avail */
++ if (unlikely(dql_avail(&dev_queue->dql) >= 0))
++ clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+ #endif
+ }
+
+@@ -1916,16 +1926,23 @@ static inline void netdev_tx_completed_q
+ unsigned pkts, unsigned bytes)
+ {
+ #ifdef CONFIG_BQL
+- if (likely(bytes)) {
+- dql_completed(&dev_queue->dql, bytes);
+- if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
+- &dev_queue->state) &&
+- dql_avail(&dev_queue->dql) >= 0)) {
+- if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
+- &dev_queue->state))
+- netif_schedule_queue(dev_queue);
+- }
+- }
++ if (unlikely(!bytes))
++ return;
++
++ dql_completed(&dev_queue->dql, bytes);
++
++ /*
++ * Without the memory barrier there is a small possibility that
++ * netdev_tx_sent_queue will miss the update and cause the queue to
++ * be stopped forever
++ */
++ smp_mb();
++
++ if (dql_avail(&dev_queue->dql) < 0)
++ return;
++
++ if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
++ netif_schedule_queue(dev_queue);
+ #endif
+ }
+
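
A compressed user-space model of the race the barriers close, using C11
atomics in place of dql and the queue state bit (all names below are
stand-ins, not the kernel API): each side must make its write visible before
re-reading the other side's state, or both can miss each other's update and
leave the queue stopped forever:

    #include <stdatomic.h>

    static atomic_long avail;  /* models dql_avail()              */
    static atomic_bool xoff;   /* models __QUEUE_STATE_STACK_XOFF */

    static void sent_queue(long bytes)           /* TX path */
    {
            if (atomic_fetch_sub(&avail, bytes) - bytes >= 0)
                    return;
            atomic_store(&xoff, 1);
            /* pairs with the fence in completed_queue() */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&avail) >= 0)        /* room appeared meanwhile */
                    atomic_store(&xoff, 0);
    }

    static void completed_queue(long bytes)      /* completion path */
    {
            atomic_fetch_add(&avail, bytes);
            /* pairs with the fence in sent_queue() */
            atomic_thread_fence(memory_order_seq_cst);
            if (atomic_load(&avail) >= 0 && atomic_exchange(&xoff, 0))
                    ;                            /* reschedule the queue here */
    }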
--- /dev/null
+From b64e540a89b568ceaa12f982a21789bedc00c88a Mon Sep 17 00:00:00 2001
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+Date: Tue, 7 Feb 2012 02:29:01 +0000
+Subject: net: Fix issue with netdev_tx_reset_queue not resetting queue from XOFF state
+
+
+From: Alexander Duyck <alexander.h.duyck@intel.com>
+
+[ Upstream commit 5c4903549c05bbb373479e0ce2992573c120654a ]
+
+We are seeing dev_watchdog hangs on several drivers. I suspect this is due
+to the __QUEUE_STATE_STACK_XOFF bit being set prior to a reset for link
+change, and then not being cleared by netdev_tx_reset_queue. This change
+corrects that.
+
+In addition we were seeing dev_watchdog hangs on igb after running the
+ethtool tests. We found this to be due to the fact that the ethtool test
+runs the same logic as ndo_start_xmit, but we were never clearing the XOFF
+flag since the loopback test in ethtool does not do byte queue accounting.
+
+Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
+Tested-by: Stephen Ko <stephen.s.ko@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/igb/igb_main.c | 3 ++-
+ include/linux/netdevice.h | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/igb/igb_main.c
++++ b/drivers/net/ethernet/intel/igb/igb_main.c
+@@ -2750,6 +2750,8 @@ void igb_configure_tx_ring(struct igb_ad
+
+ txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+ wr32(E1000_TXDCTL(reg_idx), txdctl);
++
++ netdev_tx_reset_queue(txring_txq(ring));
+ }
+
+ /**
+@@ -3242,7 +3244,6 @@ static void igb_clean_tx_ring(struct igb
+ buffer_info = &tx_ring->tx_buffer_info[i];
+ igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+ }
+- netdev_tx_reset_queue(txring_txq(tx_ring));
+
+ size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -1955,6 +1955,7 @@ static inline void netdev_completed_queu
+ static inline void netdev_tx_reset_queue(struct netdev_queue *q)
+ {
+ #ifdef CONFIG_BQL
++ clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state);
+ dql_reset(&q->dql);
+ #endif
+ }
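
A hedged sketch of the ordering this patch establishes (condensed from the
diff; the igb helper names are as in the patch): BQL state is reset where the
ring is configured, so even transmit paths that skip byte queue accounting,
such as the ethtool loopback test, start from a queue with XOFF cleared:

    void igb_configure_tx_ring(struct igb_adapter *adapter,
                               struct igb_ring *ring)
    {
            /* ... program the ring registers and set TXDCTL enable ... */

            /* Reset dql and clear __QUEUE_STATE_STACK_XOFF here, rather
             * than in igb_clean_tx_ring(), so a queue stopped before a
             * reset (or by the loopback test) cannot stay stopped. */
            netdev_tx_reset_queue(txring_txq(ring));
    }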
--- /dev/null
+From e46e2813b4a0b98cf319c9c84afb7aa5950d7b9d Mon Sep 17 00:00:00 2001
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Fri, 6 Apr 2012 15:33:35 +0000
+Subject: net: In unregister_netdevice_notifier unregister the netdevices.
+
+
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+
+[ Upstream commit 7d3d43dab4e978d8d9ad1acf8af15c9b1c4b0f0f ]
+
+We already synthesize events in register_netdevice_notifier, and synthesizing
+events in unregister_netdevice_notifier allows us to remove the need for
+special-case cleanup code.
+
+This change should be safe, as it adds no new cases for existing callers
+of unregister_netdevice_notifier to handle.
+
+Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/core/dev.c | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -1412,14 +1412,34 @@ EXPORT_SYMBOL(register_netdevice_notifie
+ * register_netdevice_notifier(). The notifier is unlinked into the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
++ *
++ * After unregistering, unregister and down device events are synthesized
++ * for all devices on the device list to the removed notifier, removing
++ * the need for special-case cleanup code.
+ */
+
+ int unregister_netdevice_notifier(struct notifier_block *nb)
+ {
++ struct net_device *dev;
++ struct net *net;
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
++ if (err)
++ goto unlock;
++
++ for_each_net(net) {
++ for_each_netdev(net, dev) {
++ if (dev->flags & IFF_UP) {
++ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
++ nb->notifier_call(nb, NETDEV_DOWN, dev);
++ }
++ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
++ nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
++ }
++ }
++unlock:
+ rtnl_unlock();
+ return err;
+ }
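
A hedged sketch of what the synthesized events buy a caller (module and
helper names are hypothetical): per-device state torn down from the notifier
itself no longer needs a manual device walk at module exit:

    static int foo_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
    {
            struct net_device *dev = ptr;   /* 3.x-era notifier argument */

            if (event == NETDEV_UNREGISTER)
                    foo_free_state(dev);    /* hypothetical cleanup */
            return NOTIFY_DONE;
    }

    static struct notifier_block foo_nb = { .notifier_call = foo_netdev_event };

    static void __exit foo_exit(void)
    {
            /* The synthesized DOWN/UNREGISTER events now clean up every
             * remaining device for us. */
            unregister_netdevice_notifier(&foo_nb);
    }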
--- /dev/null
+From 3bfc6bc4d0e517923d7b1564aa2f8092f8f10667 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <levinsasha928@gmail.com>
+Date: Wed, 2 May 2012 03:58:43 +0000
+Subject: net: l2tp: unlock socket lock before returning from l2tp_ip_sendmsg
+
+
+From: Sasha Levin <levinsasha928@gmail.com>
+
+[ Upstream commit 84768edbb2721637620b2d84501bb0d5aed603f1 ]
+
+l2tp_ip_sendmsg could return without releasing the socket lock, letting the
+held lock escape all the way back to userspace and generating the following
+warning:
+
+[ 130.891594] ================================================
+[ 130.894569] [ BUG: lock held when returning to user space! ]
+[ 130.897257] 3.4.0-rc5-next-20120501-sasha #104 Tainted: G W
+[ 130.900336] ------------------------------------------------
+[ 130.902996] trinity/8384 is leaving the kernel with locks still held!
+[ 130.906106] 1 lock held by trinity/8384:
+[ 130.907924] #0: (sk_lock-AF_INET){+.+.+.}, at: [<ffffffff82b9503f>] l2tp_ip_sendmsg+0x2f/0x550
+
+Introduced by commit 2f16270 ("l2tp: Fix locking in l2tp_ip.c").
+
+Signed-off-by: Sasha Levin <levinsasha928@gmail.com>
+Acked-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/l2tp/l2tp_ip.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/net/l2tp/l2tp_ip.c
++++ b/net/l2tp/l2tp_ip.c
+@@ -441,8 +441,9 @@ static int l2tp_ip_sendmsg(struct kiocb
+
+ daddr = lip->l2tp_addr.s_addr;
+ } else {
++ rc = -EDESTADDRREQ;
+ if (sk->sk_state != TCP_ESTABLISHED)
+- return -EDESTADDRREQ;
++ goto out;
+
+ daddr = inet->inet_daddr;
+ connected = 1;
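
The fix restores the single-exit pattern for a function that runs under
lock_sock(); a minimal sketch of that shape (names are illustrative, not the
l2tp code):

    static int foo_sendmsg(struct sock *sk /* ... */)
    {
            int rc;

            lock_sock(sk);

            rc = -EDESTADDRREQ;
            if (sk->sk_state != TCP_ESTABLISHED)
                    goto out;               /* must not return directly */

            rc = 0;                         /* ... build and queue skb ... */
    out:
            release_sock(sk);
            return rc;
    }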
--- /dev/null
+From 6b984732641614678e8a85b82915c863bd4c24e5 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 29 Apr 2012 09:08:22 +0000
+Subject: netem: fix possible skb leak
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 116a0fc31c6c9b8fc821be5a96e5bf0b43260131 ]
+
+skb_checksum_help(skb) can return an error; we must free the skb in this
+case. qdisc_drop(skb, sch) can also be fed a NULL skb (if
+skb_unshare() failed), so let's use this generic helper.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Stephen Hemminger <shemminger@osdl.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sched/sch_netem.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff
+ if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+ if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL &&
+- skb_checksum_help(skb))) {
+- sch->qstats.drops++;
+- return NET_XMIT_DROP;
+- }
++ skb_checksum_help(skb)))
++ return qdisc_drop(skb, sch);
+
+ skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
+ }
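
The helper is safe for both failure modes because kfree_skb() ignores a NULL
pointer; a rough paraphrase of the era's qdisc_drop() (from
include/net/sch_generic.h, reproduced from memory, not verbatim):

    static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
    {
            kfree_skb(skb);          /* no-op when skb_unshare() returned NULL */
            sch->qstats.drops++;
            return NET_XMIT_DROP;
    }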
regulator-fix-the-logic-to-ensure-new-voltage-setting-in-valid-range.patch
arm-orion5x-fix-gpio-enable-bits-for-mpp9.patch
arm-omap-revert-arm-omap-ctrl-fix-control_dsiphy-register-fields.patch
+asix-fix-tx-transfer-padding-for-full-speed-usb.patch
+netem-fix-possible-skb-leak.patch
+net-in-unregister_netdevice_notifier-unregister-the-netdevices.patch
+net-l2tp-unlock-socket-lock-before-returning-from-l2tp_ip_sendmsg.patch
+sky2-propogate-rx-hash-when-packet-is-copied.patch
+sky2-fix-receive-length-error-in-mixed-non-vlan-vlan-traffic.patch
+sungem-fix-wakeonlan.patch
+tg3-avoid-panic-from-reserved-statblk-field-access.patch
+tcp-fix-infinite-cwnd-in-tcp_complete_cwr.patch
+tcp-change-tcp_adv_win_scale-and-tcp_rmem.patch
+net-add-memory-barriers-to-prevent-possible-race-in-byte-queue-limits.patch
+net-fix-issue-with-netdev_tx_reset_queue-not-resetting-queue-from-xoff-state.patch
--- /dev/null
+From 3614fd6750c66acb42f85e6dc13a1fb478a5c11e Mon Sep 17 00:00:00 2001
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Mon, 30 Apr 2012 06:47:37 +0000
+Subject: sky2: fix receive length error in mixed non-VLAN/VLAN traffic
+
+
+From: stephen hemminger <shemminger@vyatta.com>
+
+[ Upstream commit e072b3fad5f3915102c94628b4971f52ff99dd05 ]
+
+Bug: The VLAN bit of the MAC RX Status Word is unreliable in several older
+supported chips. Sometimes the VLAN bit is not set for valid VLAN packets,
+and sometimes it is set for non-VLAN packets that arrive after a VLAN
+packet. This results in a receive length error when VLAN hardware tagging
+is enabled.
+
+Fix: A variation on the original fix proposed by Mirko.
+The VLAN information is decoded in the status loop and can be applied to
+the received SKB there. This eliminates the need for the separate tag
+field in the interface data structure. The tag has to be copied and
+cleared if the packet is copied. This version checked out with both VLAN
+and normal traffic.
+
+Note: vlan_tx_tag_present should be renamed vlan_tag_present, but that is
+outside the scope of this change.
+
+Reported-by: Mirko Lindner <mlindner@marvell.com>
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/sky2.c | 28 +++++++++++++++++-----------
+ drivers/net/ethernet/marvell/sky2.h | 1 -
+ 2 files changed, 17 insertions(+), 12 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -2484,9 +2484,11 @@ static struct sk_buff *receive_copy(stru
+ skb->ip_summed = re->skb->ip_summed;
+ skb->csum = re->skb->csum;
+ skb->rxhash = re->skb->rxhash;
++ skb->vlan_tci = re->skb->vlan_tci;
+
+ pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+ length, PCI_DMA_FROMDEVICE);
++ re->skb->vlan_tci = 0;
+ re->skb->rxhash = 0;
+ re->skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, length);
+@@ -2572,9 +2574,6 @@ static struct sk_buff *sky2_receive(stru
+ struct sk_buff *skb = NULL;
+ u16 count = (status & GMR_FS_LEN) >> 16;
+
+- if (status & GMR_FS_VLAN)
+- count -= VLAN_HLEN; /* Account for vlan tag */
+-
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
+@@ -2582,6 +2581,9 @@ static struct sk_buff *sky2_receive(stru
+ sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+ prefetch(sky2->rx_ring + sky2->rx_next);
+
++ if (vlan_tx_tag_present(re->skb))
++ count -= VLAN_HLEN; /* Account for vlan tag */
++
+ /* This chip has hardware problems that generates bogus status.
+ * So do only marginal checking and expect higher level protocols
+ * to handle crap frames.
+@@ -2639,11 +2641,8 @@ static inline void sky2_tx_done(struct n
+ }
+
+ static inline void sky2_skb_rx(const struct sky2_port *sky2,
+- u32 status, struct sk_buff *skb)
++ struct sk_buff *skb)
+ {
+- if (status & GMR_FS_VLAN)
+- __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
+-
+ if (skb->ip_summed == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+@@ -2697,6 +2696,14 @@ static void sky2_rx_checksum(struct sky2
+ }
+ }
+
++static void sky2_rx_tag(struct sky2_port *sky2, u16 length)
++{
++ struct sk_buff *skb;
++
++ skb = sky2->rx_ring[sky2->rx_next].skb;
++ __vlan_hwaccel_put_tag(skb, be16_to_cpu(length));
++}
++
+ static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
+ {
+ struct sk_buff *skb;
+@@ -2755,8 +2762,7 @@ static int sky2_status_intr(struct sky2_
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+-
+- sky2_skb_rx(sky2, status, skb);
++ sky2_skb_rx(sky2, skb);
+
+ /* Stop after net poll weight */
+ if (++work_done >= to_do)
+@@ -2764,11 +2770,11 @@ static int sky2_status_intr(struct sky2_
+ break;
+
+ case OP_RXVLAN:
+- sky2->rx_tag = length;
++ sky2_rx_tag(sky2, length);
+ break;
+
+ case OP_RXCHKSVLAN:
+- sky2->rx_tag = length;
++ sky2_rx_tag(sky2, length);
+ /* fall through */
+ case OP_RXCHKS:
+ if (likely(dev->features & NETIF_F_RXCSUM))
+--- a/drivers/net/ethernet/marvell/sky2.h
++++ b/drivers/net/ethernet/marvell/sky2.h
+@@ -2241,7 +2241,6 @@ struct sky2_port {
+ u16 rx_pending;
+ u16 rx_data_size;
+ u16 rx_nfrags;
+- u16 rx_tag;
+
+ struct {
+ unsigned long last;
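
Why copying and clearing skb->vlan_tci moves the tag: in this era's
include/linux/if_vlan.h the accelerated tag lives entirely in that field. A
paraphrased sketch of the helpers (names shortened; the real ones are
__vlan_hwaccel_put_tag() and vlan_tx_tag_present()); receive_copy() therefore
carries, and then clears, both the tag and its present bit, just as it does
for rxhash and the checksum state:

    /* Paraphrase of the 3.x helpers (not verbatim): the CFI bit is
     * repurposed as a "tag present" flag inside skb->vlan_tci. */
    #define VLAN_TAG_PRESENT 0x1000

    static inline void put_tag(struct sk_buff *skb, u16 vlan_tci)
    {
            skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci;
    }

    static inline int tag_present(const struct sk_buff *skb)
    {
            return skb->vlan_tci & VLAN_TAG_PRESENT;
    }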
--- /dev/null
+From 3cab11147df5cc3a4b344754236ef108bc47dc75 Mon Sep 17 00:00:00 2001
+From: stephen hemminger <shemminger@vyatta.com>
+Date: Mon, 30 Apr 2012 05:49:45 +0000
+Subject: sky2: propogate rx hash when packet is copied
+
+
+From: stephen hemminger <shemminger@vyatta.com>
+
+[ Upstream commit 3f42941b5d1d13542b1a755a9e4f633aa72e4d3e ]
+
+When a small packet is received, the driver copies it to a new skb to allow
+reusing the full-size Rx buffer. The copy was propagating the checksum offload
+but not the receive hash information. The bug's impact was mostly harmless
+and therefore went unnoticed until this area of the code was reviewed.
+
+Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/sky2.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -2483,8 +2483,11 @@ static struct sk_buff *receive_copy(stru
+ skb_copy_from_linear_data(re->skb, skb->data, length);
+ skb->ip_summed = re->skb->ip_summed;
+ skb->csum = re->skb->csum;
++ skb->rxhash = re->skb->rxhash;
++
+ pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
+ length, PCI_DMA_FROMDEVICE);
++ re->skb->rxhash = 0;
+ re->skb->ip_summed = CHECKSUM_NONE;
+ skb_put(skb, length);
+ }
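
Copybreak paths like receive_copy() must move every offload result by hand; a
hedged sketch of the complete pattern after this fix (the helper name is
illustrative):

    /* Move receive metadata from the recycled RX skb to the fresh copy,
     * and reset the recycled skb so stale results cannot leak into the
     * next frame it carries. */
    static void foo_copy_rx_meta(struct sk_buff *to, struct sk_buff *from)
    {
            to->ip_summed = from->ip_summed;
            to->csum      = from->csum;
            to->rxhash    = from->rxhash;     /* the field this fix adds */

            from->ip_summed = CHECKSUM_NONE;
            from->rxhash    = 0;
    }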
--- /dev/null
+From 6265550705f10d3a3d3d2c80102874ffd6e6792f Mon Sep 17 00:00:00 2001
+From: Gerard Lledo <gerard.lledo@gmail.com>
+Date: Sat, 28 Apr 2012 08:52:37 +0000
+Subject: sungem: Fix WakeOnLan
+
+
+From: Gerard Lledo <gerard.lledo@gmail.com>
+
+[ Upstream commit 5a8887d39e1ba5ee2d4ccb94b14d6f2dce5ddfca ]
+
+WakeOnLan was broken in this driver because gp->asleep_wol is a 1-bit
+bitfield, and assigning it WAKE_MAGIC, which is (1 << 5), truncates to the
+low bit: gp->asleep_wol remains 0 and the machine never wakes up. Fixed by
+collapsing gp->wake_on_lan to 0/1 first. Tested on an iBook G4.
+
+Signed-off-by: Gerard Lledo <gerard.lledo@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/sun/sungem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/sun/sungem.c
++++ b/drivers/net/ethernet/sun/sungem.c
+@@ -2340,7 +2340,7 @@ static int gem_suspend(struct pci_dev *p
+ netif_device_detach(dev);
+
+ /* Switch off chip, remember WOL setting */
+- gp->asleep_wol = gp->wake_on_lan;
++ gp->asleep_wol = !!gp->wake_on_lan;
+ gem_do_stop(dev, gp->asleep_wol);
+
+ /* Unlock the network stack */
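
The truncation is easy to reproduce in user space: for an unsigned 1-bit
field, assignment stores the value modulo 2, so any WAKE_* flag above bit 0
is lost:

    #include <stdio.h>

    #define WAKE_MAGIC (1 << 5)    /* as in include/linux/ethtool.h */

    int main(void)
    {
            struct { unsigned int asleep_wol : 1; } gp;

            gp.asleep_wol = WAKE_MAGIC;     /* 0x20 % 2 == 0: WOL lost */
            printf("buggy: %u\n", gp.asleep_wol);

            gp.asleep_wol = !!WAKE_MAGIC;   /* collapse to 0/1 first */
            printf("fixed: %u\n", gp.asleep_wol);
            return 0;
    }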
--- /dev/null
+From 194fe373907125876d93f88ea44335764ee5fc51 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Wed, 2 May 2012 02:28:41 +0000
+Subject: tcp: change tcp_adv_win_scale and tcp_rmem[2]
+
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b49960a05e32121d29316cfdf653894b88ac9190 ]
+
+tcp_adv_win_scale's default value is 2, meaning we expect a good citizen
+skb to have a skb->len / skb->truesize ratio of 75% (3/4).
+
+In 2.6 kernels we (mis)accounted a typical MSS=1460 frame as:
+1536 + 64 + 256 = 1856 'estimated truesize', and 1856 * 3/4 = 1392.
+So these skbs were not considered bloated.
+
+With recent truesize fixes, a typical MSS=1460 frame truesize is now the
+more precise:
+2048 + 256 = 2304. But 2304 * 3/4 = 1728.
+So these skbs are no longer good citizens, because 1460 < 1728.
+
+(GRO can escape this problem because it builds skbs with too low a
+truesize.)
+
+This also means tcp advertises an overly optimistic window for a given
+allocated rcvspace: when receiving frames, sk_rmem_alloc can hit the
+sk_rcvbuf limit and we call tcp_prune_queue()/tcp_collapse() too often,
+especially when the application is slow to drain its receive queue or in
+case of losses (netperf is fast, scp is slow). This is a major latency
+source.
+
+We should adjust the len/truesize ratio to 50% instead of 75%.
+
+This patch:
+
+1) changes the tcp_adv_win_scale default to 1 instead of 2
+
+2) increases the tcp_rmem[2] limit from 4MB to 6MB to take into account
+better truesize tracking and to allow the autotuned tcp receive window to
+reach the same value as before. Note that the same amount of kernel memory
+is consumed compared to 2.6 kernels.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Cc: Tom Herbert <therbert@google.com>
+Cc: Yuchung Cheng <ycheng@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/networking/ip-sysctl.txt | 4 ++--
+ net/ipv4/tcp.c | 9 +++++----
+ net/ipv4/tcp_input.c | 2 +-
+ 3 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/Documentation/networking/ip-sysctl.txt
++++ b/Documentation/networking/ip-sysctl.txt
+@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
+ (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
+ if it is <= 0.
+ Possible values are [-31, 31], inclusive.
+- Default: 2
++ Default: 1
+
+ tcp_allowed_congestion_control - STRING
+ Show/set the congestion control choices available to non-privileged
+@@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, de
+ net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
+ automatic tuning of that socket's receive buffer size, in which
+ case this value is ignored.
+- Default: between 87380B and 4MB, depending on RAM size.
++ Default: between 87380B and 6MB, depending on RAM size.
+
+ tcp_sack - BOOLEAN
+ Enable select acknowledgments (SACKS).
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3240,7 +3240,7 @@ void __init tcp_init(void)
+ {
+ struct sk_buff *skb = NULL;
+ unsigned long limit;
+- int max_share, cnt;
++ int max_rshare, max_wshare, cnt;
+ unsigned int i;
+ unsigned long jiffy = jiffies;
+
+@@ -3300,15 +3300,16 @@ void __init tcp_init(void)
+ tcp_init_mem(&init_net);
+ /* Set per-socket limits to no more than 1/128 the pressure threshold */
+ limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
+- max_share = min(4UL*1024*1024, limit);
++ max_wshare = min(4UL*1024*1024, limit);
++ max_rshare = min(6UL*1024*1024, limit);
+
+ sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
+ sysctl_tcp_wmem[1] = 16*1024;
+- sysctl_tcp_wmem[2] = max(64*1024, max_share);
++ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
+
+ sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
+ sysctl_tcp_rmem[1] = 87380;
+- sysctl_tcp_rmem[2] = max(87380, max_share);
++ sysctl_tcp_rmem[2] = max(87380, max_rshare);
+
+ printk(KERN_INFO "TCP: Hash tables configured "
+ "(established %u bind %u)\n",
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -83,7 +83,7 @@ int sysctl_tcp_ecn __read_mostly = 2;
+ EXPORT_SYMBOL(sysctl_tcp_ecn);
+ int sysctl_tcp_dsack __read_mostly = 1;
+ int sysctl_tcp_app_win __read_mostly = 31;
+-int sysctl_tcp_adv_win_scale __read_mostly = 2;
++int sysctl_tcp_adv_win_scale __read_mostly = 1;
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
+ int sysctl_tcp_stdurg __read_mostly;
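
The knob's meaning can be read off the window helper; a paraphrase of
tcp_win_from_space() (not verbatim) with the message's arithmetic attached:

    /* How much window a given amount of receive buffer space may
     * advertise, as a function of tcp_adv_win_scale. */
    static int win_from_space(int space, int tcp_adv_win_scale)
    {
            return tcp_adv_win_scale <= 0 ?
                   space >> (-tcp_adv_win_scale) :
                   space - (space >> tcp_adv_win_scale);
    }

    /* scale = 2: window = 3/4 of space; a 2304-byte truesize skb is
     *            assumed to carry 1728 bytes, more than its 1460.
     * scale = 1: window = 1/2 of space; 2304 -> 1152, safely below 1460,
     *            hence the matching tcp_rmem[2] bump from 4MB to 6MB. */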
--- /dev/null
+From 9f7cf25f200a16301c97045fdf028ce6b27000e2 Mon Sep 17 00:00:00 2001
+From: Yuchung Cheng <ycheng@google.com>
+Date: Mon, 30 Apr 2012 06:00:18 +0000
+Subject: tcp: fix infinite cwnd in tcp_complete_cwr()
+
+
+From: Yuchung Cheng <ycheng@google.com>
+
+[ Upstream commit 1cebce36d660c83bd1353e41f3e66abd4686f215 ]
+
+When the cwnd reduction is done, ssthresh may be infinite
+if TCP enters CWR via ECN or F-RTO. If cwnd is not undone, i.e.,
+undo_marker is set, tcp_complete_cwr() falsely sets cwnd to the
+infinite ssthresh value. The correct operation is to keep cwnd
+intact because it has already been updated by ECN or F-RTO.
+
+Signed-off-by: Yuchung Cheng <ycheng@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/ipv4/tcp_input.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -2866,11 +2866,14 @@ static inline void tcp_complete_cwr(stru
+
+ /* Do not moderate cwnd if it's already undone in cwr or recovery. */
+ if (tp->undo_marker) {
+- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
++ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {
+ tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+- else /* PRR */
++ tp->snd_cwnd_stamp = tcp_time_stamp;
++ } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) {
++ /* PRR algorithm. */
+ tp->snd_cwnd = tp->snd_ssthresh;
+- tp->snd_cwnd_stamp = tcp_time_stamp;
++ tp->snd_cwnd_stamp = tcp_time_stamp;
++ }
+ }
+ tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
+ }
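
"Infinite" here is the sentinel ssthresh value from include/net/tcp.h; the
fix simply refuses to adopt it as the new cwnd:

    #define TCP_INFINITE_SSTHRESH 0x7fffffff   /* include/net/tcp.h */

    /* After ECN- or F-RTO-triggered CWR, ssthresh may still hold the
     * sentinel; only a genuinely reduced ssthresh (PRR) may become cwnd. */
    if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)
            tp->snd_cwnd = tp->snd_ssthresh;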
--- /dev/null
+From bbade94280909ebd5284ce117d9682ccb4e53825 Mon Sep 17 00:00:00 2001
+From: Matt Carlson <mcarlson@broadcom.com>
+Date: Tue, 24 Apr 2012 13:37:01 +0000
+Subject: tg3: Avoid panic from reserved statblk field access
+
+
+From: Matt Carlson <mcarlson@broadcom.com>
+
+[ Upstream commit f891ea1634ce41f5f47ae40d8594809f4cd2ca66 ]
+
+When RSS is enabled, interrupt vector 0 does not receive any rx traffic.
+The rx producer index fields for vector 0's status block should be
+considered reserved in this case. This patch changes the code to
+respect these reserved fields, which avoids a kernel panic when these
+fields take on non-zero values.
+
+Signed-off-by: Matt Carlson <mcarlson@broadcom.com>
+Signed-off-by: Michael Chan <mchan@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/tg3.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(
+ if (sblk->status & SD_STATUS_LINK_CHG)
+ work_exists = 1;
+ }
+- /* check for RX/TX work to do */
+- if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
++
++ /* check for TX work to do */
++ if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
++ work_exists = 1;
++
++ /* check for RX work to do */
++ if (tnapi->rx_rcb_prod_idx &&
+ *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ work_exists = 1;
+
+@@ -5877,6 +5882,9 @@ static int tg3_poll_work(struct tg3_napi
+ return work_done;
+ }
+
++ if (!tnapi->rx_rcb_prod_idx)
++ return work_done;
++
+ /* run RX thread, within the bounds set by NAPI.
+ * All RX "locking" is done by ensuring outside
+ * code synchronizes with tg3->napi.poll()
+@@ -7428,6 +7436,12 @@ static int tg3_alloc_consistent(struct t
+ */
+ switch (i) {
+ default:
++ if (tg3_flag(tp, ENABLE_RSS)) {
++ tnapi->rx_rcb_prod_idx = NULL;
++ break;
++ }
++ /* Fall through */
++ case 1:
+ tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+ break;
+ case 2: