--- /dev/null
+From 2e1706f234f86ff71056ef69683d734fbf7e9e40 Mon Sep 17 00:00:00 2001
+From: Bruce Allan <bruce.w.allan@intel.com>
+Date: Sat, 30 Jun 2012 20:02:42 +0000
+Subject: e1000e: remove use of IP payload checksum
+
+From: Bruce Allan <bruce.w.allan@intel.com>
+
+commit 2e1706f234f86ff71056ef69683d734fbf7e9e40 upstream.
+
+IP payload checksum (for fragmented UDP packets), currently used only when
+packet split mode is enabled with jumbo frames, is mutually exclusive with
+receive hashing offload because the hardware uses the same space in the
+receive descriptor for either the hardware-provided packet checksum or the
+RSS hash. Because of this incompatibility, users currently must disable
+jumbos when receive hashing offload is enabled, or vice versa.
+Since testing has shown that IP payload checksum does not provide any real
+benefit, just remove it so that users no longer have to choose between
+jumbos and receive hashing offload, as is already the case in other Intel
+GbE drivers (e.g. e1000, igb).
+
+Also, add a missing check for IP checksum error reported by the hardware;
+let the stack verify the checksum when this happens.
+
+Signed-off-by: Bruce Allan <bruce.w.allan@intel.com>
+Tested-by: Aaron Brown <aaron.f.brown@intel.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/ethernet/intel/e1000e/defines.h | 1
+ drivers/net/ethernet/intel/e1000e/netdev.c | 75 +++++-----------------------
+ 2 files changed, 15 insertions(+), 61 deletions(-)
+
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -101,6 +101,7 @@
+ #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */
+ #define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */
+ #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
++#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
+ #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
+ #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+
+--- a/drivers/net/ethernet/intel/e1000e/netdev.c
++++ b/drivers/net/ethernet/intel/e1000e/netdev.c
+@@ -495,7 +495,7 @@ static void e1000_receive_skb(struct e10
+ * @sk_buff: socket buffer with received data
+ **/
+ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+- __le16 csum, struct sk_buff *skb)
++ struct sk_buff *skb)
+ {
+ u16 status = (u16)status_err;
+ u8 errors = (u8)(status_err >> 24);
+@@ -510,8 +510,8 @@ static void e1000_rx_checksum(struct e10
+ if (status & E1000_RXD_STAT_IXSM)
+ return;
+
+- /* TCP/UDP checksum error bit is set */
+- if (errors & E1000_RXD_ERR_TCPE) {
++ /* TCP/UDP checksum error bit or IP checksum error bit is set */
++ if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
+ /* let the stack verify checksum errors */
+ adapter->hw_csum_err++;
+ return;
+@@ -522,19 +522,7 @@ static void e1000_rx_checksum(struct e10
+ return;
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+- if (status & E1000_RXD_STAT_TCPCS) {
+- /* TCP checksum is good */
+- skb->ip_summed = CHECKSUM_UNNECESSARY;
+- } else {
+- /*
+- * IP fragment with UDP payload
+- * Hardware complements the payload checksum, so we undo it
+- * and then put the value in host order for further stack use.
+- */
+- __sum16 sum = (__force __sum16)swab16((__force u16)csum);
+- skb->csum = csum_unfold(~sum);
+- skb->ip_summed = CHECKSUM_COMPLETE;
+- }
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_good++;
+ }
+
+@@ -978,8 +966,7 @@ static bool e1000_clean_rx_irq(struct e1
+ skb_put(skb, length);
+
+ /* Receive Checksum Offload */
+- e1000_rx_checksum(adapter, staterr,
+- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
++ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+@@ -1360,8 +1347,7 @@ copydone:
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+- e1000_rx_checksum(adapter, staterr,
+- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
++ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+@@ -1531,9 +1517,8 @@ static bool e1000_clean_jumbo_rx_irq(str
+ }
+ }
+
+- /* Receive Checksum Offload XXX recompute due to CRC strip? */
+- e1000_rx_checksum(adapter, staterr,
+- rx_desc->wb.lower.hi_dword.csum_ip.csum, skb);
++ /* Receive Checksum Offload */
++ e1000_rx_checksum(adapter, staterr, skb);
+
+ e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
+
+@@ -3120,19 +3105,10 @@ static void e1000_configure_rx(struct e1
+
+ /* Enable Receive Checksum Offload for TCP and UDP */
+ rxcsum = er32(RXCSUM);
+- if (adapter->netdev->features & NETIF_F_RXCSUM) {
++ if (adapter->netdev->features & NETIF_F_RXCSUM)
+ rxcsum |= E1000_RXCSUM_TUOFL;
+-
+- /*
+- * IPv4 payload checksum for UDP fragments must be
+- * used in conjunction with packet-split.
+- */
+- if (adapter->rx_ps_pages)
+- rxcsum |= E1000_RXCSUM_IPPCSE;
+- } else {
++ else
+ rxcsum &= ~E1000_RXCSUM_TUOFL;
+- /* no need to clear IPPCSE as it defaults to 0 */
+- }
+ ew32(RXCSUM, rxcsum);
+
+ if (adapter->hw.mac.type == e1000_pch2lan) {
+@@ -5260,22 +5236,10 @@ static int e1000_change_mtu(struct net_d
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* Jumbo frame support */
+- if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) {
+- if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
+- e_err("Jumbo Frames not supported.\n");
+- return -EINVAL;
+- }
+-
+- /*
+- * IP payload checksum (enabled with jumbos/packet-split when
+- * Rx checksum is enabled) and generation of RSS hash is
+- * mutually exclusive in the hardware.
+- */
+- if ((netdev->features & NETIF_F_RXCSUM) &&
+- (netdev->features & NETIF_F_RXHASH)) {
+- e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n");
+- return -EINVAL;
+- }
++ if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
++ !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
++ e_err("Jumbo Frames not supported.\n");
++ return -EINVAL;
+ }
+
+ /* Supported frame sizes */
+@@ -6049,17 +6013,6 @@ static int e1000_set_features(struct net
+ NETIF_F_RXALL)))
+ return 0;
+
+- /*
+- * IP payload checksum (enabled with jumbos/packet-split when Rx
+- * checksum is enabled) and generation of RSS hash is mutually
+- * exclusive in the hardware.
+- */
+- if (adapter->rx_ps_pages &&
+- (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) {
+- e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. Disable jumbos or enable only one of the receive offload features.\n");
+- return -EINVAL;
+- }
+-
+ if (changed & NETIF_F_RXFCS) {
+ if (features & NETIF_F_RXFCS) {
+ adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
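
Background note on why the two offloads collide: in the extended Rx
descriptor that e1000e reads back from hardware, the RSS hash and the
packet checksum / IP-id pair occupy the same 32-bit word of the writeback
format, so the hardware can report only one of them per packet. The sketch
below is a simplified reconstruction of that layout, inferred from the
rx_desc->wb.lower.hi_dword.{rss,csum_ip.csum} accesses removed above; the
authoritative definition lives in the driver's hardware headers and may
differ in detail.

/* Simplified sketch of the extended Rx descriptor writeback format
 * (needs <linux/types.h> for the __leXX types).  The union inside
 * hi_dword is the reason the RSS hash and the packet checksum are
 * mutually exclusive for a given received packet.
 */
union rx_desc_extended_sketch {
	struct {
		__le64 buffer_addr;
		__le64 reserved;
	} read;
	struct {
		struct {
			__le32 mrq;			/* multiple Rx queues */
			union {
				__le32 rss;		/* RSS hash ... */
				struct {
					__le16 ip_id;	/* IP identification */
					__le16 csum;	/* ... or packet checksum */
				} csum_ip;
			} hi_dword;			/* same 32 bits serve both */
		} lower;
		struct {
			__le32 status_error;		/* extended status/error */
			__le16 length;
			__le16 vlan;			/* VLAN tag */
		} upper;
	} wb;						/* writeback */
};
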
--- /dev/null
+From 476a7eeb60e70ddab138e7cb4bc44ef5ac20782e Mon Sep 17 00:00:00 2001
+From: Shinya Kuribayashi <shinya.kuribayashi.px@renesas.com>
+Date: Sat, 7 Jul 2012 13:37:42 +0300
+Subject: hwspinlock/core: use global ID to register hwspinlocks on multiple devices
+
+From: Shinya Kuribayashi <shinya.kuribayashi.px@renesas.com>
+
+commit 476a7eeb60e70ddab138e7cb4bc44ef5ac20782e upstream.
+
+Commit 300bab9770 (hwspinlock/core: register a bank of hwspinlocks in a
+single API call, 2011-09-06) introduced 'hwspin_lock_register_single()'
+so that numerous (a bank of) hwspinlock instances can be registered with
+a single API call, 'hwspin_lock_register()'.
+
+However, 'hwspin_lock_register()' accidentally passes 'local IDs' to
+'hwspin_lock_register_single()', even though ..._single() requires
+'global IDs' to register hwspinlocks.
+
+We have to convert to global IDs by supplying the missing 'base_id'.
+
+Signed-off-by: Shinya Kuribayashi <shinya.kuribayashi.px@renesas.com>
+[ohad: fix error path of hwspin_lock_register, too]
+Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hwspinlock/hwspinlock_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/hwspinlock/hwspinlock_core.c
++++ b/drivers/hwspinlock/hwspinlock_core.c
+@@ -345,7 +345,7 @@ int hwspin_lock_register(struct hwspinlo
+ spin_lock_init(&hwlock->lock);
+ hwlock->bank = bank;
+
+- ret = hwspin_lock_register_single(hwlock, i);
++ ret = hwspin_lock_register_single(hwlock, base_id + i);
+ if (ret)
+ goto reg_failed;
+ }
+@@ -354,7 +354,7 @@ int hwspin_lock_register(struct hwspinlo
+
+ reg_failed:
+ while (--i >= 0)
+- hwspin_lock_unregister_single(i);
++ hwspin_lock_unregister_single(base_id + i);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(hwspin_lock_register);
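
To illustrate the ID convention the fix restores (a minimal sketch with a
hypothetical helper name, not code from the hwspinlock core): each bank
owns a contiguous range of global IDs starting at its 'base_id', while the
loop index used in hwspin_lock_register() is only a bank-local index, so
the value handed to ..._single() has to be offset by 'base_id'.

/*
 * Minimal sketch, assuming the base_id + num_locks registration model
 * described above; hwlock_global_id() is a hypothetical helper, not a
 * hwspinlock core API.
 */
static inline int hwlock_global_id(int base_id, int local_idx)
{
	return base_id + local_idx;
}

/*
 * Example: bank A registered with base_id 0 and 8 locks -> global IDs 0..7
 *          bank B registered with base_id 8 and 4 locks -> global IDs 8..11
 * Passing the bare loop index for bank B would wrongly claim IDs 0..3,
 * which is exactly the bug fixed above.
 */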